path: root/pc-bios

Mode        Name                        Size
-rw-r--r--  Makefile                     236
-rw-r--r--  README                      1850
-rw-r--r--  acpi-dsdt.aml               4521
-rw-r--r--  bamboo.dtb                  3211
-rw-r--r--  bamboo.dts                  4639
-rw-r--r--  bios.bin                  131072
d---------  keymaps                     1117
-rwxr-xr-x  kvmvapic.bin                9216
-rw-r--r--  linuxboot.bin               1024
-rw-r--r--  multiboot.bin               1024
-rw-r--r--  ohw.diff                   66528
-rw-r--r--  openbios-ppc              729908
-rw-r--r--  openbios-sparc32          381764
-rw-r--r--  openbios-sparc64         1598648
d---------  optionrom                    191
-rwxr-xr-x  palcode-clipper           185703
-rw-r--r--  petalogix-ml605.dtb         9982
-rw-r--r--  petalogix-s3adsp1800.dtb    8259
-rw-r--r--  ppc_rom.bin               524288
-rw-r--r--  pxe-e1000.rom              67072
-rw-r--r--  pxe-eepro100.rom           61440
-rw-r--r--  pxe-ne2k_pci.rom           61440
-rw-r--r--  pxe-pcnet.rom              61440
-rw-r--r--  pxe-rtl8139.rom            61440
-rw-r--r--  pxe-virtio.rom             60416
-rw-r--r--  q35-acpi-dsdt.aml           7458
-rw-r--r--  qemu-icon.bmp                630
-rw-r--r--  s390-zipl.rom               3304
-rwxr-xr-x  sgabios.bin                 4096
-rw-r--r--  slof.bin                  880832
-rw-r--r--  spapr-rtas.bin                20
d---------  spapr-rtas                    76
-rw-r--r--  vgabios-cirrus.bin         35840
-rw-r--r--  vgabios-qxl.bin            40448
-rw-r--r--  vgabios-stdvga.bin         40448
-rw-r--r--  vgabios-vmware.bin         40448
-rw-r--r--  vgabios.bin                40448
_cpu(ce_data.dst_r_idx)); } - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); } diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h index 95743a57525d..bdec794704d9 100644 --- a/drivers/net/wireless/ath/ath10k/ce.h +++ b/drivers/net/wireless/ath/ath10k/ce.h @@ -122,6 +122,24 @@ struct ath10k_ce_pipe { /* Copy Engine settable attributes */ struct ce_attr; +struct ath10k_bus_ops { + u32 (*read32)(struct ath10k *ar, u32 offset); + void (*write32)(struct ath10k *ar, u32 offset, u32 value); + int (*get_num_banks)(struct ath10k *ar); +}; + +static inline struct ath10k_ce *ath10k_ce_priv(struct ath10k *ar) +{ + return (struct ath10k_ce *)ar->ce_priv; +} + +struct ath10k_ce { + /* protects CE info */ + spinlock_t ce_lock; + const struct ath10k_bus_ops *bus_ops; + struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; +}; + /*==================Send====================*/ /* ath10k_ce_send flags */ @@ -291,9 +309,13 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id) CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB) #define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000 -#define CE_INTERRUPT_SUMMARY(ar) \ - CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( \ - ath10k_pci_read32((ar), CE_WRAPPER_BASE_ADDRESS + \ - CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)) +static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar) +{ + struct ath10k_ce *ce = ath10k_ce_priv(ar); + + return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET( + ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS + + CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS)); +} #endif /* _CE_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 1aa5cf12fce0..2b499af722ad 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -993,6 +993,8 @@ struct ath10k { u32 reg_ack_cts_timeout_orig; } fw_coverage; + void *ce_priv; + /* must be last */ u8 drv_priv[0] __aligned(sizeof(void *)); }; diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 7ebfc409018d..6a91276ce4d7 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -672,16 +672,16 @@ static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); - ar_pci->bus_ops->write32(ar, offset, value); + ce->bus_ops->write32(ar, offset, value); } inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); - return ar_pci->bus_ops->read32(ar, offset); + return ce->bus_ops->read32(ar, offset); } u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) @@ -761,7 +761,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar) static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) { struct ath10k *ar = pipe->hif_ce_state; - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; struct sk_buff *skb; dma_addr_t paddr; @@ -784,9 +784,9 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) ATH10K_SKB_RXCB(skb)->paddr = paddr; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); 
if (ret) { dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb), DMA_FROM_DEVICE); @@ -801,6 +801,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) { struct ath10k *ar = pipe->hif_ce_state; struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl; int ret, num; @@ -810,9 +811,9 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) if (!ce_pipe->dest_ring) return; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); num = __ath10k_ce_rx_num_free_bufs(ce_pipe); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); while (num >= 0) { ret = __ath10k_pci_rx_post_buf(pipe); @@ -882,6 +883,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret = 0; u32 *buf; unsigned int completed_nbytes, alloc_nbytes, remaining_bytes; @@ -892,7 +894,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data, void *data_buf = NULL; int i; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ce_diag = ar_pci->ce_diag; @@ -986,7 +988,7 @@ done: dma_free_coherent(ar->dev, alloc_nbytes, data_buf, ce_data_base); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -1034,6 +1036,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, const void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret = 0; u32 *buf; unsigned int completed_nbytes, orig_nbytes, remaining_bytes; @@ -1043,7 +1046,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, dma_addr_t ce_data_base = 0; int i; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); ce_diag = ar_pci->ce_diag; @@ -1147,7 +1150,7 @@ done: ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n", address, ret); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return ret; } @@ -1342,6 +1345,7 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, struct ath10k_hif_sg_item *items, int n_items) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl; struct ath10k_ce_ring *src_ring = ce_pipe->src_ring; @@ -1350,7 +1354,7 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, unsigned int write_index; int err, i = 0; - spin_lock_bh(&ar_pci->ce_lock); + spin_lock_bh(&ce->ce_lock); nentries_mask = src_ring->nentries_mask; sw_index = src_ring->sw_index; @@ -1396,14 +1400,14 @@ int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, if (err) goto err; - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return 0; err: for (; i > 0; i--) __ath10k_ce_send_revert(ce_pipe); - spin_unlock_bh(&ar_pci->ce_lock); + spin_unlock_bh(&ce->ce_lock); return err; } @@ -2000,9 +2004,9 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) static int ath10k_bus_get_num_banks(struct ath10k *ar) { - struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); - return ar_pci->bus_ops->get_num_banks(ar); + return ce->bus_ops->get_num_banks(ar); } int ath10k_pci_init_config(struct ath10k *ar) @@ -2173,11 +2177,12 @@ int ath10k_pci_alloc_pipes(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); 
struct ath10k_pci_pipe *pipe; + struct ath10k_ce *ce = ath10k_ce_priv(ar); int i, ret; for (i = 0; i < CE_COUNT; i++) { pipe = &ar_pci->pipe_info[i]; - pipe->ce_hdl = &ar_pci->ce_states[i]; + pipe->ce_hdl = &ce->ce_states[i]; pipe->pipe_num = i; pipe->hif_ce_state = ar; @@ -2825,7 +2830,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget) * interrupts safer to check for pending interrupts for * immediate servicing. */ - if (CE_INTERRUPT_SUMMARY(ar)) { + if (ath10k_ce_interrupt_summary(ar)) { napi_reschedule(ctx); goto out; } @@ -3142,9 +3147,10 @@ static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) int ath10k_pci_setup_resource(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + struct ath10k_ce *ce = ath10k_ce_priv(ar); int ret; - spin_lock_init(&ar_pci->ce_lock); + spin_lock_init(&ce->ce_lock); spin_lock_init(&ar_pci->ps_lock); setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, @@ -3263,10 +3269,11 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar_pci->ar = ar; ar->dev_id = pci_dev->device; ar_pci->pci_ps = pci_ps; - ar_pci->bus_ops = &ath10k_pci_bus_ops; + ar_pci->ce.bus_ops = &ath10k_pci_bus_ops; ar_pci->pci_soft_reset = pci_soft_reset; ar_pci->pci_hard_reset = pci_hard_reset; ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr; + ar->ce_priv = &ar_pci->ce; ar->id.vendor = pdev->vendor; ar->id.device = pdev->device; diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index c1e08ad63940..424ff323b2dc 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -150,12 +150,6 @@ struct ath10k_pci_supp_chip { u32 rev_id; }; -struct ath10k_bus_ops { - u32 (*read32)(struct ath10k *ar, u32 offset); - void (*write32)(struct ath10k *ar, u32 offset, u32 value); - int (*get_num_banks)(struct ath10k *ar); -}; - enum ath10k_pci_irq_mode { ATH10K_PCI_IRQ_AUTO = 0, ATH10K_PCI_IRQ_LEGACY = 1, @@ -177,11 +171,7 @@ struct ath10k_pci { /* Copy Engine used for Diagnostic Accesses */ struct ath10k_ce_pipe *ce_diag; - /* FIXME: document what this really protects */ - spinlock_t ce_lock; - - /* Map CE id to ce_state */ - struct ath10k_ce_pipe ce_states[CE_COUNT_MAX]; + struct ath10k_ce ce; struct timer_list rx_post_retry; /* Due to HW quirks it is recommended to disable ASPM during device @@ -225,8 +215,6 @@ struct ath10k_pci { */ bool pci_ps; - const struct ath10k_bus_ops *bus_ops; - /* Chip specific pci reset routine used to do a safe reset */ int (*pci_soft_reset)(struct ath10k *ar); -- cgit v1.2.3-55-g7522 From f9e18304544e7fbabe9e4b5da8c7a052542000ca Mon Sep 17 00:00:00 2001 From: Govind Singh Date: Wed, 28 Jun 2017 10:18:36 +0530 Subject: ath10k: add copy engine register MAP for wcn3990 target Copy engine is a host to target communication interface between wlan firmware and wlan wcn3990 platform driver. Add copy engine register map for wcn3990 wlan module. This add support for the copy engine source/destination ring configuration for wcn3990 chipset. 
Signed-off-by: Govind Singh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 5 ++ drivers/net/wireless/ath/ath10k/hw.c | 150 +++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/hw.h | 9 ++ drivers/net/wireless/ath/ath10k/pci.c | 4 + 4 files changed, 168 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 75c5c903c8a6..8ff47458207c 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -2516,6 +2516,11 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ar->hw_ce_regs = &qcax_ce_regs; ar->hw_values = &qca4019_values; break; + case ATH10K_HW_WCN3990: + ar->regs = &wcn3990_regs; + ar->hw_ce_regs = &wcn3990_ce_regs; + ar->hw_values = &wcn3990_values; + break; default: ath10k_err(ar, "unsupported core hardware revision %d\n", hw_rev); diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index afb0c01cbb55..a860691d635d 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -192,6 +192,156 @@ const struct ath10k_hw_values qca4019_values = { .ce_desc_meta_data_lsb = 4, }; +const struct ath10k_hw_regs wcn3990_regs = { + .rtc_soc_base_address = 0x00000000, + .rtc_wmac_base_address = 0x00000000, + .soc_core_base_address = 0x00000000, + .ce_wrapper_base_address = 0x0024C000, + .ce0_base_address = 0x00240000, + .ce1_base_address = 0x00241000, + .ce2_base_address = 0x00242000, + .ce3_base_address = 0x00243000, + .ce4_base_address = 0x00244000, + .ce5_base_address = 0x00245000, + .ce6_base_address = 0x00246000, + .ce7_base_address = 0x00247000, + .ce8_base_address = 0x00248000, + .ce9_base_address = 0x00249000, + .ce10_base_address = 0x0024A000, + .ce11_base_address = 0x0024B000, + .soc_chip_id_address = 0x000000f0, + .soc_reset_control_si0_rst_mask = 0x00000001, + .soc_reset_control_ce_rst_mask = 0x00000100, + .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c, + .ce_wrap_intr_sum_host_msi_mask = 0x00fff000, + .pcie_intr_fw_mask = 0x00100000, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = { + .msb = 0x00000010, + .lsb = 0x00000010, + .mask = GENMASK(17, 17), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = { + .msb = 0x00000012, + .lsb = 0x00000012, + .mask = GENMASK(18, 18), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = { + .msb = 0x00000000, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = { + .addr = 0x00000018, + .src_ring = &wcn3990_src_ring, + .dst_ring = &wcn3990_dst_ring, + .dmax = &wcn3990_dmax, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = { + .mask = GENMASK(0, 0), +}; + +static struct ath10k_hw_ce_host_ie wcn3990_host_ie = { + .copy_complete = &wcn3990_host_ie_cc, +}; + +static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = { + .dstr_lmask = 0x00000010, + .dstr_hmask = 0x00000008, + .srcr_lmask = 0x00000004, + .srcr_hmask = 0x00000002, + .cc_mask = 0x00000001, + .wm_mask = 0x0000001E, + .addr = 0x00000030, +}; + +static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = { + .axi_err = 0x00000100, + .dstr_add_err = 0x00000200, + .srcr_len_err = 0x00000100, + .dstr_mlen_vio = 0x00000080, + .dstr_overflow = 0x00000040, + .srcr_overflow = 0x00000020, + .err_mask = 0x000003E0, + .addr = 0x00000038, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = { + .msb = 0x00000000, + .lsb = 0x00000010, + .mask = GENMASK(31, 
16), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = { + .addr = 0x0000004c, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &wcn3990_src_wm_low, + .wm_high = &wcn3990_src_wm_high, +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = { + .lsb = 0x00000010, + .mask = GENMASK(31, 16), +}; + +static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = { + .msb = 0x0000000f, + .lsb = 0x00000000, + .mask = GENMASK(15, 0), +}; + +static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = { + .addr = 0x00000050, + .low_rst = 0x00000000, + .high_rst = 0x00000000, + .wm_low = &wcn3990_dst_wm_low, + .wm_high = &wcn3990_dst_wm_high, +}; + +struct ath10k_hw_ce_regs wcn3990_ce_regs = { + .sr_base_addr = 0x00000000, + .sr_size_addr = 0x00000008, + .dr_base_addr = 0x0000000c, + .dr_size_addr = 0x00000014, + .misc_ie_addr = 0x00000034, + .sr_wr_index_addr = 0x0000003c, + .dst_wr_index_addr = 0x00000040, + .current_srri_addr = 0x00000044, + .current_drri_addr = 0x00000048, + .ddr_addr_for_rri_low = 0x00000004, + .ddr_addr_for_rri_high = 0x00000008, + .ce_rri_low = 0x0024C004, + .ce_rri_high = 0x0024C008, + .host_ie_addr = 0x0000002c, + .ctrl1_regs = &wcn3990_ctrl1, + .host_ie = &wcn3990_host_ie, + .wm_regs = &wcn3990_wm_reg, + .misc_regs = &wcn3990_misc_reg, + .wm_srcr = &wcn3990_wm_src_ring, + .wm_dstr = &wcn3990_wm_dst_ring, +}; + +const struct ath10k_hw_values wcn3990_values = { + .rtc_state_val_on = 5, + .ce_count = 12, + .msi_assign_ce_max = 12, + .num_target_ce_config_wlan = 12, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, +}; + static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = { .msb = 0x00000010, .lsb = 0x00000010, diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 97dc1479f44e..19e43512af50 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -231,6 +231,7 @@ enum ath10k_hw_rev { ATH10K_HW_QCA9377, ATH10K_HW_QCA4019, ATH10K_HW_QCA9887, + ATH10K_HW_WCN3990, }; struct ath10k_hw_regs { @@ -247,6 +248,10 @@ struct ath10k_hw_regs { u32 ce5_base_address; u32 ce6_base_address; u32 ce7_base_address; + u32 ce8_base_address; + u32 ce9_base_address; + u32 ce10_base_address; + u32 ce11_base_address; u32 soc_reset_control_si0_rst_mask; u32 soc_reset_control_ce_rst_mask; u32 soc_chip_id_address; @@ -267,6 +272,7 @@ extern const struct ath10k_hw_regs qca988x_regs; extern const struct ath10k_hw_regs qca6174_regs; extern const struct ath10k_hw_regs qca99x0_regs; extern const struct ath10k_hw_regs qca4019_regs; +extern const struct ath10k_hw_regs wcn3990_regs; struct ath10k_hw_ce_regs_addr_map { u32 msb; @@ -362,6 +368,8 @@ extern const struct ath10k_hw_values qca6174_values; extern const struct ath10k_hw_values qca99x0_values; extern const struct ath10k_hw_values qca9888_values; extern const struct ath10k_hw_values qca4019_values; +extern const struct ath10k_hw_values wcn3990_values; +extern struct ath10k_hw_ce_regs wcn3990_ce_regs; extern struct ath10k_hw_ce_regs qcax_ce_regs; void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, @@ -375,6 +383,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, #define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984) #define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377) #define QCA_REV_40XX(ar) 
((ar)->hw_rev == ATH10K_HW_QCA4019) +#define QCA_REV_WCN3990(ar) ((ar)->hw_rev == ATH10K_HW_WCN3990) /* Known peculiarities: * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 6a91276ce4d7..a697caec6579 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1597,6 +1597,8 @@ void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) * to mask irq/MSI. */ break; + case ATH10K_HW_WCN3990: + break; } } @@ -1623,6 +1625,8 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) * to unmask irq/MSI. */ break; + case ATH10K_HW_WCN3990: + break; } } -- cgit v1.2.3-55-g7522 From 75e0dde2abf6ae8561b133c4069c473641d50c12 Mon Sep 17 00:00:00 2001 From: Tamizh chelvam Date: Mon, 3 Jul 2017 11:02:36 +0530 Subject: ath10k: increase buffer len to print all wmi services All wmi_services are not printing when we give below command. cat /sys/kernel/debug/ieee80211/phyX/ath10k/wmi_services This patch increases the buffer_len to 8192 to print all the wmi_services. Signed-off-by: Tamizh chelvam Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 389fcb7a9fd0..56404fe4e8f5 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -237,7 +237,7 @@ static ssize_t ath10k_read_wmi_services(struct file *file, { struct ath10k *ar = file->private_data; char *buf; - size_t len = 0, buf_len = 4096; + size_t len = 0, buf_len = 8192; const char *name; ssize_t ret_cnt; bool enabled; -- cgit v1.2.3-55-g7522 From 98524e04e0500a04fc461195c99e4385001fc18f Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Thu, 6 Jul 2017 16:57:10 -0700 Subject: sunvnet: add support for IPv6 checksum offloads The original code didn't handle non-IPv4 packets very well, so the offload advertising had to be scaled back down to just IP. Here we add the bits needed to support TCP and UDP packets over IPv6 and turn the offload advertising back on. Orabug: 26289579 Signed-off-by: Shannon Nelson Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sun/ldmvsw.c | 2 +- drivers/net/ethernet/sun/sunvnet.c | 2 +- drivers/net/ethernet/sun/sunvnet_common.c | 90 +++++++++++++++++++++++++++---- 3 files changed, 81 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 8603e397097e..5b56c24b6ed2 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -248,7 +248,7 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[], dev->ethtool_ops = &vsw_ethtool_ops; dev->watchdog_timeo = VSW_TX_TIMEOUT; - dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG; + dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; /* MTU range: 68 - 65535 */ diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 75b167e3fe98..0b95105f7060 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac, dev->watchdog_timeo = VNET_TX_TIMEOUT; dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | - NETIF_F_IP_CSUM | NETIF_F_SG; + NETIF_F_HW_CSUM | NETIF_F_SG; dev->features = dev->hw_features; /* MTU range: 68 - 65535 */ diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 9e86833249d4..ecf456c7b6d1 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -303,7 +303,7 @@ static struct sk_buff *alloc_and_align_skb(struct net_device *dev, return skb; } -static inline void vnet_fullcsum(struct sk_buff *skb) +static inline void vnet_fullcsum_ipv4(struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); int offset = skb_transport_offset(skb); @@ -335,6 +335,40 @@ static inline void vnet_fullcsum(struct sk_buff *skb) } } +#if IS_ENABLED(CONFIG_IPV6) +static inline void vnet_fullcsum_ipv6(struct sk_buff *skb) +{ + struct ipv6hdr *ip6h = ipv6_hdr(skb); + int offset = skb_transport_offset(skb); + + if (skb->protocol != htons(ETH_P_IPV6)) + return; + if (ip6h->nexthdr != IPPROTO_TCP && + ip6h->nexthdr != IPPROTO_UDP) + return; + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 1; + skb->csum = 0; + if (ip6h->nexthdr == IPPROTO_TCP) { + struct tcphdr *ptcp = tcp_hdr(skb); + + ptcp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + ptcp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + skb->len - offset, IPPROTO_TCP, + skb->csum); + } else if (ip6h->nexthdr == IPPROTO_UDP) { + struct udphdr *pudp = udp_hdr(skb); + + pudp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + pudp->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + skb->len - offset, IPPROTO_UDP, + skb->csum); + } +} +#endif + static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) { struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); @@ -394,9 +428,14 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) struct iphdr *iph = ip_hdr(skb); int ihl = iph->ihl * 4; - skb_reset_transport_header(skb); skb_set_transport_header(skb, ihl); - vnet_fullcsum(skb); + vnet_fullcsum_ipv4(skb); +#if IS_ENABLED(CONFIG_IPV6) + } else if (skb->protocol == htons(ETH_P_IPV6)) { + skb_set_transport_header(skb, + sizeof(struct ipv6hdr)); + vnet_fullcsum_ipv6(skb); +#endif } } if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { @@ -1115,24 +1154,47 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) if 
(skb->ip_summed == CHECKSUM_PARTIAL) start = skb_checksum_start_offset(skb); if (start) { - struct iphdr *iph = ip_hdr(nskb); int offset = start + nskb->csum_offset; + /* copy the headers, no csum here */ if (skb_copy_bits(skb, 0, nskb->data, start)) { dev_kfree_skb(nskb); dev_kfree_skb(skb); return NULL; } + + /* copy the rest, with csum calculation */ *(__sum16 *)(skb->data + offset) = 0; csum = skb_copy_and_csum_bits(skb, start, nskb->data + start, skb->len - start, 0); - if (iph->protocol == IPPROTO_TCP || - iph->protocol == IPPROTO_UDP) { - csum = csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - start, - iph->protocol, csum); + + /* add in the header checksums */ + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(nskb); + + if (iph->protocol == IPPROTO_TCP || + iph->protocol == IPPROTO_UDP) { + csum = csum_tcpudp_magic(iph->saddr, + iph->daddr, + skb->len - start, + iph->protocol, + csum); + } + } else if (skb->protocol == htons(ETH_P_IPV6)) { + struct ipv6hdr *ip6h = ipv6_hdr(nskb); + + if (ip6h->nexthdr == IPPROTO_TCP || + ip6h->nexthdr == IPPROTO_UDP) { + csum = csum_ipv6_magic(&ip6h->saddr, + &ip6h->daddr, + skb->len - start, + ip6h->nexthdr, + csum); + } } + + /* save the final result */ *(__sum16 *)(nskb->data + offset) = csum; nskb->ip_summed = CHECKSUM_NONE; @@ -1318,8 +1380,14 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, if (unlikely(!skb)) goto out_dropped; - if (skb->ip_summed == CHECKSUM_PARTIAL) - vnet_fullcsum(skb); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->protocol == htons(ETH_P_IP)) + vnet_fullcsum_ipv4(skb); +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) + vnet_fullcsum_ipv6(skb); +#endif + } dr = &port->vio.drings[VIO_DRIVER_TX_RING]; i = skb_get_queue_mapping(skb); -- cgit v1.2.3-55-g7522 From 1c3a044c6013b7fcf4738129a1141c9c1994bb86 Mon Sep 17 00:00:00 2001 From: sixiao@microsoft.com Date: Fri, 14 Jul 2017 10:47:20 -0700 Subject: tools: hv: ignore a NIC if it has been configured Let bondvf.sh ignore this NIC if it has been configured, to prevent user configuration from being overwritten unexpectly. Signed-off-by: Simon Xiao Signed-off-by: David S. Miller --- tools/hv/bondvf.sh | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/tools/hv/bondvf.sh b/tools/hv/bondvf.sh index 89b25068cd98..80f102860cf8 100755 --- a/tools/hv/bondvf.sh +++ b/tools/hv/bondvf.sh @@ -211,6 +211,30 @@ function create_bond { echo $'\nBond name:' $bondname + if [ $distro == ubuntu ] + then + local mainfn=$cfgdir/interfaces + local s="^[ \t]*(auto|iface|mapping|allow-.*)[ \t]+${bondname}" + + grep -E "$s" $mainfn + if [ $? 
-eq 0 ] + then + echo "WARNING: ${bondname} has been configured already" + return + fi + elif [ $distro == redhat ] || [ $distro == suse ] + then + local fn=$cfgdir/ifcfg-$bondname + if [ -f $fn ] + then + echo "WARNING: ${bondname} has been configured already" + return + fi + else + echo "Unsupported Distro: ${distro}" + return + fi + echo configuring $primary create_eth_cfg_pri_$distro $primary $bondname @@ -219,8 +243,6 @@ function create_bond { echo creating: $bondname with primary slave: $primary create_bond_cfg_$distro $bondname $primary $secondary - - let bondcnt=bondcnt+1 } for (( i=0; i < $eth_cnt-1; i++ )) @@ -228,5 +250,6 @@ do if [ -n "${list_match[$i]}" ] then create_bond ${list_eth[$i]} ${list_match[$i]} + let bondcnt=bondcnt+1 fi done -- cgit v1.2.3-55-g7522 From ccdb2d17df9f07c291d43b0aeea7c90e4c020489 Mon Sep 17 00:00:00 2001 From: Vincent Bernat Date: Sat, 15 Jul 2017 19:40:20 +0200 Subject: ip6: fix PMTU discovery when using /127 subnets The definition of an "anycast destination address" has been tweaked as a side-effect of commit 2647a9b07032 ("ipv6: Remove external dependency on rt6i_gateway and RTF_ANYCAST"). The first address of a point-to-point /127 subnet is now considered as an anycast address. This prevents ICMPv6 errors to be returned to a sender of such a subnet and breaks PMTU discovery. This can be reproduced with: ip link add name out6 type veth peer name in6 ip link add name out7 type veth peer name in7 ip link set mtu 1400 dev out7 ip link set mtu 1400 dev in7 ip netns add next-hop ip netns add next-next-hop ip link set netns next-hop dev in6 ip link set netns next-hop dev out7 ip link set netns next-next-hop dev in7 ip link set up dev out6 ip addr add 2001:db8:1::12/127 dev out6 ip netns exec next-hop ip link set up dev in6 ip netns exec next-hop ip link set up dev out7 ip netns exec next-hop ip addr add 2001:db8:1::13/127 dev in6 ip netns exec next-hop ip addr add 2001:db8:1::14/127 dev out7 ip netns exec next-hop ip route add default via 2001:db8:1::15 ip netns exec next-hop sysctl -qw net.ipv6.conf.all.forwarding=1 ip netns exec next-next-hop ip link set up dev in7 ip netns exec next-next-hop ip addr add 2001:db8:1::15/127 dev in7 ip netns exec next-next-hop ip addr add 2001:db8:1::50/128 dev in7 ip netns exec next-next-hop ip route add default via 2001:db8:1::14 ip netns exec next-next-hop sysctl -qw net.ipv6.conf.all.forwarding=1 ip route add 2001:db8:1::48/123 via 2001:db8:1::13 sleep 4 ping -M do -s 1452 -c 3 2001:db8:1::50 || true ip route get 2001:db8:1::50 Before the patch, we get: 2001:db8:1::50 from :: via 2001:db8:1::13 dev out6 src 2001:db8:1::12 metric 1024 pref medium After the patch, we get: 2001:db8:1::50 via 2001:db8:1::13 dev out6 src 2001:db8:1::12 metric 0 cache expires 578sec mtu 1400 pref medium Fixes: 2647a9b07032 ("ipv6: Remove external dependency on rt6i_gateway and RTF_ANYCAST") Signed-off-by: Vincent Bernat Signed-off-by: David S. 
Miller --- include/net/ip6_route.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 199056933dcb..907d39a42f6b 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -194,7 +194,7 @@ static inline bool ipv6_anycast_destination(const struct dst_entry *dst, struct rt6_info *rt = (struct rt6_info *)dst; return rt->rt6i_flags & RTF_ANYCAST || - (rt->rt6i_dst.plen != 128 && + (rt->rt6i_dst.plen < 127 && ipv6_addr_equal(&rt->rt6i_dst.addr, daddr)); } -- cgit v1.2.3-55-g7522 From 10244bc2ae7d8543190f4c651cdfaf030c50a802 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:29 +0530 Subject: atm: iphase: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 23536 432 160 24128 5e40 drivers/atm/iphase.o File size After adding 'const': text data bss dec hex filename 23632 336 160 24128 5e40 drivers/atm/iphase.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/iphase.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index a4fa6c82261e..fc72b763fdd7 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -3266,7 +3266,7 @@ static void ia_remove_one(struct pci_dev *pdev) kfree(iadev); } -static struct pci_device_id ia_pci_tbl[] = { +static const struct pci_device_id ia_pci_tbl[] = { { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, }, { 0,} -- cgit v1.2.3-55-g7522 From c21c5a7f9dc967f41f591572bcbd29bfcff4331b Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:30 +0530 Subject: atm: ambassador: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 13372 408 4 13784 35d8 drivers/atm/ambassador.o File size After adding 'const': text data bss dec hex filename 13484 296 4 13784 35d8 drivers/atm/ambassador.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/ambassador.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index 906705e5f776..acf16c323e38 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -2374,7 +2374,7 @@ MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); /********** module entry **********/ -static struct pci_device_id amb_pci_tbl[] = { +static const struct pci_device_id amb_pci_tbl[] = { { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 }, { PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 }, { 0, } -- cgit v1.2.3-55-g7522 From d5c5665d133c6288bf0e7e6fd49a46463a285bd4 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:31 +0530 Subject: atm: fore200e: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. 
File size before: text data bss dec hex filename 20025 320 16 20361 4f89 drivers/atm/fore200e.o File size After adding 'const': text data bss dec hex filename 20089 256 16 20361 4f89 drivers/atm/fore200e.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/fore200e.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index f0433adcd8fc..f8b7e86907cc 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -2757,7 +2757,7 @@ static void fore200e_pca_remove_one(struct pci_dev *pci_dev) } -static struct pci_device_id fore200e_pca_tbl[] = { +static const struct pci_device_id fore200e_pca_tbl[] = { { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) &fore200e_bus[0] }, { 0, } -- cgit v1.2.3-55-g7522 From 2f3e2604fb579e34f775c799eba2afdfcb97fd66 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:32 +0530 Subject: atm: nicstar: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 22781 464 128 23373 5b4d drivers/atm/nicstar.o File size After adding 'const': text data bss dec hex filename 22845 400 128 23373 5b4d drivers/atm/nicstar.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/nicstar.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index d879f3bca107..9588d80f318e 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -253,7 +253,7 @@ static void nicstar_remove_one(struct pci_dev *pcidev) kfree(card); } -static struct pci_device_id nicstar_pci_tbl[] = { +static const struct pci_device_id nicstar_pci_tbl[] = { { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 }, {0,} /* terminate list */ }; -- cgit v1.2.3-55-g7522 From 5c007845029d1e03df765f8fb2af0a0b8f5126b9 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:33 +0530 Subject: atm: he: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 26514 440 48 27002 697a drivers/atm/he.o File size After adding 'const': text data bss dec hex filename 26578 376 48 27002 697a drivers/atm/he.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/he.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 37ee21c5a5ca..8f6156d475d1 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -2851,7 +2851,7 @@ MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)"); module_param(sdh, bool, 0); MODULE_PARM_DESC(sdh, "use SDH framing (default 0)"); -static struct pci_device_id he_pci_tbl[] = { +static const struct pci_device_id he_pci_tbl[] = { { PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 }, { 0, } }; -- cgit v1.2.3-55-g7522 From 77c0805dc84977e0539134942e678fce77cfb0ef Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:34 +0530 Subject: atm: horizon: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. 
File size before: text data bss dec hex filename 9859 328 6 10193 27d1 drivers/atm/horizon.o File size After adding 'const': text data bss dec hex filename 9923 264 6 10193 27d1 drivers/atm/horizon.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/horizon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index 0f18480b33b5..7e76b35f422c 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2867,7 +2867,7 @@ MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames"); MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames"); MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles"); -static struct pci_device_id hrz_pci_tbl[] = { +static const struct pci_device_id hrz_pci_tbl[] = { { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } -- cgit v1.2.3-55-g7522 From 6d6148b3e650b82149c45b7597b9b2c64342b979 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:35 +0530 Subject: atm: solos-pci: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 16138 4592 24 20754 5112 drivers/atm/solos-pci.o File size After adding 'const': text data bss dec hex filename 16218 4528 24 20754 5122 drivers/atm/solos-pci.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/solos-pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index c8f2ca6d8b29..585984ee7dbd 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1476,7 +1476,7 @@ static void fpga_remove(struct pci_dev *dev) kfree(card); } -static struct pci_device_id fpga_pci_tbl[] = { +static const struct pci_device_id fpga_pci_tbl[] = { { 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } }; -- cgit v1.2.3-55-g7522 From 626e87ca8794d1a0c30697d717a0c4b031f731c8 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:36 +0530 Subject: atm: lanai: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 18074 352 0 18426 47fa drivers/atm/lanai.o File size After adding 'const': text data bss dec hex filename 18170 256 0 18426 47fa drivers/atm/lanai.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/lanai.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index 1a9bc51284b0..2351dad78ff5 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2589,7 +2589,7 @@ static int lanai_init_one(struct pci_dev *pci, return result; } -static struct pci_device_id lanai_pci_tbl[] = { +static const struct pci_device_id lanai_pci_tbl[] = { { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAI2) }, { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAIHB) }, { 0, } /* terminal entry */ -- cgit v1.2.3-55-g7522 From aea39c7f4a56dd2147270b925ed0f107f9403be9 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:37 +0530 Subject: atm: zatm: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. 
So mark the non-const structs as const. File size before: text data bss dec hex filename 14350 352 40 14742 3996 drivers/atm/zatm.o File size After adding 'const': text data bss dec hex filename 14446 256 40 14742 3996 drivers/atm/zatm.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/zatm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 292dec18ffb8..d3aa7482d1f7 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1642,7 +1642,7 @@ out_free: MODULE_LICENSE("GPL"); -static struct pci_device_id zatm_pci_tbl[] = { +static const struct pci_device_id zatm_pci_tbl[] = { { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER }, { PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 }, { 0, } -- cgit v1.2.3-55-g7522 From 0fdfb33bd911df8d4129f1699d78e23a174dd414 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:38 +0530 Subject: atm: firestream: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 16884 444 28 17356 43cc drivers/atm/firestream.o File size After adding 'const': text data bss dec hex filename 16980 348 28 17356 43cc drivers/atm/firestream.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/firestream.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 22dcab952a24..6b6368a56526 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -2030,7 +2030,7 @@ static void firestream_remove_one(struct pci_dev *pdev) func_exit (); } -static struct pci_device_id firestream_pci_tbl[] = { +static const struct pci_device_id firestream_pci_tbl[] = { { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50}, { PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155}, { 0, } -- cgit v1.2.3-55-g7522 From f283974cb8770415001e5761da587eca06580fe6 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:39 +0530 Subject: atm: eni: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 21565 352 56 21973 55d5 drivers/atm/eni.o File size After adding 'const': text data bss dec hex filename 21661 256 56 21973 55d5 drivers/atm/eni.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/eni.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index b042ec458544..ce47eb17901d 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2292,7 +2292,7 @@ err_disable: } -static struct pci_device_id eni_pci_tbl[] = { +static const struct pci_device_id eni_pci_tbl[] = { { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_FPGA), 0 /* FPGA */ }, { PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_ASIC), 1 /* ASIC */ }, { 0, } -- cgit v1.2.3-55-g7522 From 97ae5c2aa440588d1d7975e6e76f7f99c88c08a7 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 16 Jul 2017 15:02:40 +0530 Subject: atm: idt77252: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. 
File size before: text data bss dec hex filename 27702 468 16 28186 6e1a drivers/atm/idt77252.o File size After adding 'const': text data bss dec hex filename 27766 404 16 28186 6e1a drivers/atm/idt77252.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/atm/idt77252.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index 60bacba03d17..b7a168c46692 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3725,7 +3725,7 @@ err_out_disable_pdev: return err; } -static struct pci_device_id idt77252_pci_tbl[] = +static const struct pci_device_id idt77252_pci_tbl[] = { { PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77252), 0 }, { 0, } -- cgit v1.2.3-55-g7522 From aed20a53a7d91e45c6a8cb8920c77aaaa88f76ee Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Sun, 16 Jul 2017 16:43:46 -0700 Subject: rds: cancel send/recv work before queuing connection shutdown We could end up executing rds_conn_shutdown before the rds_recv_worker thread, then rds_conn_shutdown -> rds_tcp_conn_shutdown can do a sock_release and set sock->sk to null, which may interleave in bad ways with rds_recv_worker, e.g., it could result in: "BUG: unable to handle kernel NULL pointer dereference at 0000000000000078" [ffff881769f6fd70] release_sock at ffffffff815f337b [ffff881769f6fd90] rds_tcp_recv at ffffffffa043c888 [rds_tcp] [ffff881769f6fdb0] rds_recv_worker at ffffffffa04a4810 [rds] [ffff881769f6fde0] process_one_work at ffffffff810a14c1 [ffff881769f6fe40] worker_thread at ffffffff810a1940 [ffff881769f6fec0] kthread at ffffffff810a6b1e Also, do not enqueue any new shutdown workq items when the connection is shutting down (this may happen for rds-tcp in softirq mode, if a FIN or CLOSE is received while the modules is in the middle of an unload) Signed-off-by: Sowmini Varadhan Signed-off-by: David S. Miller --- net/rds/connection.c | 16 ++++++++++------ net/rds/rds.h | 2 +- net/rds/tcp.c | 2 +- net/rds/tcp_connect.c | 4 ++-- net/rds/tcp_send.c | 2 +- net/rds/threads.c | 2 +- 6 files changed, 16 insertions(+), 12 deletions(-) diff --git a/net/rds/connection.c b/net/rds/connection.c index 50a3789ac23e..005bca68aa94 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -374,13 +374,13 @@ static void rds_conn_path_destroy(struct rds_conn_path *cp) if (!cp->cp_transport_data) return; - rds_conn_path_drop(cp); - flush_work(&cp->cp_down_w); - /* make sure lingering queued work won't try to ref the conn */ cancel_delayed_work_sync(&cp->cp_send_w); cancel_delayed_work_sync(&cp->cp_recv_w); + rds_conn_path_drop(cp, true); + flush_work(&cp->cp_down_w); + /* tear down queued messages */ list_for_each_entry_safe(rm, rtmp, &cp->cp_send_queue, @@ -664,9 +664,13 @@ void rds_conn_exit(void) /* * Force a disconnect */ -void rds_conn_path_drop(struct rds_conn_path *cp) +void rds_conn_path_drop(struct rds_conn_path *cp, bool destroy) { atomic_set(&cp->cp_state, RDS_CONN_ERROR); + + if (!destroy && cp->cp_conn->c_destroy_in_prog) + return; + queue_work(rds_wq, &cp->cp_down_w); } EXPORT_SYMBOL_GPL(rds_conn_path_drop); @@ -674,7 +678,7 @@ EXPORT_SYMBOL_GPL(rds_conn_path_drop); void rds_conn_drop(struct rds_connection *conn) { WARN_ON(conn->c_trans->t_mp_capable); - rds_conn_path_drop(&conn->c_path[0]); + rds_conn_path_drop(&conn->c_path[0], false); } EXPORT_SYMBOL_GPL(rds_conn_drop); @@ -706,5 +710,5 @@ __rds_conn_path_error(struct rds_conn_path *cp, const char *fmt, ...) 
vprintk(fmt, ap); va_end(ap); - rds_conn_path_drop(cp); + rds_conn_path_drop(cp, false); } diff --git a/net/rds/rds.h b/net/rds/rds.h index 516bcc89b46f..3382695bf46c 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -700,7 +700,7 @@ struct rds_connection *rds_conn_create_outgoing(struct net *net, void rds_conn_shutdown(struct rds_conn_path *cpath); void rds_conn_destroy(struct rds_connection *conn); void rds_conn_drop(struct rds_connection *conn); -void rds_conn_path_drop(struct rds_conn_path *cpath); +void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy); void rds_conn_connect_if_down(struct rds_connection *conn); void rds_conn_path_connect_if_down(struct rds_conn_path *cp); void rds_for_each_conn_info(struct socket *sock, unsigned int len, diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 431404dbdad1..6b7ee71f40c6 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -592,7 +592,7 @@ static void rds_tcp_sysctl_reset(struct net *net) continue; /* reconnect with new parameters */ - rds_conn_path_drop(tc->t_cpath); + rds_conn_path_drop(tc->t_cpath, false); } spin_unlock_irq(&rds_tcp_conn_lock); } diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index cbe08a1fa4c7..46f74dad0e16 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c @@ -69,14 +69,14 @@ void rds_tcp_state_change(struct sock *sk) if (!IS_CANONICAL(cp->cp_conn->c_laddr, cp->cp_conn->c_faddr) && rds_conn_path_transition(cp, RDS_CONN_CONNECTING, RDS_CONN_ERROR)) { - rds_conn_path_drop(cp); + rds_conn_path_drop(cp, false); } else { rds_connect_path_complete(cp, RDS_CONN_CONNECTING); } break; case TCP_CLOSE_WAIT: case TCP_CLOSE: - rds_conn_path_drop(cp); + rds_conn_path_drop(cp, false); default: break; } diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c index 0d8616aa5bad..dc860d1bb608 100644 --- a/net/rds/tcp_send.c +++ b/net/rds/tcp_send.c @@ -157,7 +157,7 @@ out: "returned %d, " "disconnecting and reconnecting\n", &conn->c_faddr, cp->cp_index, ret); - rds_conn_path_drop(cp); + rds_conn_path_drop(cp, false); } } } diff --git a/net/rds/threads.c b/net/rds/threads.c index 2852bc1d37d4..f121daa402c8 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c @@ -78,7 +78,7 @@ void rds_connect_path_complete(struct rds_conn_path *cp, int curr) "current state is %d\n", __func__, atomic_read(&cp->cp_state)); - rds_conn_path_drop(cp); + rds_conn_path_drop(cp, false); return; } -- cgit v1.2.3-55-g7522 From a38905e6aa1a758af003b80f3318196eadb86dfe Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:49 +0800 Subject: sctp: remove the typedef sctp_ipv4addr_param_t This patch is to remove the typedef sctp_ipv4addr_param_t, and replace with struct sctp_ipv4addr_param in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 6 +++--- net/sctp/protocol.c | 2 +- net/sctp/sm_make_chunk.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 99e866487e2f..e42095d7ce57 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -273,10 +273,10 @@ struct sctp_init_chunk { /* Section 3.3.2.1. IPv4 Address Parameter (5) */ -typedef struct sctp_ipv4addr_param { +struct sctp_ipv4addr_param { struct sctp_paramhdr param_hdr; - struct in_addr addr; -} sctp_ipv4addr_param_t; + struct in_addr addr; +}; /* Section 3.3.2.1. 
IPv6 Address Parameter (6) */ typedef struct sctp_ipv6addr_param { diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 989a900383b5..852556d67ae3 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -292,7 +292,7 @@ static void sctp_v4_from_addr_param(union sctp_addr *addr, static int sctp_v4_to_addr_param(const union sctp_addr *addr, union sctp_addr_param *param) { - int length = sizeof(sctp_ipv4addr_param_t); + int length = sizeof(struct sctp_ipv4addr_param); param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS; param->v4.param_hdr.length = htons(length); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 4e16b02ed832..0dc64da74d55 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3153,7 +3153,7 @@ bool sctp_verify_asconf(const struct sctp_association *asoc, case SCTP_PARAM_ERR_CAUSE: break; case SCTP_PARAM_IPV4_ADDRESS: - if (length != sizeof(sctp_ipv4addr_param_t)) + if (length != sizeof(struct sctp_ipv4addr_param)) return false; /* ensure there is only one addr param and it's in the * beginning of addip_hdr params, or we reject it. -- cgit v1.2.3-55-g7522 From 00987cc07e3f0f01699800cd89adf13a908cdee5 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:50 +0800 Subject: sctp: remove the typedef sctp_ipv6addr_param_t This patch is to remove the typedef sctp_ipv6addr_param_t, and replace with struct sctp_ipv6addr_param in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 4 ++-- net/sctp/ipv6.c | 2 +- net/sctp/sm_make_chunk.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index e42095d7ce57..6b45c8a38642 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -279,10 +279,10 @@ struct sctp_ipv4addr_param { }; /* Section 3.3.2.1. IPv6 Address Parameter (6) */ -typedef struct sctp_ipv6addr_param { +struct sctp_ipv6addr_param { struct sctp_paramhdr param_hdr; struct in6_addr addr; -} sctp_ipv6addr_param_t; +}; /* Section 3.3.2.1 Cookie Preservative (9) */ typedef struct sctp_cookie_preserve_param { diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 2a186b201ad2..107d7c912922 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -497,7 +497,7 @@ static void sctp_v6_from_addr_param(union sctp_addr *addr, static int sctp_v6_to_addr_param(const union sctp_addr *addr, union sctp_addr_param *param) { - int length = sizeof(sctp_ipv6addr_param_t); + int length = sizeof(struct sctp_ipv6addr_param); param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS; param->v6.param_hdr.length = htons(length); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 0dc64da74d55..e5be305072b5 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3163,7 +3163,7 @@ bool sctp_verify_asconf(const struct sctp_association *asoc, addr_param_seen = true; break; case SCTP_PARAM_IPV6_ADDRESS: - if (length != sizeof(sctp_ipv6addr_param_t)) + if (length != sizeof(struct sctp_ipv6addr_param)) return false; if (param.v != addip->addip_hdr.params) return false; -- cgit v1.2.3-55-g7522 From 365ddb65e77f6b99d4aba09e0d8a096aada57815 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:51 +0800 Subject: sctp: remove the typedef sctp_cookie_preserve_param_t This patch is to remove the typedef sctp_cookie_preserve_param_t, and replace with struct sctp_cookie_preserve_param in the places where it's using this typedef. 
It is also to fix some indents in sctp_sf_do_5_2_6_stale(). Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 6 +++--- net/sctp/sm_statefuns.c | 11 +++++------ 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 6b45c8a38642..d8f9d8f8649b 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -285,10 +285,10 @@ struct sctp_ipv6addr_param { }; /* Section 3.3.2.1 Cookie Preservative (9) */ -typedef struct sctp_cookie_preserve_param { +struct sctp_cookie_preserve_param { struct sctp_paramhdr param_hdr; - __be32 lifespan_increment; -} sctp_cookie_preserve_param_t; + __be32 lifespan_increment; +}; /* Section 3.3.2.1 Host Name Address (11) */ typedef struct sctp_hostname_param { diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index b2a74c3823ee..ae4c48c4f657 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2336,13 +2336,12 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, void *arg, sctp_cmd_seq_t *commands) { - struct sctp_chunk *chunk = arg; - u32 stale; - sctp_cookie_preserve_param_t bht; - sctp_errhdr_t *err; - struct sctp_chunk *reply; - struct sctp_bind_addr *bp; int attempts = asoc->init_err_counter + 1; + struct sctp_chunk *chunk = arg, *reply; + struct sctp_cookie_preserve_param bht; + struct sctp_bind_addr *bp; + sctp_errhdr_t *err; + u32 stale; if (attempts > asoc->max_init_attempts) { sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, -- cgit v1.2.3-55-g7522 From df9af0063f154c1a4f22a5570749d185d080cf56 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:52 +0800 Subject: sctp: remove the typedef sctp_hostname_param_t Remove this typedef, there is even no places using it. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index d8f9d8f8649b..c43e9067d41a 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -291,10 +291,10 @@ struct sctp_cookie_preserve_param { }; /* Section 3.3.2.1 Host Name Address (11) */ -typedef struct sctp_hostname_param { +struct sctp_hostname_param { struct sctp_paramhdr param_hdr; uint8_t hostname[0]; -} sctp_hostname_param_t; +}; /* Section 3.3.2.1 Supported Address Types (12) */ typedef struct sctp_supported_addrs_param { -- cgit v1.2.3-55-g7522 From e925d506f1a21f7fd24a8fdd3e73e0810c655de4 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:53 +0800 Subject: sctp: remove the typedef sctp_supported_addrs_param_t This patch is to remove the typedef sctp_supported_addrs_param_t, and replace with struct sctp_supported_addrs_param in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 4 ++-- net/sctp/sm_make_chunk.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index c43e9067d41a..3ca3ab7302a6 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -297,10 +297,10 @@ struct sctp_hostname_param { }; /* Section 3.3.2.1 Supported Address Types (12) */ -typedef struct sctp_supported_addrs_param { +struct sctp_supported_addrs_param { struct sctp_paramhdr param_hdr; __be16 types[0]; -} sctp_supported_addrs_param_t; +}; /* Appendix A. 
ECN Capable (32768) */ typedef struct sctp_ecn_capable_param { diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index e5be305072b5..fb06d4fd0515 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -223,7 +223,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, struct sctp_chunk *retval = NULL; int num_types, addrs_len = 0; struct sctp_sock *sp; - sctp_supported_addrs_param_t sat; + struct sctp_supported_addrs_param sat; __be16 types[2]; sctp_adaptation_ind_param_t aiparam; sctp_supported_ext_param_t ext_param; -- cgit v1.2.3-55-g7522 From c1dd5df39be5a98c843b9352c22c5569f84bec44 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:54 +0800 Subject: sctp: remove struct sctp_ecn_capable_param Remove it, there is even no places using it. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 3ca3ab7302a6..75524829aa81 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -302,11 +302,6 @@ struct sctp_supported_addrs_param { __be16 types[0]; }; -/* Appendix A. ECN Capable (32768) */ -typedef struct sctp_ecn_capable_param { - struct sctp_paramhdr param_hdr; -} sctp_ecn_capable_param_t; - /* ADDIP Section 3.2.6 Adaptation Layer Indication */ typedef struct sctp_adaptation_ind_param { struct sctp_paramhdr param_hdr; -- cgit v1.2.3-55-g7522 From 85f6bd24ac579ef0926eb4c564ba1f3c8a7f8563 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:55 +0800 Subject: sctp: remove the typedef sctp_adaptation_ind_param_t This patch is to remove the typedef sctp_adaptation_ind_param_t, and replace with struct sctp_adaptation_ind_param in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 4 ++-- net/sctp/sm_make_chunk.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 75524829aa81..72b87874ea76 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -303,10 +303,10 @@ struct sctp_supported_addrs_param { }; /* ADDIP Section 3.2.6 Adaptation Layer Indication */ -typedef struct sctp_adaptation_ind_param { +struct sctp_adaptation_ind_param { struct sctp_paramhdr param_hdr; __be32 adaptation_ind; -} sctp_adaptation_ind_param_t; +}; /* ADDIP Section 4.2.7 Supported Extensions Parameter */ typedef struct sctp_supported_ext_param { diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index fb06d4fd0515..d5f82c2f8c84 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -225,7 +225,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, struct sctp_sock *sp; struct sctp_supported_addrs_param sat; __be16 types[2]; - sctp_adaptation_ind_param_t aiparam; + struct sctp_adaptation_ind_param aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; __u8 extensions[3]; @@ -393,7 +393,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, sctp_cookie_param_t *cookie; int cookie_len; size_t chunksize; - sctp_adaptation_ind_param_t aiparam; + struct sctp_adaptation_ind_param aiparam; sctp_supported_ext_param_t ext_param; int num_ext = 0; __u8 extensions[3]; -- cgit v1.2.3-55-g7522 From 15328d9feede450d64ff77cac5d25bc734ec8b27 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:56 +0800 Subject: sctp: remove the typedef sctp_supported_ext_param_t This patch is to remove the typedef sctp_supported_ext_param_t, and replace with struct sctp_supported_ext_param in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 4 ++-- net/sctp/sm_make_chunk.c | 22 ++++++++-------------- 2 files changed, 10 insertions(+), 16 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 72b87874ea76..76245685f923 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -309,10 +309,10 @@ struct sctp_adaptation_ind_param { }; /* ADDIP Section 4.2.7 Supported Extensions Parameter */ -typedef struct sctp_supported_ext_param { +struct sctp_supported_ext_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; -} sctp_supported_ext_param_t; +}; /* AUTH Section 3.1 Random */ typedef struct sctp_random_param { diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index d5f82c2f8c84..06dc351b6ba1 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -226,7 +226,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, struct sctp_supported_addrs_param sat; __be16 types[2]; struct sctp_adaptation_ind_param aiparam; - sctp_supported_ext_param_t ext_param; + struct sctp_supported_ext_param ext_param; int num_ext = 0; __u8 extensions[3]; struct sctp_paramhdr *auth_chunks = NULL, @@ -305,8 +305,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, /* If we have any extensions to report, account for that */ if (num_ext) - chunksize += SCTP_PAD4(sizeof(sctp_supported_ext_param_t) + - num_ext); + chunksize += SCTP_PAD4(sizeof(ext_param) + num_ext); /* RFC 2960 3.3.2 Initiation (INIT) (1) * @@ -348,10 +347,8 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, */ if (num_ext) { ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; - ext_param.param_hdr.length = - htons(sizeof(sctp_supported_ext_param_t) + num_ext); - sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), - &ext_param); + ext_param.param_hdr.length = htons(sizeof(ext_param) + num_ext); + sctp_addto_chunk(retval, sizeof(ext_param), &ext_param); sctp_addto_param(retval, num_ext, extensions); } @@ -394,7 +391,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, int cookie_len; size_t chunksize; struct sctp_adaptation_ind_param aiparam; - sctp_supported_ext_param_t ext_param; + struct sctp_supported_ext_param ext_param; int num_ext = 0; __u8 extensions[3]; struct sctp_paramhdr *auth_chunks = NULL, @@ -468,8 +465,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, } if (num_ext) - chunksize += SCTP_PAD4(sizeof(sctp_supported_ext_param_t) + - num_ext); + chunksize += SCTP_PAD4(sizeof(ext_param) + num_ext); /* Now allocate and fill out the chunk. 
*/ retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize, gfp); @@ -495,10 +491,8 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); if (num_ext) { ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; - ext_param.param_hdr.length = - htons(sizeof(sctp_supported_ext_param_t) + num_ext); - sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), - &ext_param); + ext_param.param_hdr.length = htons(sizeof(ext_param) + num_ext); + sctp_addto_chunk(retval, sizeof(ext_param), &ext_param); sctp_addto_param(retval, num_ext, extensions); } if (asoc->peer.prsctp_capable) -- cgit v1.2.3-55-g7522 From b02db702face3791889a4fcf06691c086648ee89 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:57 +0800 Subject: sctp: remove the typedef sctp_random_param_t This patch is to remove the typedef sctp_random_param_t, and replace with struct sctp_random_param in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 4 ++-- include/net/sctp/structs.h | 2 +- net/sctp/auth.c | 9 ++++----- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 76245685f923..9b1aa3907c9e 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -315,10 +315,10 @@ struct sctp_supported_ext_param { }; /* AUTH Section 3.1 Random */ -typedef struct sctp_random_param { +struct sctp_random_param { struct sctp_paramhdr param_hdr; __u8 random_val[0]; -} sctp_random_param_t; +}; /* AUTH Section 3.2 Chunk List */ typedef struct sctp_chunks_param { diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 5ab29af8ca8a..f22c079fd1f1 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1556,7 +1556,7 @@ struct sctp_association { * and authenticated chunk list. All that is part of the * cookie and these are just pointers to those locations */ - sctp_random_param_t *peer_random; + struct sctp_random_param *peer_random; sctp_chunks_param_t *peer_chunks; sctp_hmac_algo_param_t *peer_hmacs; } peer; diff --git a/net/sctp/auth.c b/net/sctp/auth.c index e001b01b0e68..0d9c63eba978 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -185,7 +185,7 @@ static int sctp_auth_compare_vectors(struct sctp_auth_bytes *vector1, * are called the two key vectors. */ static struct sctp_auth_bytes *sctp_auth_make_key_vector( - sctp_random_param_t *random, + struct sctp_random_param *random, sctp_chunks_param_t *chunks, sctp_hmac_algo_param_t *hmacs, gfp_t gfp) @@ -226,10 +226,9 @@ static struct sctp_auth_bytes *sctp_auth_make_local_vector( gfp_t gfp) { return sctp_auth_make_key_vector( - (sctp_random_param_t *)asoc->c.auth_random, - (sctp_chunks_param_t *)asoc->c.auth_chunks, - (sctp_hmac_algo_param_t *)asoc->c.auth_hmacs, - gfp); + (struct sctp_random_param *)asoc->c.auth_random, + (sctp_chunks_param_t *)asoc->c.auth_chunks, + (sctp_hmac_algo_param_t *)asoc->c.auth_hmacs, gfp); } /* Make a key vector based on peer's parameters */ -- cgit v1.2.3-55-g7522 From a762a9d94d44980e3690f9de87b918376daa6428 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:58 +0800 Subject: sctp: remove the typedef sctp_chunks_param_t This patch is to remove the typedef sctp_chunks_param_t, and replace with struct sctp_chunks_param in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 4 ++-- include/net/sctp/structs.h | 2 +- net/sctp/auth.c | 4 ++-- net/sctp/endpointola.c | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 9b1aa3907c9e..b52def9bcfa1 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -321,10 +321,10 @@ struct sctp_random_param { }; /* AUTH Section 3.2 Chunk List */ -typedef struct sctp_chunks_param { +struct sctp_chunks_param { struct sctp_paramhdr param_hdr; __u8 chunks[0]; -} sctp_chunks_param_t; +}; /* AUTH Section 3.3 HMAC Algorithm */ typedef struct sctp_hmac_algo_param { diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index f22c079fd1f1..8042e6380b0e 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1557,7 +1557,7 @@ struct sctp_association { * cookie and these are just pointers to those locations */ struct sctp_random_param *peer_random; - sctp_chunks_param_t *peer_chunks; + struct sctp_chunks_param *peer_chunks; sctp_hmac_algo_param_t *peer_hmacs; } peer; diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 0d9c63eba978..367994d9712a 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -186,7 +186,7 @@ static int sctp_auth_compare_vectors(struct sctp_auth_bytes *vector1, */ static struct sctp_auth_bytes *sctp_auth_make_key_vector( struct sctp_random_param *random, - sctp_chunks_param_t *chunks, + struct sctp_chunks_param *chunks, sctp_hmac_algo_param_t *hmacs, gfp_t gfp) { @@ -227,7 +227,7 @@ static struct sctp_auth_bytes *sctp_auth_make_local_vector( { return sctp_auth_make_key_vector( (struct sctp_random_param *)asoc->c.auth_random, - (sctp_chunks_param_t *)asoc->c.auth_chunks, + (struct sctp_chunks_param *)asoc->c.auth_chunks, (sctp_hmac_algo_param_t *)asoc->c.auth_hmacs, gfp); } diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 0e86f988f836..35bf5af124fc 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -78,8 +78,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, if (!auth_hmacs) goto nomem; - auth_chunks = kzalloc(sizeof(sctp_chunks_param_t) + - SCTP_NUM_CHUNK_TYPES, gfp); + auth_chunks = kzalloc(sizeof(*auth_chunks) + + SCTP_NUM_CHUNK_TYPES, gfp); if (!auth_chunks) goto nomem; -- cgit v1.2.3-55-g7522 From 1474774a7f0daf9878fd9537a24714f419e744ed Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 17 Jul 2017 11:29:59 +0800 Subject: sctp: remove the typedef sctp_hmac_algo_param_t This patch is to remove the typedef sctp_hmac_algo_param_t, and replace with struct sctp_hmac_algo_param in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 4 ++-- include/net/sctp/structs.h | 2 +- net/sctp/auth.c | 4 ++-- net/sctp/endpointola.c | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index b52def9bcfa1..913474dfc96c 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -327,10 +327,10 @@ struct sctp_chunks_param { }; /* AUTH Section 3.3 HMAC Algorithm */ -typedef struct sctp_hmac_algo_param { +struct sctp_hmac_algo_param { struct sctp_paramhdr param_hdr; __be16 hmac_ids[0]; -} sctp_hmac_algo_param_t; +}; /* RFC 2960. 
Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2): * The INIT ACK chunk is used to acknowledge the initiation of an SCTP diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 8042e6380b0e..66cd7639b912 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1558,7 +1558,7 @@ struct sctp_association { */ struct sctp_random_param *peer_random; struct sctp_chunks_param *peer_chunks; - sctp_hmac_algo_param_t *peer_hmacs; + struct sctp_hmac_algo_param *peer_hmacs; } peer; /* State : A state variable indicating what state the diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 367994d9712a..00667c50efa7 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -187,7 +187,7 @@ static int sctp_auth_compare_vectors(struct sctp_auth_bytes *vector1, static struct sctp_auth_bytes *sctp_auth_make_key_vector( struct sctp_random_param *random, struct sctp_chunks_param *chunks, - sctp_hmac_algo_param_t *hmacs, + struct sctp_hmac_algo_param *hmacs, gfp_t gfp) { struct sctp_auth_bytes *new; @@ -228,7 +228,7 @@ static struct sctp_auth_bytes *sctp_auth_make_local_vector( return sctp_auth_make_key_vector( (struct sctp_random_param *)asoc->c.auth_random, (struct sctp_chunks_param *)asoc->c.auth_chunks, - (sctp_hmac_algo_param_t *)asoc->c.auth_hmacs, gfp); + (struct sctp_hmac_algo_param *)asoc->c.auth_hmacs, gfp); } /* Make a key vector based on peer's parameters */ diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 35bf5af124fc..3d506b2f6193 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -73,8 +73,8 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, * variables. There are arrays that we encode directly * into parameters to make the rest of the operations easier. */ - auth_hmacs = kzalloc(sizeof(sctp_hmac_algo_param_t) + - sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp); + auth_hmacs = kzalloc(sizeof(*auth_hmacs) + + sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp); if (!auth_hmacs) goto nomem; -- cgit v1.2.3-55-g7522 From ee06b1728b95643668e40fc58ae118aeb7c1753e Mon Sep 17 00:00:00 2001 From: Alvaro G. M Date: Mon, 17 Jul 2017 09:12:28 +0200 Subject: net: axienet: add support for standard phy-mode binding Keep supporting proprietary "xlnx,phy-type" attribute and add support for MII connectivity to the PHY. Reviewed-by: Andrew Lunn Signed-off-by: Alvaro Gamez Machado Signed-off-by: David S. Miller --- .../devicetree/bindings/net/xilinx_axienet.txt | 55 ++++++++++++++++++++++ drivers/net/ethernet/xilinx/xilinx_axienet.h | 4 +- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 48 ++++++++++++++----- 3 files changed, 93 insertions(+), 14 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/xilinx_axienet.txt diff --git a/Documentation/devicetree/bindings/net/xilinx_axienet.txt b/Documentation/devicetree/bindings/net/xilinx_axienet.txt new file mode 100644 index 000000000000..38f9ec076743 --- /dev/null +++ b/Documentation/devicetree/bindings/net/xilinx_axienet.txt @@ -0,0 +1,55 @@ +XILINX AXI ETHERNET Device Tree Bindings +-------------------------------------------------------- + +Also called AXI 1G/2.5G Ethernet Subsystem, the xilinx axi ethernet IP core +provides connectivity to an external ethernet PHY supporting different +interfaces: MII, GMII, RGMII, SGMII, 1000BaseX. It also includes two +segments of memory for buffering TX and RX, as well as the capability of +offloading TX/RX checksum calculation off the processor. 
+ +Management configuration is done through the AXI interface, while payload is +sent and received through means of an AXI DMA controller. This driver +includes the DMA driver code, so this driver is incompatible with AXI DMA +driver. + +For more details about mdio please refer phy.txt file in the same directory. + +Required properties: +- compatible : Must be one of "xlnx,axi-ethernet-1.00.a", + "xlnx,axi-ethernet-1.01.a", "xlnx,axi-ethernet-2.01.a" +- reg : Address and length of the IO space. +- interrupts : Should be a list of two interrupt, TX and RX. +- phy-handle : Should point to the external phy device. + See ethernet.txt file in the same directory. +- xlnx,rxmem : Set to allocated memory buffer for Rx/Tx in the hardware + +Optional properties: +- phy-mode : See ethernet.txt +- xlnx,phy-type : Deprecated, do not use, but still accepted in preference + to phy-mode. +- xlnx,txcsum : 0 or empty for disabling TX checksum offload, + 1 to enable partial TX checksum offload, + 2 to enable full TX checksum offload +- xlnx,rxcsum : Same values as xlnx,txcsum but for RX checksum offload + +Example: + axi_ethernet_eth: ethernet@40c00000 { + compatible = "xlnx,axi-ethernet-1.00.a"; + device_type = "network"; + interrupt-parent = <µblaze_0_axi_intc>; + interrupts = <2 0>; + phy-mode = "mii"; + reg = <0x40c00000 0x40000>; + xlnx,rxcsum = <0x2>; + xlnx,rxmem = <0x800>; + xlnx,txcsum = <0x2>; + phy-handle = <&phy0>; + axi_ethernetlite_0_mdio: mdio { + #address-cells = <1>; + #size-cells = <0>; + phy0: phy@0 { + device_type = "ethernet-phy"; + reg = <1>; + }; + }; + }; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index af27f7d1cbf3..5ef626331f85 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -389,7 +389,7 @@ struct axidma_bd { * @dma_err_tasklet: Tasklet structure to process Axi DMA errors * @tx_irq: Axidma TX IRQ number * @rx_irq: Axidma RX IRQ number - * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X + * @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X * @options: AxiEthernet option word * @last_link: Phy link state in which the PHY was negotiated earlier * @features: Stores the extended features supported by the axienet hw @@ -432,7 +432,7 @@ struct axienet_local { int tx_irq; int rx_irq; - u32 phy_type; + phy_interface_t phy_mode; u32 options; /* Current options word */ u32 last_link; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 33c595f4691d..e74e1e897864 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -531,11 +531,11 @@ static void axienet_adjust_link(struct net_device *ndev) link_state = phy->speed | (phy->duplex << 1) | phy->link; if (lp->last_link != link_state) { if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) { - if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X) + if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) setspeed = 0; } else { if ((phy->speed == SPEED_1000) && - (lp->phy_type == XAE_PHY_TYPE_MII)) + (lp->phy_mode == PHY_INTERFACE_MODE_MII)) setspeed = 0; } @@ -935,15 +935,8 @@ static int axienet_open(struct net_device *ndev) return ret; if (lp->phy_node) { - if (lp->phy_type == XAE_PHY_TYPE_GMII) { - phydev = of_phy_connect(lp->ndev, lp->phy_node, - axienet_adjust_link, 0, - PHY_INTERFACE_MODE_GMII); - } else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) { 
- phydev = of_phy_connect(lp->ndev, lp->phy_node, - axienet_adjust_link, 0, - PHY_INTERFACE_MODE_RGMII_ID); - } + phydev = of_phy_connect(lp->ndev, lp->phy_node, + axienet_adjust_link, 0, lp->phy_mode); if (!phydev) dev_err(lp->dev, "of_phy_connect() failed\n"); @@ -1539,7 +1532,38 @@ static int axienet_probe(struct platform_device *pdev) * the device-tree and accordingly set flags. */ of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); - of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &lp->phy_type); + + /* Start with the proprietary, and broken phy_type */ + ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); + if (!ret) { + netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); + switch (value) { + case XAE_PHY_TYPE_MII: + lp->phy_mode = PHY_INTERFACE_MODE_MII; + break; + case XAE_PHY_TYPE_GMII: + lp->phy_mode = PHY_INTERFACE_MODE_GMII; + break; + case XAE_PHY_TYPE_RGMII_2_0: + lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; + break; + case XAE_PHY_TYPE_SGMII: + lp->phy_mode = PHY_INTERFACE_MODE_SGMII; + break; + case XAE_PHY_TYPE_1000BASE_X: + lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; + break; + default: + ret = -EINVAL; + goto free_netdev; + } + } else { + lp->phy_mode = of_get_phy_mode(pdev->dev.of_node); + if (lp->phy_mode < 0) { + ret = -EINVAL; + goto free_netdev; + } + } /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); -- cgit v1.2.3-55-g7522 From b3a703c7a698bcc0938fe22ba74d32593ad6e662 Mon Sep 17 00:00:00 2001 From: Biju Das Date: Mon, 17 Jul 2017 09:33:52 +0100 Subject: dt-bindings: net: ravb : Add support for r8a7743 SoC Add a new compatible string for the RZ/G1M (R8A7743) SoC. Signed-off-by: Biju Das Reviewed-by: Geert Uytterhoeven Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- .../devicetree/bindings/net/renesas,ravb.txt | 29 +++++++++++++--------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt index b519503be51a..4717bc24eada 100644 --- a/Documentation/devicetree/bindings/net/renesas,ravb.txt +++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt @@ -4,19 +4,24 @@ This file provides information on what the device node for the Ethernet AVB interface contains. Required properties: -- compatible: "renesas,etheravb-r8a7790" if the device is a part of R8A7790 SoC. - "renesas,etheravb-r8a7791" if the device is a part of R8A7791 SoC. - "renesas,etheravb-r8a7792" if the device is a part of R8A7792 SoC. - "renesas,etheravb-r8a7793" if the device is a part of R8A7793 SoC. - "renesas,etheravb-r8a7794" if the device is a part of R8A7794 SoC. - "renesas,etheravb-r8a7795" if the device is a part of R8A7795 SoC. - "renesas,etheravb-r8a7796" if the device is a part of R8A7796 SoC. - "renesas,etheravb-rcar-gen2" for generic R-Car Gen 2 compatible interface. - "renesas,etheravb-rcar-gen3" for generic R-Car Gen 3 compatible interface. +- compatible: Must contain one or more of the following: + - "renesas,etheravb-r8a7743" for the R8A7743 SoC. + - "renesas,etheravb-r8a7790" for the R8A7790 SoC. + - "renesas,etheravb-r8a7791" for the R8A7791 SoC. + - "renesas,etheravb-r8a7792" for the R8A7792 SoC. + - "renesas,etheravb-r8a7793" for the R8A7793 SoC. + - "renesas,etheravb-r8a7794" for the R8A7794 SoC. + - "renesas,etheravb-rcar-gen2" as a fallback for the above + R-Car Gen2 and RZ/G1 devices. 
- When compatible with the generic version, nodes must list the - SoC-specific version corresponding to the platform first - followed by the generic version. + - "renesas,etheravb-r8a7795" for the R8A7795 SoC. + - "renesas,etheravb-r8a7796" for the R8A7796 SoC. + - "renesas,etheravb-rcar-gen3" as a fallback for the above + R-Car Gen3 devices. + + When compatible with the generic version, nodes must list the + SoC-specific version corresponding to the platform first followed by + the generic version. - reg: offset and length of (1) the register block and (2) the stream buffer. - interrupts: A list of interrupt-specifiers, one for each entry in -- cgit v1.2.3-55-g7522 From 3ccc6c6faaa93da70989177b91c7c3ef0df10937 Mon Sep 17 00:00:00 2001 From: linzhang Date: Mon, 17 Jul 2017 17:25:02 +0800 Subject: skbuff: optimize the pull_pages code in __pskb_pull_tail() In the pull_pages code block, if the first frag size > eat, we can end the loop in advance to avoid extra copy. Signed-off-by: Lin Zhang Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/skbuff.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8b11341ed69a..b352c6bcfb31 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1719,6 +1719,8 @@ pull_pages: if (eat) { skb_shinfo(skb)->frags[k].page_offset += eat; skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); + if (!i) + goto end; eat = 0; } k++; @@ -1726,6 +1728,7 @@ pull_pages: } skb_shinfo(skb)->nr_frags = k; +end: skb->tail += delta; skb->data_len -= delta; -- cgit v1.2.3-55-g7522 From 27eac47b00789522ba00501b0838026e1ecb6f05 Mon Sep 17 00:00:00 2001 From: David Herrmann Date: Mon, 17 Jul 2017 11:35:54 +0200 Subject: net/unix: drop obsolete fd-recursion limits All unix sockets now account inflight FDs to the respective sender. This was introduced in: commit 712f4aad406bb1ed67f3f98d04c044191f0ff593 Author: willy tarreau Date: Sun Jan 10 07:54:56 2016 +0100 unix: properly account for FDs passed over unix sockets and further refined in: commit 415e3d3e90ce9e18727e8843ae343eda5a58fad6 Author: Hannes Frederic Sowa Date: Wed Feb 3 02:11:03 2016 +0100 unix: correctly track in-flight fds in sending process user_struct Hence, regardless of the stacking depth of FDs, the total number of inflight FDs is limited, and accounted. There is no known way for a local user to exceed those limits or exploit the accounting. Furthermore, the GC logic is independent of the recursion/stacking depth as well. It solely depends on the total number of inflight FDs, regardless of their layout. Lastly, the current `recursion_level' suffers a TOCTOU race, since it checks and inherits depths only at queue time. If we consider `A<-B' to mean `queue-B-on-A', the following sequence circumvents the recursion level easily: A<-B B<-C C<-D ... Y<-Z resulting in: A<-B<-C<-...<-Z With all of this in mind, lets drop the recursion limit. It has no additional security value, anymore. On the contrary, it randomly confuses message brokers that try to forward file-descriptors, since any sendmsg(2) call can fail spuriously with ETOOMANYREFS if a client maliciously modifies the FD while inflight. Cc: Alban Crequy Cc: Simon McVittie Signed-off-by: David Herrmann Reviewed-by: Tom Gundersen Signed-off-by: David S. 
Miller --- include/net/af_unix.h | 1 - net/unix/af_unix.c | 24 +----------------------- 2 files changed, 1 insertion(+), 24 deletions(-) diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 678e4d6fa317..3b3194b2fc65 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -58,7 +58,6 @@ struct unix_sock { struct list_head link; atomic_long_t inflight; spinlock_t lock; - unsigned char recursion_level; unsigned long gc_flags; #define UNIX_GC_CANDIDATE 0 #define UNIX_GC_MAYBE_CYCLE 1 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 7b52a380d710..5c53f22d62e8 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1528,26 +1528,13 @@ static inline bool too_many_unix_fds(struct task_struct *p) return false; } -#define MAX_RECURSION_LEVEL 4 - static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { int i; - unsigned char max_level = 0; if (too_many_unix_fds(current)) return -ETOOMANYREFS; - for (i = scm->fp->count - 1; i >= 0; i--) { - struct sock *sk = unix_get_socket(scm->fp->fp[i]); - - if (sk) - max_level = max(max_level, - unix_sk(sk)->recursion_level); - } - if (unlikely(max_level > MAX_RECURSION_LEVEL)) - return -ETOOMANYREFS; - /* * Need to duplicate file references for the sake of garbage * collection. Otherwise a socket in the fps might become a @@ -1559,7 +1546,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) for (i = scm->fp->count - 1; i >= 0; i--) unix_inflight(scm->fp->user, scm->fp->fp[i]); - return max_level; + return 0; } static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) @@ -1649,7 +1636,6 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, struct sk_buff *skb; long timeo; struct scm_cookie scm; - int max_level; int data_len = 0; int sk_locked; @@ -1701,7 +1687,6 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, err = unix_scm_to_skb(&scm, skb, true); if (err < 0) goto out_free; - max_level = err + 1; skb_put(skb, len - data_len); skb->data_len = data_len; @@ -1819,8 +1804,6 @@ restart_locked: __net_timestamp(skb); maybe_add_creds(skb, sock, other); skb_queue_tail(&other->sk_receive_queue, skb); - if (max_level > unix_sk(other)->recursion_level) - unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other); sock_put(other); @@ -1855,7 +1838,6 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, int sent = 0; struct scm_cookie scm; bool fds_sent = false; - int max_level; int data_len; wait_for_unix_gc(); @@ -1905,7 +1887,6 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, kfree_skb(skb); goto out_err; } - max_level = err + 1; fds_sent = true; skb_put(skb, size - data_len); @@ -1925,8 +1906,6 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, maybe_add_creds(skb, sock, other); skb_queue_tail(&other->sk_receive_queue, skb); - if (max_level > unix_sk(other)->recursion_level) - unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other); sent += size; @@ -2324,7 +2303,6 @@ redo: last_len = last ? 
last->len : 0; again: if (skb == NULL) { - unix_sk(sk)->recursion_level = 0; if (copied >= target) goto unlock; -- cgit v1.2.3-55-g7522 From b145425f269a17ed344d737f746b844dfac60c82 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 17 Jul 2017 02:56:10 -0700 Subject: inetpeer: remove AVL implementation in favor of RB tree As discussed in Faro during Netfilter Workshop 2017, RB trees can be used with RCU, using a seqlock. Note that net/rxrpc/conn_service.c is already using this. This patch converts inetpeer from AVL tree to RB tree, since it allows to remove private AVL implementation in favor of shared RB code. $ size net/ipv4/inetpeer.before net/ipv4/inetpeer.after text data bss dec hex filename 3195 40 128 3363 d23 net/ipv4/inetpeer.before 1562 24 0 1586 632 net/ipv4/inetpeer.after The same technique can be used to speed up net/netfilter/nft_set_rbtree.c (removing rwlock contention in fast path) Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/inetpeer.h | 11 +- net/ipv4/inetpeer.c | 428 +++++++++++-------------------------------------- 2 files changed, 92 insertions(+), 347 deletions(-) diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index f2a215fc78e4..950ed182f62f 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ -33,18 +33,12 @@ struct inetpeer_addr { }; struct inet_peer { - /* group together avl_left,avl_right,v4daddr to speedup lookups */ - struct inet_peer __rcu *avl_left, *avl_right; + struct rb_node rb_node; struct inetpeer_addr daddr; - __u32 avl_height; u32 metrics[RTAX_MAX]; u32 rate_tokens; /* rate limiting for ICMP */ unsigned long rate_last; - union { - struct list_head gc_list; - struct rcu_head gc_rcu; - }; /* * Once inet_peer is queued for deletion (refcnt == 0), following field * is not available: rid @@ -55,7 +49,6 @@ struct inet_peer { atomic_t rid; /* Frag reception counter */ }; struct rcu_head rcu; - struct inet_peer *gc_next; }; /* following fields might be frequently dirtied */ @@ -64,7 +57,7 @@ struct inet_peer { }; struct inet_peer_base { - struct inet_peer __rcu *root; + struct rb_root rb_root; seqlock_t lock; int total; }; diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index c5a117cc6619..337ad41bb80a 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -33,7 +33,7 @@ * also be removed if the pool is overloaded i.e. if the total amount of * entries is greater-or-equal than the threshold. * - * Node pool is organised as an AVL tree. + * Node pool is organised as an RB tree. * Such an implementation has been chosen not just for fun. It's a way to * prevent easy and efficient DoS attacks by creating hash collisions. A huge * amount of long living nodes in a single hash slot would significantly delay @@ -45,7 +45,7 @@ * AND reference count being 0. * 3. Global variable peer_total is modified under the pool lock. * 4. 
struct inet_peer fields modification: - * avl_left, avl_right, avl_parent, avl_height: pool lock + * rb_node: pool lock * refcnt: atomically against modifications on other CPU; * usually under some other lock to prevent node disappearing * daddr: unchangeable @@ -53,30 +53,15 @@ static struct kmem_cache *peer_cachep __read_mostly; -static LIST_HEAD(gc_list); -static const int gc_delay = 60 * HZ; -static struct delayed_work gc_work; -static DEFINE_SPINLOCK(gc_lock); - -#define node_height(x) x->avl_height - -#define peer_avl_empty ((struct inet_peer *)&peer_fake_node) -#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node) -static const struct inet_peer peer_fake_node = { - .avl_left = peer_avl_empty_rcu, - .avl_right = peer_avl_empty_rcu, - .avl_height = 0 -}; - void inet_peer_base_init(struct inet_peer_base *bp) { - bp->root = peer_avl_empty_rcu; + bp->rb_root = RB_ROOT; seqlock_init(&bp->lock); bp->total = 0; } EXPORT_SYMBOL_GPL(inet_peer_base_init); -#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ +#define PEER_MAX_GC 32 /* Exported for sysctl_net_ipv4. */ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries more @@ -84,53 +69,6 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */ -static void inetpeer_gc_worker(struct work_struct *work) -{ - struct inet_peer *p, *n, *c; - struct list_head list; - - spin_lock_bh(&gc_lock); - list_replace_init(&gc_list, &list); - spin_unlock_bh(&gc_lock); - - if (list_empty(&list)) - return; - - list_for_each_entry_safe(p, n, &list, gc_list) { - - if (need_resched()) - cond_resched(); - - c = rcu_dereference_protected(p->avl_left, 1); - if (c != peer_avl_empty) { - list_add_tail(&c->gc_list, &list); - p->avl_left = peer_avl_empty_rcu; - } - - c = rcu_dereference_protected(p->avl_right, 1); - if (c != peer_avl_empty) { - list_add_tail(&c->gc_list, &list); - p->avl_right = peer_avl_empty_rcu; - } - - n = list_entry(p->gc_list.next, struct inet_peer, gc_list); - - if (refcount_read(&p->refcnt) == 1) { - list_del(&p->gc_list); - kmem_cache_free(peer_cachep, p); - } - } - - if (list_empty(&list)) - return; - - spin_lock_bh(&gc_lock); - list_splice(&list, &gc_list); - spin_unlock_bh(&gc_lock); - - schedule_delayed_work(&gc_work, gc_delay); -} - /* Called from ip_output.c:ip_init */ void __init inet_initpeers(void) { @@ -153,225 +91,62 @@ void __init inet_initpeers(void) sizeof(struct inet_peer), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); - - INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker); } -#define rcu_deref_locked(X, BASE) \ - rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock)) - -/* - * Called with local BH disabled and the pool lock held. - */ -#define lookup(_daddr, _stack, _base) \ -({ \ - struct inet_peer *u; \ - struct inet_peer __rcu **v; \ - \ - stackptr = _stack; \ - *stackptr++ = &_base->root; \ - for (u = rcu_deref_locked(_base->root, _base); \ - u != peer_avl_empty;) { \ - int cmp = inetpeer_addr_cmp(_daddr, &u->daddr); \ - if (cmp == 0) \ - break; \ - if (cmp == -1) \ - v = &u->avl_left; \ - else \ - v = &u->avl_right; \ - *stackptr++ = v; \ - u = rcu_deref_locked(*v, _base); \ - } \ - u; \ -}) - -/* - * Called with rcu_read_lock() - * Because we hold no lock against a writer, its quite possible we fall - * in an endless loop. 
- * But every pointer we follow is guaranteed to be valid thanks to RCU. - * We exit from this function if number of links exceeds PEER_MAXDEPTH - */ -static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr, - struct inet_peer_base *base) +/* Called with rcu_read_lock() or base->lock held */ +static struct inet_peer *lookup(const struct inetpeer_addr *daddr, + struct inet_peer_base *base, + unsigned int seq, + struct inet_peer *gc_stack[], + unsigned int *gc_cnt, + struct rb_node **parent_p, + struct rb_node ***pp_p) { - struct inet_peer *u = rcu_dereference(base->root); - int count = 0; + struct rb_node **pp, *parent; + struct inet_peer *p; + + pp = &base->rb_root.rb_node; + parent = NULL; + while (*pp) { + int cmp; - while (u != peer_avl_empty) { - int cmp = inetpeer_addr_cmp(daddr, &u->daddr); + parent = rcu_dereference_raw(*pp); + p = rb_entry(parent, struct inet_peer, rb_node); + cmp = inetpeer_addr_cmp(daddr, &p->daddr); if (cmp == 0) { - /* Before taking a reference, check if this entry was - * deleted (refcnt=0) - */ - if (!refcount_inc_not_zero(&u->refcnt)) { - u = NULL; - } - return u; + if (!refcount_inc_not_zero(&p->refcnt)) + break; + return p; + } + if (gc_stack) { + if (*gc_cnt < PEER_MAX_GC) + gc_stack[(*gc_cnt)++] = p; + } else if (unlikely(read_seqretry(&base->lock, seq))) { + break; } if (cmp == -1) - u = rcu_dereference(u->avl_left); + pp = &(*pp)->rb_left; else - u = rcu_dereference(u->avl_right); - if (unlikely(++count == PEER_MAXDEPTH)) - break; + pp = &(*pp)->rb_right; } + *parent_p = parent; + *pp_p = pp; return NULL; } -/* Called with local BH disabled and the pool lock held. */ -#define lookup_rightempty(start, base) \ -({ \ - struct inet_peer *u; \ - struct inet_peer __rcu **v; \ - *stackptr++ = &start->avl_left; \ - v = &start->avl_left; \ - for (u = rcu_deref_locked(*v, base); \ - u->avl_right != peer_avl_empty_rcu;) { \ - v = &u->avl_right; \ - *stackptr++ = v; \ - u = rcu_deref_locked(*v, base); \ - } \ - u; \ -}) - -/* Called with local BH disabled and the pool lock held. - * Variable names are the proof of operation correctness. - * Look into mm/map_avl.c for more detail description of the ideas. 
- */ -static void peer_avl_rebalance(struct inet_peer __rcu **stack[], - struct inet_peer __rcu ***stackend, - struct inet_peer_base *base) -{ - struct inet_peer __rcu **nodep; - struct inet_peer *node, *l, *r; - int lh, rh; - - while (stackend > stack) { - nodep = *--stackend; - node = rcu_deref_locked(*nodep, base); - l = rcu_deref_locked(node->avl_left, base); - r = rcu_deref_locked(node->avl_right, base); - lh = node_height(l); - rh = node_height(r); - if (lh > rh + 1) { /* l: RH+2 */ - struct inet_peer *ll, *lr, *lrl, *lrr; - int lrh; - ll = rcu_deref_locked(l->avl_left, base); - lr = rcu_deref_locked(l->avl_right, base); - lrh = node_height(lr); - if (lrh <= node_height(ll)) { /* ll: RH+1 */ - RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */ - RCU_INIT_POINTER(node->avl_right, r); /* r: RH */ - node->avl_height = lrh + 1; /* RH+1 or RH+2 */ - RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH+1 */ - RCU_INIT_POINTER(l->avl_right, node); /* node: RH+1 or RH+2 */ - l->avl_height = node->avl_height + 1; - RCU_INIT_POINTER(*nodep, l); - } else { /* ll: RH, lr: RH+1 */ - lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */ - lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */ - RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */ - RCU_INIT_POINTER(node->avl_right, r); /* r: RH */ - node->avl_height = rh + 1; /* node: RH+1 */ - RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH */ - RCU_INIT_POINTER(l->avl_right, lrl); /* lrl: RH or RH-1 */ - l->avl_height = rh + 1; /* l: RH+1 */ - RCU_INIT_POINTER(lr->avl_left, l); /* l: RH+1 */ - RCU_INIT_POINTER(lr->avl_right, node); /* node: RH+1 */ - lr->avl_height = rh + 2; - RCU_INIT_POINTER(*nodep, lr); - } - } else if (rh > lh + 1) { /* r: LH+2 */ - struct inet_peer *rr, *rl, *rlr, *rll; - int rlh; - rr = rcu_deref_locked(r->avl_right, base); - rl = rcu_deref_locked(r->avl_left, base); - rlh = node_height(rl); - if (rlh <= node_height(rr)) { /* rr: LH+1 */ - RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */ - RCU_INIT_POINTER(node->avl_left, l); /* l: LH */ - node->avl_height = rlh + 1; /* LH+1 or LH+2 */ - RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH+1 */ - RCU_INIT_POINTER(r->avl_left, node); /* node: LH+1 or LH+2 */ - r->avl_height = node->avl_height + 1; - RCU_INIT_POINTER(*nodep, r); - } else { /* rr: RH, rl: RH+1 */ - rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */ - rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */ - RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */ - RCU_INIT_POINTER(node->avl_left, l); /* l: LH */ - node->avl_height = lh + 1; /* node: LH+1 */ - RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH */ - RCU_INIT_POINTER(r->avl_left, rlr); /* rlr: LH or LH-1 */ - r->avl_height = lh + 1; /* r: LH+1 */ - RCU_INIT_POINTER(rl->avl_right, r); /* r: LH+1 */ - RCU_INIT_POINTER(rl->avl_left, node); /* node: LH+1 */ - rl->avl_height = lh + 2; - RCU_INIT_POINTER(*nodep, rl); - } - } else { - node->avl_height = (lh > rh ? lh : rh) + 1; - } - } -} - -/* Called with local BH disabled and the pool lock held. 
*/ -#define link_to_pool(n, base) \ -do { \ - n->avl_height = 1; \ - n->avl_left = peer_avl_empty_rcu; \ - n->avl_right = peer_avl_empty_rcu; \ - /* lockless readers can catch us now */ \ - rcu_assign_pointer(**--stackptr, n); \ - peer_avl_rebalance(stack, stackptr, base); \ -} while (0) - static void inetpeer_free_rcu(struct rcu_head *head) { kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu)); } -static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base, - struct inet_peer __rcu **stack[PEER_MAXDEPTH]) -{ - struct inet_peer __rcu ***stackptr, ***delp; - - if (lookup(&p->daddr, stack, base) != p) - BUG(); - delp = stackptr - 1; /* *delp[0] == p */ - if (p->avl_left == peer_avl_empty_rcu) { - *delp[0] = p->avl_right; - --stackptr; - } else { - /* look for a node to insert instead of p */ - struct inet_peer *t; - t = lookup_rightempty(p, base); - BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t); - **--stackptr = t->avl_left; - /* t is removed, t->daddr > x->daddr for any - * x in p->avl_left subtree. - * Put t in the old place of p. */ - RCU_INIT_POINTER(*delp[0], t); - t->avl_left = p->avl_left; - t->avl_right = p->avl_right; - t->avl_height = p->avl_height; - BUG_ON(delp[1] != &p->avl_left); - delp[1] = &t->avl_left; /* was &p->avl_left */ - } - peer_avl_rebalance(stack, stackptr, base); - base->total--; - call_rcu(&p->rcu, inetpeer_free_rcu); -} - /* perform garbage collect on all items stacked during a lookup */ -static int inet_peer_gc(struct inet_peer_base *base, - struct inet_peer __rcu **stack[PEER_MAXDEPTH], - struct inet_peer __rcu ***stackptr) +static void inet_peer_gc(struct inet_peer_base *base, + struct inet_peer *gc_stack[], + unsigned int gc_cnt) { - struct inet_peer *p, *gchead = NULL; + struct inet_peer *p; __u32 delta, ttl; - int cnt = 0; + int i; if (base->total >= inet_peer_threshold) ttl = 0; /* be aggressive */ @@ -379,43 +154,38 @@ static int inet_peer_gc(struct inet_peer_base *base, ttl = inet_peer_maxttl - (inet_peer_maxttl - inet_peer_minttl) / HZ * base->total / inet_peer_threshold * HZ; - stackptr--; /* last stack slot is peer_avl_empty */ - while (stackptr > stack) { - stackptr--; - p = rcu_deref_locked(**stackptr, base); - if (refcount_read(&p->refcnt) == 1) { - smp_rmb(); - delta = (__u32)jiffies - p->dtime; - if (delta >= ttl && refcount_dec_if_one(&p->refcnt)) { - p->gc_next = gchead; - gchead = p; - } - } + for (i = 0; i < gc_cnt; i++) { + p = gc_stack[i]; + delta = (__u32)jiffies - p->dtime; + if (delta < ttl || !refcount_dec_if_one(&p->refcnt)) + gc_stack[i] = NULL; } - while ((p = gchead) != NULL) { - gchead = p->gc_next; - cnt++; - unlink_from_pool(p, base, stack); + for (i = 0; i < gc_cnt; i++) { + p = gc_stack[i]; + if (p) { + rb_erase(&p->rb_node, &base->rb_root); + base->total--; + call_rcu(&p->rcu, inetpeer_free_rcu); + } } - return cnt; } struct inet_peer *inet_getpeer(struct inet_peer_base *base, const struct inetpeer_addr *daddr, int create) { - struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; - struct inet_peer *p; - unsigned int sequence; - int invalidated, gccnt = 0; + struct inet_peer *p, *gc_stack[PEER_MAX_GC]; + struct rb_node **pp, *parent; + unsigned int gc_cnt, seq; + int invalidated; /* Attempt a lockless lookup first. * Because of a concurrent writer, we might not find an existing entry. 
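 * [Editor's note, not part of the original comment or patch: the lines
 *  below are the usual seqlock read side.  read_seqbegin() samples the
 *  sequence counter, the tree walk then proceeds under RCU without
 *  taking base->lock, and read_seqretry() reports whether a writer
 *  changed the tree in the meantime.  If it did and nothing was found,
 *  the function falls back to the exact lookup further down, taken
 *  under write_seqlock_bh().]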
*/ rcu_read_lock(); - sequence = read_seqbegin(&base->lock); - p = lookup_rcu(daddr, base); - invalidated = read_seqretry(&base->lock, sequence); + seq = read_seqbegin(&base->lock); + p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp); + invalidated = read_seqretry(&base->lock, seq); rcu_read_unlock(); if (p) @@ -428,36 +198,31 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, /* retry an exact lookup, taking the lock before. * At least, nodes should be hot in our cache. */ + parent = NULL; write_seqlock_bh(&base->lock); -relookup: - p = lookup(daddr, stack, base); - if (p != peer_avl_empty) { - refcount_inc(&p->refcnt); - write_sequnlock_bh(&base->lock); - return p; - } - if (!gccnt) { - gccnt = inet_peer_gc(base, stack, stackptr); - if (gccnt && create) - goto relookup; - } - p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL; - if (p) { - p->daddr = *daddr; - refcount_set(&p->refcnt, 2); - atomic_set(&p->rid, 0); - p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; - p->rate_tokens = 0; - /* 60*HZ is arbitrary, but chosen enough high so that the first - * calculation of tokens is at its maximum. - */ - p->rate_last = jiffies - 60*HZ; - INIT_LIST_HEAD(&p->gc_list); - /* Link the node. */ - link_to_pool(p, base); - base->total++; + gc_cnt = 0; + p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp); + if (!p && create) { + p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC); + if (p) { + p->daddr = *daddr; + refcount_set(&p->refcnt, 2); + atomic_set(&p->rid, 0); + p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; + p->rate_tokens = 0; + /* 60*HZ is arbitrary, but chosen enough high so that the first + * calculation of tokens is at its maximum. + */ + p->rate_last = jiffies - 60*HZ; + + rb_link_node(&p->rb_node, parent, pp); + rb_insert_color(&p->rb_node, &base->rb_root); + base->total++; + } } + if (gc_cnt) + inet_peer_gc(base, gc_stack, gc_cnt); write_sequnlock_bh(&base->lock); return p; @@ -467,8 +232,9 @@ EXPORT_SYMBOL_GPL(inet_getpeer); void inet_putpeer(struct inet_peer *p) { p->dtime = (__u32)jiffies; - smp_mb__before_atomic(); - refcount_dec(&p->refcnt); + + if (refcount_dec_and_test(&p->refcnt)) + call_rcu(&p->rcu, inetpeer_free_rcu); } EXPORT_SYMBOL_GPL(inet_putpeer); @@ -513,30 +279,16 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout) } EXPORT_SYMBOL(inet_peer_xrlim_allow); -static void inetpeer_inval_rcu(struct rcu_head *head) -{ - struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu); - - spin_lock_bh(&gc_lock); - list_add_tail(&p->gc_list, &gc_list); - spin_unlock_bh(&gc_lock); - - schedule_delayed_work(&gc_work, gc_delay); -} - void inetpeer_invalidate_tree(struct inet_peer_base *base) { - struct inet_peer *root; - - write_seqlock_bh(&base->lock); + struct inet_peer *p, *n; - root = rcu_deref_locked(base->root, base); - if (root != peer_avl_empty) { - base->root = peer_avl_empty_rcu; - base->total = 0; - call_rcu(&root->gc_rcu, inetpeer_inval_rcu); + rbtree_postorder_for_each_entry_safe(p, n, &base->rb_root, rb_node) { + inet_putpeer(p); + cond_resched(); } - write_sequnlock_bh(&base->lock); + base->rb_root = RB_ROOT; + base->total = 0; } EXPORT_SYMBOL(inetpeer_invalidate_tree); -- cgit v1.2.3-55-g7522 From 5f57e0909136f912b1a8ed677ef6eed8cbb3ec4f Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 17 Jul 2017 14:07:26 +0200 Subject: mlxsw: acl: Add ip ttl acl element Define new element for ip ttl and place it into scratch area. Signed-off-by: Or Gerlitz Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 9807ef814e42..789ebb31f4e5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -57,6 +57,7 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_VID, MLXSW_AFK_ELEMENT_PCP, MLXSW_AFK_ELEMENT_TCP_FLAGS, + MLXSW_AFK_ELEMENT_IP_TTL_, MLXSW_AFK_ELEMENT_MAX, }; @@ -104,6 +105,7 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(VID, 0x10, 8, 12), MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), + MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), -- cgit v1.2.3-55-g7522 From 046759a6cf36118f5f4468f5a3998aada040ca5d Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 17 Jul 2017 14:07:27 +0200 Subject: mlxsw: spectrum: Add ttl to the ipv4 acl block Add ttl field to the ipv4 acl block. Signed-off-by: Or Gerlitz Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 1 + drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h index 85d5001a5818..8a4767ca6909 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -70,6 +70,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8), MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */ }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 61a10f166f97..2c57be7a8399 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -984,6 +984,7 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_VID, MLXSW_AFK_ELEMENT_PCP, MLXSW_AFK_ELEMENT_TCP_FLAGS, + MLXSW_AFK_ELEMENT_IP_TTL_, }; static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { -- cgit v1.2.3-55-g7522 From fcbca8217d62405e91b14953cfb005f83cfa90f1 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 17 Jul 2017 14:07:28 +0200 Subject: mlxsw: spectrum_flower: Add support for ip ttl Support offloading rules that match on ip ttl. Signed-off-by: Or Gerlitz Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_flower.c | 37 ++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 21bb2bf62d3e..84fe33cf1e95 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -212,11 +212,39 @@ static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp, return 0; } +static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule_info *rulei, + struct tc_cls_flower_offload *f, + u16 n_proto) +{ + struct flow_dissector_key_ip *key, *mask; + + if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) + return 0; + + if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) { + dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n"); + return -EINVAL; + } + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IP, + f->key); + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IP, + f->mask); + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_, + key->ttl, mask->ttl); + return 0; +} + static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { + u16 n_proto_mask = 0; + u16 n_proto_key = 0; u16 addr_type = 0; u8 ip_proto = 0; int err; @@ -229,6 +257,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS) | BIT(FLOW_DISSECTOR_KEY_TCP) | + BIT(FLOW_DISSECTOR_KEY_IP) | BIT(FLOW_DISSECTOR_KEY_VLAN))) { dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n"); return -EOPNOTSUPP; @@ -253,8 +282,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_BASIC, f->mask); - u16 n_proto_key = ntohs(key->n_proto); - u16 n_proto_mask = ntohs(mask->n_proto); + n_proto_key = ntohs(key->n_proto); + n_proto_mask = ntohs(mask->n_proto); if (n_proto_key == ETH_P_ALL) { n_proto_key = 0; @@ -324,6 +353,10 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, if (err) return err; + err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask); + if (err) + return err; + return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts); } -- cgit v1.2.3-55-g7522 From 80d0fe4710c80e2ade26d7e3030c5c985f34907e Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 17 Jul 2017 14:07:29 +0200 Subject: mlxsw: acl: Add ip tos acl element Define new element for ip tos (ecn, dscp) and place it into scratch area. Signed-off-by: Or Gerlitz Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index 789ebb31f4e5..f6963b0b4a55 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -58,6 +58,8 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_PCP, MLXSW_AFK_ELEMENT_TCP_FLAGS, MLXSW_AFK_ELEMENT_IP_TTL_, + MLXSW_AFK_ELEMENT_IP_ECN, + MLXSW_AFK_ELEMENT_IP_DSCP, MLXSW_AFK_ELEMENT_MAX, }; @@ -106,6 +108,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(PCP, 0x10, 20, 3), MLXSW_AFK_ELEMENT_INFO_U32(TCP_FLAGS, 0x10, 23, 9), MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x14, 0, 8), + MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x14, 9, 2), + MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x14, 11, 6), MLXSW_AFK_ELEMENT_INFO_U32(SRC_IP4, 0x18, 0, 32), MLXSW_AFK_ELEMENT_INFO_U32(DST_IP4, 0x1C, 0, 32), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP6_HI, 0x18, 8), -- cgit v1.2.3-55-g7522 From abac7b011d527cfc98dba6a7422bdedcdedda039 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 17 Jul 2017 14:07:30 +0200 Subject: mlxsw: spectrum: Add tos to the ipv4 acl block Add ecn and dscp fields to the ipv4 acl block. Signed-off-by: Or Gerlitz Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h | 2 ++ drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h index 8a4767ca6909..fb8031828454 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h @@ -70,7 +70,9 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_dip[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4[] = { MLXSW_AFK_ELEMENT_INST_U32(SRC_IP4, 0x00, 0, 32), + MLXSW_AFK_ELEMENT_INST_U32(IP_ECN, 0x04, 4, 2), MLXSW_AFK_ELEMENT_INST_U32(IP_TTL_, 0x04, 24, 8), + MLXSW_AFK_ELEMENT_INST_U32(IP_DSCP, 0x08, 0, 6), MLXSW_AFK_ELEMENT_INST_U32(TCP_FLAGS, 0x08, 8, 9), /* TCP_CONTROL+TCP_ECN */ }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 2c57be7a8399..bc5173f1b5c1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -985,6 +985,8 @@ static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = { MLXSW_AFK_ELEMENT_PCP, MLXSW_AFK_ELEMENT_TCP_FLAGS, MLXSW_AFK_ELEMENT_IP_TTL_, + MLXSW_AFK_ELEMENT_IP_ECN, + MLXSW_AFK_ELEMENT_IP_DSCP, }; static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = { -- cgit v1.2.3-55-g7522 From 87996f91f739b7971097428372165a0b66683d0d Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 17 Jul 2017 14:07:31 +0200 Subject: mlxsw: spectrum_flower: Add support for ip tos Support offloading rules that match on ip tos. Signed-off-by: Or Gerlitz Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 84fe33cf1e95..400ad4081660 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -235,6 +235,13 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, f->mask); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_, key->ttl, mask->ttl); + + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN, + key->tos & 0x3, mask->tos & 0x3); + + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP, + key->tos >> 6, mask->tos >> 6); + return 0; } -- cgit v1.2.3-55-g7522 From 0fcc484748c9dcad5238373a4b2e1b2f309392eb Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 17 Jul 2017 14:15:29 +0200 Subject: mlxsw: spectrum: Mark packets trapped in router In commit 1c6c6d221e2b ("mlxsw: spectrum: Mirror certain packets to CPU") we marked packets that were mirrored to the CPU, so that they won't be flooded again by the bridge driver. However, certain packets are trapped in the device's router block, after passing through the bridge block where they were potentially flooded. Mark all packets coming from L3 traps, so that they won't be potentially flooded again by the bridge driver. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 60bf8f27cc00..83e77b959703 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3334,14 +3334,14 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), /* L3 traps */ - MLXSW_SP_RXL_NO_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_NO_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_NO_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false), - MLXSW_SP_RXL_NO_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), - MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), - MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), - MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), + MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), + MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), + MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), + MLXSW_SP_RXL_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), -- cgit v1.2.3-55-g7522 From 7607dd35fc34893214284cca740d015154d20452 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 17 Jul 2017 14:15:30 +0200 Subject: mlxsw: spectrum: Trap IPv4 packets with Router Alert option In case local sockets have the IP_ROUTER_ALERT socket option set, then they expect to get packets with the Router Alert option. 
Trap such packets, so that the kernel could inspect them and potentially send them to interested sockets. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1 + drivers/net/ethernet/mellanox/mlxsw/trap.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 83e77b959703..bc35b400e6f1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3342,6 +3342,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), MLXSW_SP_RXL_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), + MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), /* PKT Sample trap */ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 12b5ed58f3eb..4946d4e2b3f9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -66,6 +66,7 @@ enum { MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, MLXSW_TRAP_ID_BGP_IPV4 = 0x88, MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, + MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, MLXSW_TRAP_ID_ACL0 = 0x1C0, MLXSW_TRAP_ID_MAX = 0x1FF -- cgit v1.2.3-55-g7522 From 588823f97df3d5bf219d8f0bfea1c23ce367b84d Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 17 Jul 2017 14:15:31 +0200 Subject: mlxsw: spectrum: Add support for IPv6 MLDv1/2 traps Add support for IPv6 MLDv1/2 packet trapping. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 1 + drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 10 ++++++++++ drivers/net/ethernet/mellanox/mlxsw/trap.h | 4 ++++ 3 files changed, 15 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 1bd34d9a7b9e..0ca196899e18 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3688,6 +3688,7 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, + MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD, }; /* reg_htgt_trap_group diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index bc35b400e6f1..1aa6298ea6cd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3333,6 +3333,14 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false), MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false), MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false), + MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD, + false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, + false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD, + false), + MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD, + false), /* L3 traps */ MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), @@ -3377,6 +3385,7 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) burst_size = 7; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: rate = 16 * 1024; burst_size = 10; break; @@ -3441,6 +3450,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD: priority = 3; tc = 3; break; diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 4946d4e2b3f9..891b4ee6eeb2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -63,6 +63,10 @@ enum { MLXSW_TRAP_ID_LBERROR = 0x54, MLXSW_TRAP_ID_OSPF = 0x55, MLXSW_TRAP_ID_IP2ME = 0x5F, + MLXSW_TRAP_ID_IPV6_MLDV12_LISTENER_QUERY = 0x65, + MLXSW_TRAP_ID_IPV6_MLDV1_LISTENER_REPORT = 0x66, + MLXSW_TRAP_ID_IPV6_MLDV1_LISTENER_DONE = 0x67, + MLXSW_TRAP_ID_IPV6_MLDV2_LISTENER_REPORT = 0x68, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, MLXSW_TRAP_ID_BGP_IPV4 = 0x88, MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, -- cgit v1.2.3-55-g7522 From 9df552ef3e214e32b7a0458d3bdc430643aa553b Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 17 Jul 2017 14:15:32 +0200 Subject: mlxsw: spectrum: Improve IPv6 unregistered multicast flooding Up until now IPv6 unregistered multicast traffic would be flooded like broadcast, even when MLD snooping was enabled on the bridge. This was intentional as MLD packet traps were missing, preventing the bridge driver from programming MDB entries to the device. Previous patch added these traps, so we can now finally flood IPv6 unregistered multicast packets to specific ports via the multicast table instead of flooding them to all ports via the broadcast table. 
Signed-off-by: Arkadi Sharshevsky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index 6afbe9ec64e2..bbd238e50f05 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -109,7 +109,6 @@ static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_BROADCAST] = 1, - [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP] = 1, [MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL] = 1, [MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST] = 1, @@ -117,6 +116,7 @@ static const int mlxsw_sp_sfgc_bc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { static const int mlxsw_sp_sfgc_mc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4] = 1, + [MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6] = 1, }; static const int *mlxsw_sp_packet_type_sfgc_types[] = { -- cgit v1.2.3-55-g7522 From 90382dca61137444cb8f639db0a76c4d084763d7 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:26:24 -0700 Subject: ixgbe: NULL xdp_tx rings on resource cleanup tx_rings and rx_rings are cleaned up on close paths in ixgbe driver however, xdp_rings are not. Set the xdp_rings to NULL here so that we can use the pointer to indicate if the XDP rings are initialized. Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index b45fdc98033d..f1bfae0c41d0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -1018,8 +1018,12 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; struct ixgbe_ring *ring; - ixgbe_for_each_ring(ring, q_vector->tx) - adapter->tx_ring[ring->queue_index] = NULL; + ixgbe_for_each_ring(ring, q_vector->tx) { + if (ring_is_xdp(ring)) + adapter->xdp_ring[ring->queue_index] = NULL; + else + adapter->tx_ring[ring->queue_index] = NULL; + } ixgbe_for_each_ring(ring, q_vector->rx) adapter->rx_ring[ring->queue_index] = NULL; -- cgit v1.2.3-55-g7522 From d445516966dcb2924741b13b27738b54df2af01a Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:26:45 -0700 Subject: net: xdp: support xdp generic on virtual devices XDP generic allows users to test XDP programs and/or run them with degraded performance on devices that do not yet support XDP. For testing I typically test eBPF programs using a set of veth devices. This allows testing topologies that would otherwise be difficult to setup especially in the early stages of development. This patch adds a xdp generic hook to the netif_rx_internal() function which is called from dev_forward_skb(). With this addition attaching XDP programs to veth devices works as expected! Also I noticed multiple drivers using netif_rx(). These devices will also benefit and generic XDP will work for them as well. 
Signed-off-by: John Fastabend Tested-by: Andy Gospodarek Acked-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- net/core/dev.c | 208 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 113 insertions(+), 95 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 02440518dd69..a1ed7b41b3e8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3865,6 +3865,107 @@ drop: return NET_RX_DROP; } +static u32 netif_receive_generic_xdp(struct sk_buff *skb, + struct bpf_prog *xdp_prog) +{ + struct xdp_buff xdp; + u32 act = XDP_DROP; + void *orig_data; + int hlen, off; + u32 mac_len; + + /* Reinjected packets coming from act_mirred or similar should + * not get XDP generic processing. + */ + if (skb_cloned(skb)) + return XDP_PASS; + + if (skb_linearize(skb)) + goto do_drop; + + /* The XDP program wants to see the packet starting at the MAC + * header. + */ + mac_len = skb->data - skb_mac_header(skb); + hlen = skb_headlen(skb) + mac_len; + xdp.data = skb->data - mac_len; + xdp.data_end = xdp.data + hlen; + xdp.data_hard_start = skb->data - skb_headroom(skb); + orig_data = xdp.data; + + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + off = xdp.data - orig_data; + if (off > 0) + __skb_pull(skb, off); + else if (off < 0) + __skb_push(skb, -off); + + switch (act) { + case XDP_TX: + __skb_push(skb, mac_len); + /* fall through */ + case XDP_PASS: + break; + + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(skb->dev, xdp_prog, act); + /* fall through */ + case XDP_DROP: + do_drop: + kfree_skb(skb); + break; + } + + return act; +} + +/* When doing generic XDP we have to bypass the qdisc layer and the + * network taps in order to match in-driver-XDP behavior. 
+ */ +static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) +{ + struct net_device *dev = skb->dev; + struct netdev_queue *txq; + bool free_skb = true; + int cpu, rc; + + txq = netdev_pick_tx(dev, skb, NULL); + cpu = smp_processor_id(); + HARD_TX_LOCK(dev, txq, cpu); + if (!netif_xmit_stopped(txq)) { + rc = netdev_start_xmit(skb, dev, txq, 0); + if (dev_xmit_complete(rc)) + free_skb = false; + } + HARD_TX_UNLOCK(dev, txq); + if (free_skb) { + trace_xdp_exception(dev, xdp_prog, XDP_TX); + kfree_skb(skb); + } +} + +static struct static_key generic_xdp_needed __read_mostly; + +static int do_xdp_generic(struct sk_buff *skb) +{ + struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog); + + if (xdp_prog) { + u32 act = netif_receive_generic_xdp(skb, xdp_prog); + + if (act != XDP_PASS) { + if (act == XDP_TX) + generic_xdp_tx(skb, xdp_prog); + return XDP_DROP; + } + } + return XDP_PASS; +} + static int netif_rx_internal(struct sk_buff *skb) { int ret; @@ -3872,6 +3973,14 @@ static int netif_rx_internal(struct sk_buff *skb) net_timestamp_check(netdev_tstamp_prequeue, skb); trace_netif_rx(skb); + + if (static_key_false(&generic_xdp_needed)) { + int ret = do_xdp_generic(skb); + + if (ret != XDP_PASS) + return NET_RX_DROP; + } + #ifdef CONFIG_RPS if (static_key_false(&rps_needed)) { struct rps_dev_flow voidflow, *rflow = &voidflow; @@ -4338,8 +4447,6 @@ static int __netif_receive_skb(struct sk_buff *skb) return ret; } -static struct static_key generic_xdp_needed __read_mostly; - static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp) { struct bpf_prog *old = rtnl_dereference(dev->xdp_prog); @@ -4373,89 +4480,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp) return ret; } -static u32 netif_receive_generic_xdp(struct sk_buff *skb, - struct bpf_prog *xdp_prog) -{ - struct xdp_buff xdp; - u32 act = XDP_DROP; - void *orig_data; - int hlen, off; - u32 mac_len; - - /* Reinjected packets coming from act_mirred or similar should - * not get XDP generic processing. - */ - if (skb_cloned(skb)) - return XDP_PASS; - - if (skb_linearize(skb)) - goto do_drop; - - /* The XDP program wants to see the packet starting at the MAC - * header. - */ - mac_len = skb->data - skb_mac_header(skb); - hlen = skb_headlen(skb) + mac_len; - xdp.data = skb->data - mac_len; - xdp.data_end = xdp.data + hlen; - xdp.data_hard_start = skb->data - skb_headroom(skb); - orig_data = xdp.data; - - act = bpf_prog_run_xdp(xdp_prog, &xdp); - - off = xdp.data - orig_data; - if (off > 0) - __skb_pull(skb, off); - else if (off < 0) - __skb_push(skb, -off); - - switch (act) { - case XDP_TX: - __skb_push(skb, mac_len); - /* fall through */ - case XDP_PASS: - break; - - default: - bpf_warn_invalid_xdp_action(act); - /* fall through */ - case XDP_ABORTED: - trace_xdp_exception(skb->dev, xdp_prog, act); - /* fall through */ - case XDP_DROP: - do_drop: - kfree_skb(skb); - break; - } - - return act; -} - -/* When doing generic XDP we have to bypass the qdisc layer and the - * network taps in order to match in-driver-XDP behavior. 
- */ -static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) -{ - struct net_device *dev = skb->dev; - struct netdev_queue *txq; - bool free_skb = true; - int cpu, rc; - - txq = netdev_pick_tx(dev, skb, NULL); - cpu = smp_processor_id(); - HARD_TX_LOCK(dev, txq, cpu); - if (!netif_xmit_stopped(txq)) { - rc = netdev_start_xmit(skb, dev, txq, 0); - if (dev_xmit_complete(rc)) - free_skb = false; - } - HARD_TX_UNLOCK(dev, txq); - if (free_skb) { - trace_xdp_exception(dev, xdp_prog, XDP_TX); - kfree_skb(skb); - } -} - static int netif_receive_skb_internal(struct sk_buff *skb) { int ret; @@ -4468,17 +4492,11 @@ static int netif_receive_skb_internal(struct sk_buff *skb) rcu_read_lock(); if (static_key_false(&generic_xdp_needed)) { - struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog); - - if (xdp_prog) { - u32 act = netif_receive_generic_xdp(skb, xdp_prog); + int ret = do_xdp_generic(skb); - if (act != XDP_PASS) { - rcu_read_unlock(); - if (act == XDP_TX) - generic_xdp_tx(skb, xdp_prog); - return NET_RX_DROP; - } + if (ret != XDP_PASS) { + rcu_read_unlock(); + return NET_RX_DROP; } } -- cgit v1.2.3-55-g7522 From 814abfabef3ceed390c10d06a0cc69a86454b6cf Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:27:07 -0700 Subject: xdp: add bpf_redirect helper function This adds support for a bpf_redirect helper function to the XDP infrastructure. For now this only supports redirecting to the egress path of a port. In order to support drivers handling a xdp_buff natively this patches uses a new ndo operation ndo_xdp_xmit() that takes pushes a xdp_buff to the specified device. If the program specifies either (a) an unknown device or (b) a device that does not support the operation a BPF warning is thrown and the XDP_ABORTED error code is returned. Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- include/linux/filter.h | 4 ++++ include/linux/netdevice.h | 6 ++++++ include/uapi/linux/bpf.h | 1 + net/core/filter.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 63 insertions(+) diff --git a/include/linux/filter.h b/include/linux/filter.h index bfef1e5734f8..64cae7a08148 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -711,7 +711,11 @@ bool bpf_helper_changes_pkt_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); + +int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp); + void bpf_warn_invalid_xdp_action(u32 act); +void bpf_warn_invalid_xdp_redirect(u32 ifindex); #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 779b23595596..77f5376005e6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -66,6 +66,7 @@ struct mpls_dev; /* UDP Tunnel offloads */ struct udp_tunnel_info; struct bpf_prog; +struct xdp_buff; void netdev_set_default_ethtool_ops(struct net_device *dev, const struct ethtool_ops *ops); @@ -1138,6 +1139,9 @@ struct xfrmdev_ops { * int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp); * This function is used to set or query state related to XDP on the * netdevice. See definition of enum xdp_netdev_command for details. + * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp); + * This function is used to submit a XDP packet for transmit on a + * netdevice. 
* */ struct net_device_ops { @@ -1323,6 +1327,8 @@ struct net_device_ops { int needed_headroom); int (*ndo_xdp)(struct net_device *dev, struct netdev_xdp *xdp); + int (*ndo_xdp_xmit)(struct net_device *dev, + struct xdp_buff *xdp); }; /** diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e99e3e6f8b37..4dbb7a3f4677 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -717,6 +717,7 @@ enum xdp_action { XDP_DROP, XDP_PASS, XDP_TX, + XDP_REDIRECT, }; /* user accessible metadata for XDP packet hook diff --git a/net/core/filter.c b/net/core/filter.c index c7f737058d89..d606a66d1040 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2412,6 +2412,51 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { .arg2_type = ARG_ANYTHING, }; +static int __bpf_tx_xdp(struct net_device *dev, struct xdp_buff *xdp) +{ + if (dev->netdev_ops->ndo_xdp_xmit) { + dev->netdev_ops->ndo_xdp_xmit(dev, xdp); + return 0; + } + bpf_warn_invalid_xdp_redirect(dev->ifindex); + return -EOPNOTSUPP; +} + +int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp) +{ + struct redirect_info *ri = this_cpu_ptr(&redirect_info); + + dev = dev_get_by_index_rcu(dev_net(dev), ri->ifindex); + ri->ifindex = 0; + if (unlikely(!dev)) { + bpf_warn_invalid_xdp_redirect(ri->ifindex); + return -EINVAL; + } + + return __bpf_tx_xdp(dev, xdp); +} +EXPORT_SYMBOL_GPL(xdp_do_redirect); + +BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) +{ + struct redirect_info *ri = this_cpu_ptr(&redirect_info); + + if (unlikely(flags)) + return XDP_ABORTED; + + ri->ifindex = ifindex; + ri->flags = flags; + return XDP_REDIRECT; +} + +static const struct bpf_func_proto bpf_xdp_redirect_proto = { + .func = bpf_xdp_redirect, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_ANYTHING, + .arg2_type = ARG_ANYTHING, +}; + bool bpf_helper_changes_pkt_data(void *func) { if (func == bpf_skb_vlan_push || @@ -3011,6 +3056,8 @@ xdp_func_proto(enum bpf_func_id func_id) return &bpf_get_smp_processor_id_proto; case BPF_FUNC_xdp_adjust_head: return &bpf_xdp_adjust_head_proto; + case BPF_FUNC_redirect: + return &bpf_xdp_redirect_proto; default: return bpf_base_func_proto(func_id); } @@ -3310,6 +3357,11 @@ void bpf_warn_invalid_xdp_action(u32 act) } EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); +void bpf_warn_invalid_xdp_redirect(u32 ifindex) +{ + WARN_ONCE(1, "Illegal XDP redirect to unsupported device ifindex(%i)\n", ifindex); +} + static bool __is_valid_sock_ops_access(int off, int size) { if (off < 0 || off >= sizeof(struct bpf_sock_ops)) -- cgit v1.2.3-55-g7522 From 832622e6bd1884c95475094941914969ff82b329 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:27:28 -0700 Subject: xdp: sample program for new bpf_redirect helper This implements a sample program for testing bpf_redirect. It reports the number of packets redirected per second and as input takes the ifindex of the device to run the xdp program on and the ifindex of the interface to redirect packets to. Signed-off-by: John Fastabend Tested-by: Andy Gospodarek Acked-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- samples/bpf/Makefile | 4 ++ samples/bpf/xdp_redirect_kern.c | 81 +++++++++++++++++++++++++++++++ samples/bpf/xdp_redirect_user.c | 102 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 187 insertions(+) create mode 100644 samples/bpf/xdp_redirect_kern.c create mode 100644 samples/bpf/xdp_redirect_user.c diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 87246be6feb8..97734ced947e 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -37,6 +37,7 @@ hostprogs-y += xdp_tx_iptunnel hostprogs-y += test_map_in_map hostprogs-y += per_socket_stats_example hostprogs-y += load_sock_ops +hostprogs-y += xdp_redirect # Libbpf dependencies LIBBPF := ../../tools/lib/bpf/bpf.o @@ -78,6 +79,7 @@ lwt_len_hist-objs := bpf_load.o $(LIBBPF) lwt_len_hist_user.o xdp_tx_iptunnel-objs := bpf_load.o $(LIBBPF) xdp_tx_iptunnel_user.o test_map_in_map-objs := bpf_load.o $(LIBBPF) test_map_in_map_user.o per_socket_stats_example-objs := $(LIBBPF) cookie_uid_helper_example.o +xdp_redirect-objs := bpf_load.o $(LIBBPF) xdp_redirect_user.o # Tell kbuild to always build the programs always := $(hostprogs-y) @@ -119,6 +121,7 @@ always += tcp_bufs_kern.o always += tcp_cong_kern.o always += tcp_iw_kern.o always += tcp_clamp_kern.o +always += xdp_redirect_kern.o HOSTCFLAGS += -I$(objtree)/usr/include HOSTCFLAGS += -I$(srctree)/tools/lib/ @@ -155,6 +158,7 @@ HOSTLOADLIBES_tc_l2_redirect += -l elf HOSTLOADLIBES_lwt_len_hist += -l elf HOSTLOADLIBES_xdp_tx_iptunnel += -lelf HOSTLOADLIBES_test_map_in_map += -lelf +HOSTLOADLIBES_xdp_redirect += -lelf # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang diff --git a/samples/bpf/xdp_redirect_kern.c b/samples/bpf/xdp_redirect_kern.c new file mode 100644 index 000000000000..a34ad457a684 --- /dev/null +++ b/samples/bpf/xdp_redirect_kern.c @@ -0,0 +1,81 @@ +/* Copyright (c) 2016 John Fastabend + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +#define KBUILD_MODNAME "foo" +#include +#include +#include +#include +#include +#include +#include +#include "bpf_helpers.h" + +struct bpf_map_def SEC("maps") tx_port = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") rxcnt = { + .type = BPF_MAP_TYPE_PERCPU_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(long), + .max_entries = 1, +}; + + +static void swap_src_dst_mac(void *data) +{ + unsigned short *p = data; + unsigned short dst[3]; + + dst[0] = p[0]; + dst[1] = p[1]; + dst[2] = p[2]; + p[0] = p[3]; + p[1] = p[4]; + p[2] = p[5]; + p[3] = dst[0]; + p[4] = dst[1]; + p[5] = dst[2]; +} + +SEC("xdp_redirect") +int xdp_redirect_prog(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct ethhdr *eth = data; + int rc = XDP_DROP; + int *ifindex, port = 0; + long *value; + u32 key = 0; + u64 nh_off; + + nh_off = sizeof(*eth); + if (data + nh_off > data_end) + return rc; + + ifindex = bpf_map_lookup_elem(&tx_port, &port); + if (!ifindex) + return rc; + + value = bpf_map_lookup_elem(&rxcnt, &key); + if (value) + *value += 1; + + swap_src_dst_mac(data); + return bpf_redirect(*ifindex, 0); +} + +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c new file mode 100644 index 000000000000..761a91d5d7b4 --- /dev/null +++ b/samples/bpf/xdp_redirect_user.c @@ -0,0 +1,102 @@ +/* Copyright (c) 2016 John Fastabend + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_load.h" +#include "bpf_util.h" +#include "libbpf.h" + +static int ifindex_in; +static int ifindex_out; + +static void int_exit(int sig) +{ + set_link_xdp_fd(ifindex_in, -1, 0); + exit(0); +} + +/* simple per-protocol drop counter + */ +static void poll_stats(int interval, int ifindex) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + __u64 values[nr_cpus], prev[nr_cpus]; + + memset(prev, 0, sizeof(prev)); + + while (1) { + __u64 sum = 0; + __u32 key = 0; + int i; + + sleep(interval); + assert(bpf_map_lookup_elem(map_fd[1], &key, values) == 0); + for (i = 0; i < nr_cpus; i++) + sum += (values[i] - prev[i]); + if (sum) + printf("ifindex %i: %10llu pkt/s\n", + ifindex, sum / interval); + memcpy(prev, values, sizeof(values)); + } +} + +int main(int ac, char **argv) +{ + char filename[256]; + int ret, key = 0; + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + + if (ac != 3) { + printf("usage: %s IFINDEX_IN IFINDEX_OUT\n", argv[0]); + return 1; + } + + ifindex_in = strtoul(argv[1], NULL, 0); + ifindex_out = strtoul(argv[2], NULL, 0); + + if (load_bpf_file(filename)) { + printf("%s", bpf_log_buf); + return 1; + } + + if (!prog_fd[0]) { + printf("load_bpf_file: %s\n", strerror(errno)); + return 1; + } + + signal(SIGINT, int_exit); + + if (set_link_xdp_fd(ifindex_in, prog_fd[0], 0) < 0) { + printf("link set xdp fd failed\n"); + return 1; + } + + /* bpf redirect port */ + ret = bpf_map_update_elem(map_fd[0], &key, &ifindex_out, 0); + if (ret) { + perror("bpf_update_elem"); + goto out; + } + + poll_stats(2, ifindex_out); + +out: + return 0; +} -- cgit v1.2.3-55-g7522 From 6103aa96ec077c976e851e0b89cc2446cb76573d Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:27:50 -0700 Subject: net: implement XDP_REDIRECT for xdp generic Add support for redirect to xdp generic creating a fall back for devices that do not yet have support and allowing test infrastructure using veth pairs to be built. Signed-off-by: John Fastabend Tested-by: Andy Gospodarek Acked-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- include/linux/filter.h | 1 + net/core/dev.c | 22 ++++++++++++++++++++-- net/core/filter.c | 26 ++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 64cae7a08148..10df7daf5ec6 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -712,6 +712,7 @@ bool bpf_helper_changes_pkt_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); +int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb); int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp); void bpf_warn_invalid_xdp_action(u32 act); diff --git a/net/core/dev.c b/net/core/dev.c index a1ed7b41b3e8..9f3f4083ada5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3902,6 +3902,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, __skb_push(skb, -off); switch (act) { + case XDP_REDIRECT: case XDP_TX: __skb_push(skb, mac_len); /* fall through */ @@ -3956,14 +3957,27 @@ static int do_xdp_generic(struct sk_buff *skb) if (xdp_prog) { u32 act = netif_receive_generic_xdp(skb, xdp_prog); + int err; if (act != XDP_PASS) { - if (act == XDP_TX) + switch (act) { + case XDP_REDIRECT: + err = xdp_do_generic_redirect(skb->dev, skb); + if (err) + goto out_redir; + /* fallthru to submit skb */ + case XDP_TX: generic_xdp_tx(skb, xdp_prog); + break; + } return XDP_DROP; } } return XDP_PASS; +out_redir: + trace_xdp_exception(skb->dev, xdp_prog, XDP_REDIRECT); + kfree_skb(skb); + return XDP_DROP; } static int netif_rx_internal(struct sk_buff *skb) @@ -3977,8 +3991,12 @@ static int netif_rx_internal(struct sk_buff *skb) if (static_key_false(&generic_xdp_needed)) { int ret = do_xdp_generic(skb); + /* Consider XDP consuming the packet a success from + * the netdev point of view we do not want to count + * this as an error. + */ if (ret != XDP_PASS) - return NET_RX_DROP; + return NET_RX_SUCCESS; } #ifdef CONFIG_RPS diff --git a/net/core/filter.c b/net/core/filter.c index d606a66d1040..eeb713461c25 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2437,6 +2437,32 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp) } EXPORT_SYMBOL_GPL(xdp_do_redirect); +int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb) +{ + struct redirect_info *ri = this_cpu_ptr(&redirect_info); + unsigned int len; + + dev = dev_get_by_index_rcu(dev_net(dev), ri->ifindex); + ri->ifindex = 0; + if (unlikely(!dev)) { + bpf_warn_invalid_xdp_redirect(ri->ifindex); + goto err; + } + + if (unlikely(!(dev->flags & IFF_UP))) + goto err; + + len = dev->mtu + dev->hard_header_len + VLAN_HLEN; + if (skb->len > len) + goto err; + + skb->dev = dev; + return 0; +err: + return -EINVAL; +} +EXPORT_SYMBOL_GPL(xdp_do_generic_redirect); + BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) { struct redirect_info *ri = this_cpu_ptr(&redirect_info); -- cgit v1.2.3-55-g7522 From 6453073987ba392510ab6c8b657844a9312c67f7 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:28:12 -0700 Subject: ixgbe: add initial support for xdp redirect There are optimizations we can add after the basic feature is enabled. But, for now keep the patch simple. Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 41 ++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f1dbdf26d8e1..3db04736a048 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2214,7 +2214,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, struct xdp_buff *xdp) { - int result = IXGBE_XDP_PASS; + int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; u32 act; @@ -2231,6 +2231,13 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, case XDP_TX: result = ixgbe_xmit_xdp_ring(adapter, xdp); break; + case XDP_REDIRECT: + err = xdp_do_redirect(adapter->netdev, xdp); + if (!err) + result = IXGBE_XDP_TX; + else + result = IXGBE_XDP_CONSUMED; + break; default: bpf_warn_invalid_xdp_action(act); /* fallthrough */ @@ -9823,6 +9830,37 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_xdp *xdp) } } +static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; + int err; + + if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) + return -EINVAL; + + /* During program transitions its possible adapter->xdp_prog is assigned + * but ring has not been configured yet. In this case simply abort xmit. + */ + ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; + if (unlikely(!ring)) + return -EINVAL; + + err = ixgbe_xmit_xdp_ring(adapter, xdp); + if (err != IXGBE_XDP_TX) + return -ENOMEM; + + /* Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. + */ + wmb(); + + ring = adapter->xdp_ring[smp_processor_id()]; + writel(ring->next_to_use, ring->tail); + + return 0; +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -9869,6 +9907,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, .ndo_features_check = ixgbe_features_check, .ndo_xdp = ixgbe_xdp, + .ndo_xdp_xmit = ixgbe_xdp_xmit, }; /** -- cgit v1.2.3-55-g7522 From 5acaee0a8964c9bab7775ab8bedcd1f66a2a1011 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:28:35 -0700 Subject: xdp: add trace event for xdp redirect This adds a trace event for xdp redirect which may help when debugging XDP programs that use redirect bpf commands. Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- include/linux/filter.h | 4 +++- include/trace/events/xdp.h | 31 ++++++++++++++++++++++++++- net/core/filter.c | 13 +++++++---- 4 files changed, 43 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3db04736a048..38f7ff97d636 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2232,7 +2232,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, result = ixgbe_xmit_xdp_ring(adapter, xdp); break; case XDP_REDIRECT: - err = xdp_do_redirect(adapter->netdev, xdp); + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); if (!err) result = IXGBE_XDP_TX; else diff --git a/include/linux/filter.h b/include/linux/filter.h index 10df7daf5ec6..ce8211fa91c7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -713,7 +713,9 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb); -int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp); +int xdp_do_redirect(struct net_device *dev, + struct xdp_buff *xdp, + struct bpf_prog *prog); void bpf_warn_invalid_xdp_action(u32 act); void bpf_warn_invalid_xdp_redirect(u32 ifindex); diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index 1b61357d3f57..7b1eb7b4be41 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -12,7 +12,8 @@ FN(ABORTED) \ FN(DROP) \ FN(PASS) \ - FN(TX) + FN(TX) \ + FN(REDIRECT) #define __XDP_ACT_TP_FN(x) \ TRACE_DEFINE_ENUM(XDP_##x); @@ -48,6 +49,34 @@ TRACE_EVENT(xdp_exception, __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB)) ); +TRACE_EVENT(xdp_redirect, + + TP_PROTO(const struct net_device *from, + const struct net_device *to, + const struct bpf_prog *xdp, u32 act), + + TP_ARGS(from, to, xdp, act), + + TP_STRUCT__entry( + __string(name_from, from->name) + __string(name_to, to->name) + __array(u8, prog_tag, 8) + __field(u32, act) + ), + + TP_fast_assign( + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag)); + memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag)); + __assign_str(name_from, from->name); + __assign_str(name_to, to->name); + __entry->act = act; + ), + + TP_printk("prog=%s from=%s to=%s action=%s", + __print_hex_str(__entry->prog_tag, 8), + __get_str(name_from), __get_str(name_to), + __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB)) +); #endif /* _TRACE_XDP_H */ #include diff --git a/net/core/filter.c b/net/core/filter.c index eeb713461c25..e30d38b27f21 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -55,6 +55,7 @@ #include #include #include +#include /** * sk_filter_trim_cap - run a packet through a socket filter @@ -2422,18 +2423,22 @@ static int __bpf_tx_xdp(struct net_device *dev, struct xdp_buff *xdp) return -EOPNOTSUPP; } -int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp) +int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog) { struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct net_device *fwd; - dev = dev_get_by_index_rcu(dev_net(dev), ri->ifindex); + fwd = dev_get_by_index_rcu(dev_net(dev), ri->ifindex); ri->ifindex = 0; - if (unlikely(!dev)) { + if (unlikely(!fwd)) { bpf_warn_invalid_xdp_redirect(ri->ifindex); return -EINVAL; } - return __bpf_tx_xdp(dev, xdp); + trace_xdp_redirect(dev, fwd, xdp_prog, 
XDP_REDIRECT); + + return __bpf_tx_xdp(fwd, xdp); } EXPORT_SYMBOL_GPL(xdp_do_redirect); -- cgit v1.2.3-55-g7522 From 546ac1ffb70d25b56c1126940e5ec639c4dd7413 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:28:56 -0700 Subject: bpf: add devmap, a map for storing net device references Device map (devmap) is a BPF map, primarily useful for networking applications, that uses a key to lookup a reference to a netdevice. The map provides a clean way for BPF programs to build virtual port to physical port maps. Additionally, it provides a scoping function for the redirect action itself allowing multiple optimizations. Future patches will leverage the map to provide batching at the XDP layer. Another optimization/feature, that is not yet implemented, would be to support multiple netdevices per key to support efficient multicast and broadcast support. Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- include/linux/bpf_types.h | 3 + include/uapi/linux/bpf.h | 1 + kernel/bpf/Makefile | 3 + kernel/bpf/devmap.c | 264 ++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 8 + tools/testing/selftests/bpf/test_maps.c | 15 ++ 6 files changed, 294 insertions(+) create mode 100644 kernel/bpf/devmap.c diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 3d137c33d664..b1e1035ca24b 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -35,3 +35,6 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops) #endif BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) +#ifdef CONFIG_NET +BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) +#endif diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4dbb7a3f4677..ecbb0e7e15bc 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -104,6 +104,7 @@ enum bpf_map_type { BPF_MAP_TYPE_LPM_TRIE, BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH_OF_MAPS, + BPF_MAP_TYPE_DEVMAP, }; enum bpf_prog_type { diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index e1e5e658f2db..48e92705be59 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -2,6 +2,9 @@ obj-y := core.o obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o +ifeq ($(CONFIG_NET),y) +obj-$(CONFIG_BPF_SYSCALL) += devmap.o +endif ifeq ($(CONFIG_PERF_EVENTS),y) obj-$(CONFIG_BPF_SYSCALL) += stackmap.o endif diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c new file mode 100644 index 000000000000..1a878356bd37 --- /dev/null +++ b/kernel/bpf/devmap.c @@ -0,0 +1,264 @@ +/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +/* Devmaps primary use is as a backend map for XDP BPF helper call + * bpf_redirect_map(). Because XDP is mostly concerned with performance we + * spent some effort to ensure the datapath with redirect maps does not use + * any locking. This is a quick note on the details. 
+ * + * We have three possible paths to get into the devmap control plane bpf + * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall + * will invoke an update, delete, or lookup operation. To ensure updates and + * deletes appear atomic from the datapath side xchg() is used to modify the + * netdev_map array. Then because the datapath does a lookup into the netdev_map + * array (read-only) from an RCU critical section we use call_rcu() to wait for + * an rcu grace period before free'ing the old data structures. This ensures the + * datapath always has a valid copy. However, the datapath does a "flush" + * operation that pushes any pending packets in the driver outside the RCU + * critical section. Each bpf_dtab_netdev tracks these pending operations using + * an atomic per-cpu bitmap. The bpf_dtab_netdev object will not be destroyed + * until all bits are cleared indicating outstanding flush operations have + * completed. + * + * BPF syscalls may race with BPF program calls on any of the update, delete + * or lookup operations. As noted above the xchg() operation also keep the + * netdev_map consistent in this case. From the devmap side BPF programs + * calling into these operations are the same as multiple user space threads + * making system calls. + */ +#include +#include +#include +#include +#include "percpu_freelist.h" +#include "bpf_lru_list.h" +#include "map_in_map.h" + +struct bpf_dtab_netdev { + struct net_device *dev; + int key; + struct rcu_head rcu; + struct bpf_dtab *dtab; +}; + +struct bpf_dtab { + struct bpf_map map; + struct bpf_dtab_netdev **netdev_map; +}; + +static struct bpf_map *dev_map_alloc(union bpf_attr *attr) +{ + struct bpf_dtab *dtab; + u64 cost; + int err; + + /* check sanity of attributes */ + if (attr->max_entries == 0 || attr->key_size != 4 || + attr->value_size != 4 || attr->map_flags) + return ERR_PTR(-EINVAL); + + /* if value_size is bigger, the user space won't be able to + * access the elements. + */ + if (attr->value_size > KMALLOC_MAX_SIZE) + return ERR_PTR(-E2BIG); + + dtab = kzalloc(sizeof(*dtab), GFP_USER); + if (!dtab) + return ERR_PTR(-ENOMEM); + + /* mandatory map attributes */ + dtab->map.map_type = attr->map_type; + dtab->map.key_size = attr->key_size; + dtab->map.value_size = attr->value_size; + dtab->map.max_entries = attr->max_entries; + dtab->map.map_flags = attr->map_flags; + + err = -ENOMEM; + + /* make sure page count doesn't overflow */ + cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); + if (cost >= U32_MAX - PAGE_SIZE) + goto free_dtab; + + dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + + /* if map size is larger than memlock limit, reject it early */ + err = bpf_map_precharge_memlock(dtab->map.pages); + if (err) + goto free_dtab; + + dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * + sizeof(struct bpf_dtab_netdev *)); + if (!dtab->netdev_map) + goto free_dtab; + + return &dtab->map; + +free_dtab: + kfree(dtab); + return ERR_PTR(err); +} + +static void dev_map_free(struct bpf_map *map) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + int i; + + /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, + * so the programs (can be more than one that used this map) were + * disconnected from events. Wait for outstanding critical sections in + * these programs to complete. The rcu critical section only guarantees + * no further reads against netdev_map. It does __not__ ensure pending + * flush operations (if any) are complete. 
+ */ + synchronize_rcu(); + + for (i = 0; i < dtab->map.max_entries; i++) { + struct bpf_dtab_netdev *dev; + + dev = dtab->netdev_map[i]; + if (!dev) + continue; + + dev_put(dev->dev); + kfree(dev); + } + + /* At this point bpf program is detached and all pending operations + * _must_ be complete + */ + bpf_map_area_free(dtab->netdev_map); + kfree(dtab); +} + +static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + u32 index = key ? *(u32 *)key : U32_MAX; + u32 *next = (u32 *)next_key; + + if (index >= dtab->map.max_entries) { + *next = 0; + return 0; + } + + if (index == dtab->map.max_entries - 1) + return -ENOENT; + + *next = index + 1; + return 0; +} + +/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or + * update happens in parallel here a dev_put wont happen until after reading the + * ifindex. + */ +static void *dev_map_lookup_elem(struct bpf_map *map, void *key) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + struct bpf_dtab_netdev *dev; + u32 i = *(u32 *)key; + + if (i >= map->max_entries) + return NULL; + + dev = READ_ONCE(dtab->netdev_map[i]); + return dev ? &dev->dev->ifindex : NULL; +} + +static void __dev_map_entry_free(struct rcu_head *rcu) +{ + struct bpf_dtab_netdev *old_dev; + + old_dev = container_of(rcu, struct bpf_dtab_netdev, rcu); + dev_put(old_dev->dev); + kfree(old_dev); +} + +static int dev_map_delete_elem(struct bpf_map *map, void *key) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + struct bpf_dtab_netdev *old_dev; + int k = *(u32 *)key; + + if (k >= map->max_entries) + return -EINVAL; + + /* Use synchronize_rcu() here to ensure any rcu critical sections + * have completed, but this does not guarantee a flush has happened + * yet. Because driver side rcu_read_lock/unlock only protects the + * running XDP program. However, for pending flush operations the + * dev and ctx are stored in another per cpu map. And additionally, + * the driver tear down ensures all soft irqs are complete before + * removing the net device in the case of dev_put equals zero. + */ + old_dev = xchg(&dtab->netdev_map[k], NULL); + if (old_dev) + call_rcu(&old_dev->rcu, __dev_map_entry_free); + return 0; +} + +static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, + u64 map_flags) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + struct net *net = current->nsproxy->net_ns; + struct bpf_dtab_netdev *dev, *old_dev; + u32 i = *(u32 *)key; + u32 ifindex = *(u32 *)value; + + if (unlikely(map_flags > BPF_EXIST)) + return -EINVAL; + + if (unlikely(i >= dtab->map.max_entries)) + return -E2BIG; + + if (unlikely(map_flags == BPF_NOEXIST)) + return -EEXIST; + + if (!ifindex) { + dev = NULL; + } else { + dev = kmalloc(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN); + if (!dev) + return -ENOMEM; + + dev->dev = dev_get_by_index(net, ifindex); + if (!dev->dev) { + kfree(dev); + return -EINVAL; + } + + dev->key = i; + dev->dtab = dtab; + } + + /* Use call_rcu() here to ensure rcu critical sections have completed + * Remembering the driver side flush operation will happen before the + * net device is removed. 
+ */ + old_dev = xchg(&dtab->netdev_map[i], dev); + if (old_dev) + call_rcu(&old_dev->rcu, __dev_map_entry_free); + + return 0; +} + +const struct bpf_map_ops dev_map_ops = { + .map_alloc = dev_map_alloc, + .map_free = dev_map_free, + .map_get_next_key = dev_map_get_next_key, + .map_lookup_elem = dev_map_lookup_elem, + .map_update_elem = dev_map_update_elem, + .map_delete_elem = dev_map_delete_elem, +}; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6a86723c5b64..4016774d5cca 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1276,6 +1276,14 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; + /* devmap returns a pointer to a live net_device ifindex that we cannot + * allow to be modified from bpf side. So do not allow lookup elements + * for now. + */ + case BPF_MAP_TYPE_DEVMAP: + if (func_id == BPF_FUNC_map_lookup_elem) + goto error; + break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 79601c81e169..36d6ac3f0c1c 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -438,6 +438,21 @@ static void test_arraymap_percpu_many_keys(void) close(fd); } +static void test_devmap(int task, void *data) +{ + int next_key, fd; + __u32 key, value; + + fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(key), sizeof(value), + 2, 0); + if (fd < 0) { + printf("Failed to create arraymap '%s'!\n", strerror(errno)); + exit(1); + } + + close(fd); +} + #define MAP_SIZE (32 * 1024) static void test_map_large(void) -- cgit v1.2.3-55-g7522 From 97f91a7cf04ff605845c20948b8a80e54cbd3376 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:29:18 -0700 Subject: bpf: add bpf_redirect_map helper routine BPF programs can use the devmap with a bpf_redirect_map() helper routine to forward packets to netdevice in map. Signed-off-by: John Fastabend Signed-off-by: Jesper Dangaard Brouer Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/linux/bpf.h | 3 +++ include/uapi/linux/bpf.h | 8 +++++++- kernel/bpf/devmap.c | 12 +++++++++++ kernel/bpf/verifier.c | 4 ++++ net/core/filter.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 78 insertions(+), 1 deletion(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index b69e7a5869ff..d0d3281ac678 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -379,4 +379,7 @@ extern const struct bpf_func_proto bpf_get_stackid_proto; void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +/* Map specifics */ +struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); + #endif /* _LINUX_BPF_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index ecbb0e7e15bc..1106a8c4cd36 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -348,6 +348,11 @@ union bpf_attr { * @flags: bit 0 - if set, redirect to ingress instead of egress * other bits - reserved * Return: TC_ACT_REDIRECT + * int bpf_redirect_map(key, map, flags) + * redirect to endpoint in map + * @key: index in map to lookup + * @map: fd of map to do lookup in + * @flags: -- * * u32 bpf_get_route_realm(skb) * retrieve a dst's tclassid @@ -592,7 +597,8 @@ union bpf_attr { FN(get_socket_uid), \ FN(set_hash), \ FN(setsockopt), \ - FN(skb_adjust_room), + FN(skb_adjust_room), \ + FN(redirect_map), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 1a878356bd37..36dc13deb2e1 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -159,6 +159,18 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) return 0; } +struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + struct bpf_dtab_netdev *dev; + + if (key >= map->max_entries) + return NULL; + + dev = READ_ONCE(dtab->netdev_map[key]); + return dev ? dev->dev : NULL; +} + /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or * update happens in parallel here a dev_put wont happen until after reading the * ifindex. 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 4016774d5cca..df05d65f0c87 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1312,6 +1312,10 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; + case BPF_FUNC_redirect_map: + if (map->map_type != BPF_MAP_TYPE_DEVMAP) + goto error; + break; default: break; } diff --git a/net/core/filter.c b/net/core/filter.c index e30d38b27f21..e93a558324b5 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1779,6 +1779,7 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = { struct redirect_info { u32 ifindex; u32 flags; + struct bpf_map *map; }; static DEFINE_PER_CPU(struct redirect_info, redirect_info); @@ -1792,6 +1793,7 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) ri->ifindex = ifindex; ri->flags = flags; + ri->map = NULL; return TC_ACT_REDIRECT; } @@ -1819,6 +1821,29 @@ static const struct bpf_func_proto bpf_redirect_proto = { .arg2_type = ARG_ANYTHING, }; +BPF_CALL_3(bpf_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags) +{ + struct redirect_info *ri = this_cpu_ptr(&redirect_info); + + if (unlikely(flags)) + return XDP_ABORTED; + + ri->ifindex = ifindex; + ri->flags = flags; + ri->map = map; + + return XDP_REDIRECT; +} + +static const struct bpf_func_proto bpf_redirect_map_proto = { + .func = bpf_redirect_map, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) { return task_get_classid(skb); @@ -2423,14 +2448,39 @@ static int __bpf_tx_xdp(struct net_device *dev, struct xdp_buff *xdp) return -EOPNOTSUPP; } +int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog) +{ + struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct bpf_map *map = ri->map; + struct net_device *fwd; + int err = -EINVAL; + + ri->ifindex = 0; + ri->map = NULL; + + fwd = __dev_map_lookup_elem(map, ri->ifindex); + if (!fwd) + goto out; + + trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT); + err = __bpf_tx_xdp(fwd, xdp); +out: + return err; +} + int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { struct redirect_info *ri = this_cpu_ptr(&redirect_info); struct net_device *fwd; + if (ri->map) + return xdp_do_redirect_map(dev, xdp, xdp_prog); + fwd = dev_get_by_index_rcu(dev_net(dev), ri->ifindex); ri->ifindex = 0; + ri->map = NULL; if (unlikely(!fwd)) { bpf_warn_invalid_xdp_redirect(ri->ifindex); return -EINVAL; @@ -3089,6 +3139,8 @@ xdp_func_proto(enum bpf_func_id func_id) return &bpf_xdp_adjust_head_proto; case BPF_FUNC_redirect: return &bpf_xdp_redirect_proto; + case BPF_FUNC_redirect_map: + return &bpf_redirect_map_proto; default: return bpf_base_func_proto(func_id); } -- cgit v1.2.3-55-g7522 From 11393cc9b9be2a1f61559e6fb9c27bc8fa20b1ff Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:29:40 -0700 Subject: xdp: Add batching support to redirect map For performance reasons we want to avoid updating the tail pointer in the driver tx ring as much as possible. To accomplish this we add batching support to the redirect path in XDP. This adds another ndo op "xdp_flush" that is used to inform the driver that it should bump the tail pointer on the TX ring. 
Signed-off-by: John Fastabend Signed-off-by: Jesper Dangaard Brouer Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 28 ++++++++- include/linux/bpf.h | 2 + include/linux/filter.h | 7 +++ include/linux/netdevice.h | 5 +- kernel/bpf/devmap.c | 84 ++++++++++++++++++++++++++- net/core/filter.c | 55 ++++++++++++++---- 6 files changed, 166 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 38f7ff97d636..0f867dcda65f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2415,6 +2415,8 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, */ wmb(); writel(ring->next_to_use, ring->tail); + + xdp_do_flush_map(); } u64_stats_update_begin(&rx_ring->syncp); @@ -5817,6 +5819,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) usleep_range(10000, 20000); + /* synchronize_sched() needed for pending XDP buffers to drain */ + if (adapter->xdp_ring[0]) + synchronize_sched(); netif_tx_stop_all_queues(netdev); /* call carrier off first to avoid false dev_watchdog timeouts */ @@ -9850,15 +9855,31 @@ static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) if (err != IXGBE_XDP_TX) return -ENOMEM; + return 0; +} + +static void ixgbe_xdp_flush(struct net_device *dev) +{ + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring *ring; + + /* Its possible the device went down between xdp xmit and flush so + * we need to ensure device is still up. + */ + if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state))) + return; + + ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL; + if (unlikely(!ring)) + return; + /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. */ wmb(); - - ring = adapter->xdp_ring[smp_processor_id()]; writel(ring->next_to_use, ring->tail); - return 0; + return; } static const struct net_device_ops ixgbe_netdev_ops = { @@ -9908,6 +9929,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_features_check = ixgbe_features_check, .ndo_xdp = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, + .ndo_xdp_flush = ixgbe_xdp_flush, }; /** diff --git a/include/linux/bpf.h b/include/linux/bpf.h index d0d3281ac678..6850a760dc94 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -381,5 +381,7 @@ u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); /* Map specifics */ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); +void __dev_map_insert_ctx(struct bpf_map *map, u32 index); +void __dev_map_flush(struct bpf_map *map); #endif /* _LINUX_BPF_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index ce8211fa91c7..3323ee91c172 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -712,10 +712,17 @@ bool bpf_helper_changes_pkt_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); +/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the + * same cpu context. Further for best results no more than a single map + * for the do_redirect/do_flush pair should be used. This limitation is + * because we only track one map and force a flush when the map changes. + * This does not appear to be a real limiation for existing software. 
+ */ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb); int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *prog); +void xdp_do_flush_map(void); void bpf_warn_invalid_xdp_action(u32 act); void bpf_warn_invalid_xdp_redirect(u32 ifindex); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 77f5376005e6..03b104908235 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1142,7 +1142,9 @@ struct xfrmdev_ops { * int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp); * This function is used to submit a XDP packet for transmit on a * netdevice. - * + * void (*ndo_xdp_flush)(struct net_device *dev); + * This function is used to inform the driver to flush a paticular + * xpd tx queue. Must be called on same CPU as xdp_xmit. */ struct net_device_ops { int (*ndo_init)(struct net_device *dev); @@ -1329,6 +1331,7 @@ struct net_device_ops { struct netdev_xdp *xdp); int (*ndo_xdp_xmit)(struct net_device *dev, struct xdp_buff *xdp); + void (*ndo_xdp_flush)(struct net_device *dev); }; /** diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 36dc13deb2e1..b2ef04a1c86a 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -53,6 +53,7 @@ struct bpf_dtab_netdev { struct bpf_dtab { struct bpf_map map; struct bpf_dtab_netdev **netdev_map; + unsigned long int __percpu *flush_needed; }; static struct bpf_map *dev_map_alloc(union bpf_attr *attr) @@ -87,6 +88,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) /* make sure page count doesn't overflow */ cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); + cost += BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long); if (cost >= U32_MAX - PAGE_SIZE) goto free_dtab; @@ -97,6 +99,14 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) if (err) goto free_dtab; + /* A per cpu bitfield with a bit per possible net device */ + dtab->flush_needed = __alloc_percpu( + BITS_TO_LONGS(attr->max_entries) * + sizeof(unsigned long), + __alignof__(unsigned long)); + if (!dtab->flush_needed) + goto free_dtab; + dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *)); if (!dtab->netdev_map) @@ -105,6 +115,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) return &dtab->map; free_dtab: + free_percpu(dtab->flush_needed); kfree(dtab); return ERR_PTR(err); } @@ -112,7 +123,7 @@ free_dtab: static void dev_map_free(struct bpf_map *map) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); - int i; + int i, cpu; /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, * so the programs (can be more than one that used this map) were @@ -123,6 +134,18 @@ static void dev_map_free(struct bpf_map *map) */ synchronize_rcu(); + /* To ensure all pending flush operations have completed wait for flush + * bitmap to indicate all flush_needed bits to be zero on _all_ cpus. + * Because the above synchronize_rcu() ensures the map is disconnected + * from the program we can assume no new bits will be set. 
+ */ + for_each_online_cpu(cpu) { + unsigned long *bitmap = per_cpu_ptr(dtab->flush_needed, cpu); + + while (!bitmap_empty(bitmap, dtab->map.max_entries)) + cpu_relax(); + } + for (i = 0; i < dtab->map.max_entries; i++) { struct bpf_dtab_netdev *dev; @@ -137,6 +160,7 @@ static void dev_map_free(struct bpf_map *map) /* At this point bpf program is detached and all pending operations * _must_ be complete */ + free_percpu(dtab->flush_needed); bpf_map_area_free(dtab->netdev_map); kfree(dtab); } @@ -159,6 +183,14 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) return 0; } +void __dev_map_insert_ctx(struct bpf_map *map, u32 key) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed); + + __set_bit(key, bitmap); +} + struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); @@ -171,6 +203,39 @@ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key) return dev ? dev->dev : NULL; } +/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled + * from the driver before returning from its napi->poll() routine. The poll() + * routine is called either from busy_poll context or net_rx_action signaled + * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the + * net device can be torn down. On devmap tear down we ensure the ctx bitmap + * is zeroed before completing to ensure all flush operations have completed. + */ +void __dev_map_flush(struct bpf_map *map) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed); + u32 bit; + + for_each_set_bit(bit, bitmap, map->max_entries) { + struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]); + struct net_device *netdev; + + /* This is possible if the dev entry is removed by user space + * between xdp redirect and flush op. + */ + if (unlikely(!dev)) + continue; + + netdev = dev->dev; + + __clear_bit(bit, bitmap); + if (unlikely(!netdev || !netdev->netdev_ops->ndo_xdp_flush)) + continue; + + netdev->netdev_ops->ndo_xdp_flush(netdev); + } +} + /* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or * update happens in parallel here a dev_put wont happen until after reading the * ifindex. @@ -188,11 +253,28 @@ static void *dev_map_lookup_elem(struct bpf_map *map, void *key) return dev ? 
&dev->dev->ifindex : NULL; } +static void dev_map_flush_old(struct bpf_dtab_netdev *old_dev) +{ + if (old_dev->dev->netdev_ops->ndo_xdp_flush) { + struct net_device *fl = old_dev->dev; + unsigned long *bitmap; + int cpu; + + for_each_online_cpu(cpu) { + bitmap = per_cpu_ptr(old_dev->dtab->flush_needed, cpu); + __clear_bit(old_dev->key, bitmap); + + fl->netdev_ops->ndo_xdp_flush(old_dev->dev); + } + } +} + static void __dev_map_entry_free(struct rcu_head *rcu) { struct bpf_dtab_netdev *old_dev; old_dev = container_of(rcu, struct bpf_dtab_netdev, rcu); + dev_map_flush_old(old_dev); dev_put(old_dev->dev); kfree(old_dev); } diff --git a/net/core/filter.c b/net/core/filter.c index e93a558324b5..e23aa6fa1119 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1780,6 +1780,7 @@ struct redirect_info { u32 ifindex; u32 flags; struct bpf_map *map; + struct bpf_map *map_to_flush; }; static DEFINE_PER_CPU(struct redirect_info, redirect_info); @@ -2438,34 +2439,68 @@ static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { .arg2_type = ARG_ANYTHING, }; -static int __bpf_tx_xdp(struct net_device *dev, struct xdp_buff *xdp) +static int __bpf_tx_xdp(struct net_device *dev, + struct bpf_map *map, + struct xdp_buff *xdp, + u32 index) { - if (dev->netdev_ops->ndo_xdp_xmit) { - dev->netdev_ops->ndo_xdp_xmit(dev, xdp); - return 0; + int err; + + if (!dev->netdev_ops->ndo_xdp_xmit) { + bpf_warn_invalid_xdp_redirect(dev->ifindex); + return -EOPNOTSUPP; } - bpf_warn_invalid_xdp_redirect(dev->ifindex); - return -EOPNOTSUPP; + + err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp); + if (err) + return err; + + if (map) + __dev_map_insert_ctx(map, index); + else + dev->netdev_ops->ndo_xdp_flush(dev); + + return err; } +void xdp_do_flush_map(void) +{ + struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct bpf_map *map = ri->map_to_flush; + + ri->map = NULL; + ri->map_to_flush = NULL; + + if (map) + __dev_map_flush(map); +} +EXPORT_SYMBOL_GPL(xdp_do_flush_map); + int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { struct redirect_info *ri = this_cpu_ptr(&redirect_info); struct bpf_map *map = ri->map; + u32 index = ri->ifindex; struct net_device *fwd; int err = -EINVAL; ri->ifindex = 0; ri->map = NULL; - fwd = __dev_map_lookup_elem(map, ri->ifindex); + fwd = __dev_map_lookup_elem(map, index); if (!fwd) goto out; - trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT); - err = __bpf_tx_xdp(fwd, xdp); + if (ri->map_to_flush && (ri->map_to_flush != map)) + xdp_do_flush_map(); + + err = __bpf_tx_xdp(fwd, map, xdp, index); + if (likely(!err)) + ri->map_to_flush = map; + out: + trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT); return err; } @@ -2488,7 +2523,7 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT); - return __bpf_tx_xdp(fwd, xdp); + return __bpf_tx_xdp(fwd, NULL, xdp, 0); } EXPORT_SYMBOL_GPL(xdp_do_redirect); -- cgit v1.2.3-55-g7522 From 2ddf71e23cc246e95af72a6deed67b4a50a7b81c Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:30:02 -0700 Subject: net: add notifier hooks for devmap bpf map The BPF map devmap holds a refcnt on the net_device structure when it is in the map. We need to do this to ensure on driver unload we don't lose a dev reference. However, its not very convenient to have to manually unload the map when destroying a net device so add notifier handlers to do the cleanup automatically. 
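From user space the cleanup looks like this: once a device that still occupies a devmap slot is unregistered, the notifier clears the slot and drops the reference, so the next lookup of that key simply fails. A small illustration using the samples/bpf wrappers that appear later in this series (devmap_fd and the veth0 name are placeholders; error handling trimmed):

#include <net/if.h>
#include <stdio.h>
#include "libbpf.h"	/* bpf_map_update_elem()/bpf_map_lookup_elem() wrappers */

static void show_auto_cleanup(int devmap_fd)
{
	int key = 0, ifindex = if_nametoindex("veth0");

	/* populate slot 0 while veth0 still exists */
	bpf_map_update_elem(devmap_fd, &key, &ifindex, 0);

	/* ... "ip link del veth0" happens here; NETDEV_UNREGISTER fires
	 * and the notifier added below clears the slot ... */

	if (bpf_map_lookup_elem(devmap_fd, &key, &ifindex) < 0)
		printf("slot 0 was released automatically\n");
}
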
But this creates a race between update/destroy BPF syscall and programs and the unregister netdev hook. Unfortunately, the best I could come up with is either to live with requiring manual removal of net devices from the map before removing the net device OR to add a mutex in devmap to ensure the map is not modified while we are removing a device. The fallout also requires that BPF programs no longer update/delete the map from the BPF program side because the mutex may sleep and this can not be done from inside an rcu critical section. This is not a real problem though because I have not come up with any use cases where this is actually useful in practice. If/when we come up with a compelling user for this we may need to revisit this. Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- include/linux/filter.h | 2 +- kernel/bpf/devmap.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 2 +- 3 files changed, 75 insertions(+), 2 deletions(-) diff --git a/include/linux/filter.h b/include/linux/filter.h index 3323ee91c172..d19ed3c15e1e 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -716,7 +716,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, * same cpu context. Further for best results no more than a single map * for the do_redirect/do_flush pair should be used. This limitation is * because we only track one map and force a flush when the map changes. - * This does not appear to be a real limiation for existing software. + * This does not appear to be a real limitation for existing software. */ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb); int xdp_do_redirect(struct net_device *dev, diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index b2ef04a1c86a..899364d097f5 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -34,6 +34,17 @@ * netdev_map consistent in this case. From the devmap side BPF programs * calling into these operations are the same as multiple user space threads * making system calls. + * + * Finally, any of the above may race with a netdev_unregister notifier. The + * unregister notifier must search for net devices in the map structure that + * contain a reference to the net device and remove them. This is a two step + * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b) + * check to see if the ifindex is the same as the net_device being removed. + * Unfortunately, the xchg() operations do not protect against this. To avoid + * potentially removing incorrect objects the dev_map_list_mutex protects + * conflicting netdev unregister and BPF syscall operations. Updates and + * deletes from a BPF program (done in rcu critical section) are blocked + * because of this mutex. 
*/ #include #include @@ -54,8 +65,12 @@ struct bpf_dtab { struct bpf_map map; struct bpf_dtab_netdev **netdev_map; unsigned long int __percpu *flush_needed; + struct list_head list; }; +static DEFINE_MUTEX(dev_map_list_mutex); +static LIST_HEAD(dev_map_list); + static struct bpf_map *dev_map_alloc(union bpf_attr *attr) { struct bpf_dtab *dtab; @@ -112,6 +127,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) if (!dtab->netdev_map) goto free_dtab; + mutex_lock(&dev_map_list_mutex); + list_add_tail(&dtab->list, &dev_map_list); + mutex_unlock(&dev_map_list_mutex); return &dtab->map; free_dtab: @@ -146,6 +164,11 @@ static void dev_map_free(struct bpf_map *map) cpu_relax(); } + /* Although we should no longer have datapath or bpf syscall operations + * at this point we we can still race with netdev notifier, hence the + * lock. + */ + mutex_lock(&dev_map_list_mutex); for (i = 0; i < dtab->map.max_entries; i++) { struct bpf_dtab_netdev *dev; @@ -160,6 +183,8 @@ static void dev_map_free(struct bpf_map *map) /* At this point bpf program is detached and all pending operations * _must_ be complete */ + list_del(&dtab->list); + mutex_unlock(&dev_map_list_mutex); free_percpu(dtab->flush_needed); bpf_map_area_free(dtab->netdev_map); kfree(dtab); @@ -296,9 +321,11 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key) * the driver tear down ensures all soft irqs are complete before * removing the net device in the case of dev_put equals zero. */ + mutex_lock(&dev_map_list_mutex); old_dev = xchg(&dtab->netdev_map[k], NULL); if (old_dev) call_rcu(&old_dev->rcu, __dev_map_entry_free); + mutex_unlock(&dev_map_list_mutex); return 0; } @@ -341,9 +368,11 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, * Remembering the driver side flush operation will happen before the * net device is removed. */ + mutex_lock(&dev_map_list_mutex); old_dev = xchg(&dtab->netdev_map[i], dev); if (old_dev) call_rcu(&old_dev->rcu, __dev_map_entry_free); + mutex_unlock(&dev_map_list_mutex); return 0; } @@ -356,3 +385,47 @@ const struct bpf_map_ops dev_map_ops = { .map_update_elem = dev_map_update_elem, .map_delete_elem = dev_map_delete_elem, }; + +static int dev_map_notification(struct notifier_block *notifier, + ulong event, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct bpf_dtab *dtab; + int i; + + switch (event) { + case NETDEV_UNREGISTER: + mutex_lock(&dev_map_list_mutex); + list_for_each_entry(dtab, &dev_map_list, list) { + for (i = 0; i < dtab->map.max_entries; i++) { + struct bpf_dtab_netdev *dev; + + dev = dtab->netdev_map[i]; + if (!dev || + dev->dev->ifindex != netdev->ifindex) + continue; + dev = xchg(&dtab->netdev_map[i], NULL); + if (dev) + call_rcu(&dev->rcu, + __dev_map_entry_free); + } + } + mutex_unlock(&dev_map_list_mutex); + break; + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block dev_map_notifier = { + .notifier_call = dev_map_notification, +}; + +static int __init dev_map_init(void) +{ + register_netdevice_notifier(&dev_map_notifier); + return 0; +} + +subsys_initcall(dev_map_init); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index df05d65f0c87..ebe9b38ff522 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1281,7 +1281,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) * for now. 
*/ case BPF_MAP_TYPE_DEVMAP: - if (func_id == BPF_FUNC_map_lookup_elem) + if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: -- cgit v1.2.3-55-g7522 From 9d6e005287ee23c7e25b04f4ad007bdbaf4fc438 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 09:30:25 -0700 Subject: xdp: bpf redirect with map sample program Signed-off-by: John Fastabend Tested-by: Andy Gospodarek Acked-by: Daniel Borkmann Acked-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- samples/bpf/Makefile | 4 ++ samples/bpf/xdp_redirect_map_kern.c | 83 +++++++++++++++++++++++ samples/bpf/xdp_redirect_map_user.c | 105 ++++++++++++++++++++++++++++++ tools/testing/selftests/bpf/bpf_helpers.h | 2 + 4 files changed, 194 insertions(+) create mode 100644 samples/bpf/xdp_redirect_map_kern.c create mode 100644 samples/bpf/xdp_redirect_map_user.c diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 97734ced947e..770d46cdf9f4 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -38,6 +38,7 @@ hostprogs-y += test_map_in_map hostprogs-y += per_socket_stats_example hostprogs-y += load_sock_ops hostprogs-y += xdp_redirect +hostprogs-y += xdp_redirect_map # Libbpf dependencies LIBBPF := ../../tools/lib/bpf/bpf.o @@ -80,6 +81,7 @@ xdp_tx_iptunnel-objs := bpf_load.o $(LIBBPF) xdp_tx_iptunnel_user.o test_map_in_map-objs := bpf_load.o $(LIBBPF) test_map_in_map_user.o per_socket_stats_example-objs := $(LIBBPF) cookie_uid_helper_example.o xdp_redirect-objs := bpf_load.o $(LIBBPF) xdp_redirect_user.o +xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o # Tell kbuild to always build the programs always := $(hostprogs-y) @@ -122,6 +124,7 @@ always += tcp_cong_kern.o always += tcp_iw_kern.o always += tcp_clamp_kern.o always += xdp_redirect_kern.o +always += xdp_redirect_map_kern.o HOSTCFLAGS += -I$(objtree)/usr/include HOSTCFLAGS += -I$(srctree)/tools/lib/ @@ -159,6 +162,7 @@ HOSTLOADLIBES_lwt_len_hist += -l elf HOSTLOADLIBES_xdp_tx_iptunnel += -lelf HOSTLOADLIBES_test_map_in_map += -lelf HOSTLOADLIBES_xdp_redirect += -lelf +HOSTLOADLIBES_xdp_redirect_map += -lelf # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang diff --git a/samples/bpf/xdp_redirect_map_kern.c b/samples/bpf/xdp_redirect_map_kern.c new file mode 100644 index 000000000000..2faf196e17ea --- /dev/null +++ b/samples/bpf/xdp_redirect_map_kern.c @@ -0,0 +1,83 @@ +/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +#define KBUILD_MODNAME "foo" +#include +#include +#include +#include +#include +#include +#include +#include "bpf_helpers.h" + +struct bpf_map_def SEC("maps") tx_port = { + .type = BPF_MAP_TYPE_DEVMAP, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 100, +}; + +struct bpf_map_def SEC("maps") rxcnt = { + .type = BPF_MAP_TYPE_PERCPU_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(long), + .max_entries = 1, +}; + + +static void swap_src_dst_mac(void *data) +{ + unsigned short *p = data; + unsigned short dst[3]; + + dst[0] = p[0]; + dst[1] = p[1]; + dst[2] = p[2]; + p[0] = p[3]; + p[1] = p[4]; + p[2] = p[5]; + p[3] = dst[0]; + p[4] = dst[1]; + p[5] = dst[2]; +} + +SEC("xdp_redirect_map") +int xdp_redirect_map_prog(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + void *data = (void *)(long)ctx->data; + struct ethhdr *eth = data; + int rc = XDP_DROP; + int vport, port = 0, m = 0; + long *value; + u32 key = 0; + u64 nh_off; + + nh_off = sizeof(*eth); + if (data + nh_off > data_end) + return rc; + + /* constant virtual port */ + vport = 0; + + /* count packet in global counter */ + value = bpf_map_lookup_elem(&rxcnt, &key); + if (value) + *value += 1; + + swap_src_dst_mac(data); + + /* send packet out physical port */ + return bpf_redirect_map(&tx_port, vport, 0); +} + +char _license[] SEC("license") = "GPL"; diff --git a/samples/bpf/xdp_redirect_map_user.c b/samples/bpf/xdp_redirect_map_user.c new file mode 100644 index 000000000000..0b8009a85415 --- /dev/null +++ b/samples/bpf/xdp_redirect_map_user.c @@ -0,0 +1,105 @@ +/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "bpf_load.h" +#include "bpf_util.h" +#include "libbpf.h" + +static int ifindex_in; +static int ifindex_out; + +static void int_exit(int sig) +{ + set_link_xdp_fd(ifindex_in, -1, 0); + exit(0); +} + +/* simple per-protocol drop counter + */ +static void poll_stats(int interval, int ifindex) +{ + unsigned int nr_cpus = bpf_num_possible_cpus(); + __u64 values[nr_cpus], prev[nr_cpus]; + + memset(prev, 0, sizeof(prev)); + + while (1) { + __u64 sum = 0; + __u32 key = 0; + int i; + + sleep(interval); + assert(bpf_map_lookup_elem(map_fd[1], &key, values) == 0); + for (i = 0; i < nr_cpus; i++) + sum += (values[i] - prev[i]); + if (sum) + printf("ifindex %i: %10llu pkt/s\n", + ifindex, sum / interval); + memcpy(prev, values, sizeof(values)); + } +} + +int main(int ac, char **argv) +{ + char filename[256]; + int ret, key = 0; + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + + if (ac != 3) { + printf("usage: %s IFINDEX_IN IFINDEX_OUT\n", argv[0]); + return 1; + } + + ifindex_in = strtoul(argv[1], NULL, 0); + ifindex_out = strtoul(argv[2], NULL, 0); + + if (load_bpf_file(filename)) { + printf("%s", bpf_log_buf); + return 1; + } + + if (!prog_fd[0]) { + printf("load_bpf_file: %s\n", strerror(errno)); + return 1; + } + + signal(SIGINT, int_exit); + + if (set_link_xdp_fd(ifindex_in, prog_fd[0], 0) < 0) { + printf("link set xdp fd failed\n"); + return 1; + } + + printf("map[0] (vports) = %i, map[1] (map) = %i, map[2] (count) = %i\n", + map_fd[0], map_fd[1], map_fd[2]); + + /* populate virtual to physical port map */ + ret = bpf_map_update_elem(map_fd[0], &key, &ifindex_out, 0); + if (ret) { + perror("bpf_update_elem"); + goto out; + } + + poll_stats(2, ifindex_out); + +out: + return 0; +} diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index d50ac342dc92..acbd60519467 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -38,6 +38,8 @@ static int (*bpf_clone_redirect)(void *ctx, int ifindex, int flags) = (void *) BPF_FUNC_clone_redirect; static int (*bpf_redirect)(int ifindex, int flags) = (void *) BPF_FUNC_redirect; +static int (*bpf_redirect_map)(void *map, int key, int flags) = + (void *) BPF_FUNC_redirect_map; static int (*bpf_perf_event_output)(void *ctx, void *map, unsigned long long flags, void *data, int size) = -- cgit v1.2.3-55-g7522 From ed22e2f6b72de9dc2a2fe43cd553cf4d36f70785 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 06:28:56 -0700 Subject: s2io: Remove UFO support. Signed-off-by: David S. Miller --- drivers/net/ethernet/neterion/s2io.c | 45 ------------------------------------ 1 file changed, 45 deletions(-) diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index fd2ec36c6fa1..462eda926b1c 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -42,8 +42,6 @@ * aggregated as a single large packet * napi: This parameter used to enable/disable NAPI (polling Rx) * Possible values '1' for enable and '0' for disable. Default is '1' - * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO) - * Possible values '1' for enable and '0' for disable. Default is '0' * vlan_tag_strip: This can be used to enable or disable vlan stripping. * Possible values '1' for enable , '0' for disable. 
* Default is '2' - which means disable in promisc mode @@ -453,7 +451,6 @@ S2IO_PARM_INT(lro_max_pkts, 0xFFFF); S2IO_PARM_INT(indicate_max_pkts, 0); S2IO_PARM_INT(napi, 1); -S2IO_PARM_INT(ufo, 0); S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC); static unsigned int tx_fifo_len[MAX_TX_FIFOS] = @@ -4128,32 +4125,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) } frg_len = skb_headlen(skb); - if (offload_type == SKB_GSO_UDP) { - int ufo_size; - - ufo_size = s2io_udp_mss(skb); - ufo_size &= ~7; - txdp->Control_1 |= TXD_UFO_EN; - txdp->Control_1 |= TXD_UFO_MSS(ufo_size); - txdp->Control_1 |= TXD_BUFFER0_SIZE(8); -#ifdef __BIG_ENDIAN - /* both variants do cpu_to_be64(be32_to_cpu(...)) */ - fifo->ufo_in_band_v[put_off] = - (__force u64)skb_shinfo(skb)->ip6_frag_id; -#else - fifo->ufo_in_band_v[put_off] = - (__force u64)skb_shinfo(skb)->ip6_frag_id << 32; -#endif - txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v; - txdp->Buffer_Pointer = pci_map_single(sp->pdev, - fifo->ufo_in_band_v, - sizeof(u64), - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) - goto pci_map_failed; - txdp++; - } - txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer)) @@ -4161,8 +4132,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) txdp->Host_Control = (unsigned long)skb; txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len); - if (offload_type == SKB_GSO_UDP) - txdp->Control_1 |= TXD_UFO_EN; frg_cnt = skb_shinfo(skb)->nr_frags; /* For fragmented SKB. */ @@ -4177,14 +4146,9 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev) skb_frag_size(frag), DMA_TO_DEVICE); txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag)); - if (offload_type == SKB_GSO_UDP) - txdp->Control_1 |= TXD_UFO_EN; } txdp->Control_1 |= TXD_GATHER_CODE_LAST; - if (offload_type == SKB_GSO_UDP) - frg_cnt++; /* as Txd0 was used for inband header */ - tx_fifo = mac_control->tx_FIFO_start[queue]; val64 = fifo->list_info[put_off].list_phy_addr; writeq(val64, &tx_fifo->TxDL_Pointer); @@ -7910,11 +7874,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) NETIF_F_RXCSUM | NETIF_F_LRO; dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; - if (sp->device_type & XFRAME_II_DEVICE) { - dev->hw_features |= NETIF_F_UFO; - if (ufo) - dev->features |= NETIF_F_UFO; - } if (sp->high_dma_flag == true) dev->features |= NETIF_F_HIGHDMA; dev->watchdog_timeo = WATCH_DOG_TIMEOUT; @@ -8147,10 +8106,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", dev->name); - if (ufo) - DBG_PRINT(ERR_DBG, - "%s: UDP Fragmentation Offload(UFO) enabled\n", - dev->name); /* Initialize device name */ snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name, sp->product_name); -- cgit v1.2.3-55-g7522 From f9c45ae020bab86a820d8ef9097d021d5496b855 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 06:31:05 -0700 Subject: macb: Remove bogus reference to NETIF_F_UFO. This driver doesn't actually support UFO explicitly yet it advertises this in netdev->features. Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 26d25749c3e4..6df2cad61647 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -68,7 +68,7 @@ #define GEM_MAX_TX_LEN ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1))) #define GEM_MTU_MIN_SIZE ETH_MIN_MTU -#define MACB_NETIF_LSO (NETIF_F_TSO | NETIF_F_UFO) +#define MACB_NETIF_LSO NETIF_F_TSO #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) #define MACB_WOL_ENABLED (0x1 << 1) -- cgit v1.2.3-55-g7522 From 182e0b6b58463b85f9a34dd038847e4ab3604a4f Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 06:32:14 -0700 Subject: ipvlan: Stop advertising NETIF_F_UFO support. It is going away. Signed-off-by: David S. Miller --- drivers/net/ipvlan/ipvlan_main.c | 2 +- drivers/net/ipvlan/ipvtap.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index f37e3c1fd4e7..fdde20735416 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -169,7 +169,7 @@ static void ipvlan_port_destroy(struct net_device *dev) #define IPVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ - NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index 22f133ea8d7b..5dea2063dbc8 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c @@ -24,7 +24,7 @@ #include #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6 | NETIF_F_UFO) + NETIF_F_TSO6) static dev_t ipvtap_major; static struct cdev ipvtap_cdev; -- cgit v1.2.3-55-g7522 From fb652fdfe83710da0ca13448a41b7ed027d0a984 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 06:33:08 -0700 Subject: macvlan/macvtap: Remove NETIF_F_UFO advertisement. It is going away. Signed-off-by: David S. Miller --- drivers/net/macvlan.c | 2 +- drivers/net/macvtap.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 0f581ee74fe4..ca35c6ba7947 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -841,7 +841,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ - NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_LRO | \ + NETIF_F_GSO | NETIF_F_TSO | NETIF_F_LRO | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 91e7b19bbf86..c2d0ea2fb019 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -49,7 +49,7 @@ static struct class macvtap_class = { static struct cdev macvtap_cdev; #define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ - NETIF_F_TSO6 | NETIF_F_UFO) + NETIF_F_TSO6) static void macvtap_count_tx_dropped(struct tap_dev *tap) { -- cgit v1.2.3-55-g7522 From d591a1f3aad92ade4642e4173f4c368006c27f0f Mon Sep 17 00:00:00 2001 From: David S. 
Miller Date: Mon, 3 Jul 2017 06:35:32 -0700 Subject: tun/tap: Remove references to NETIF_F_UFO. It is going away. Signed-off-by: David S. Miller --- drivers/net/tap.c | 7 ++----- drivers/net/tun.c | 7 +------ 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 3570c7576993..ca267fd28ab8 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -943,9 +943,6 @@ static int set_offload(struct tap_queue *q, unsigned long arg) if (arg & TUN_F_TSO6) feature_mask |= NETIF_F_TSO6; } - - if (arg & TUN_F_UFO) - feature_mask |= NETIF_F_UFO; } /* tun/tap driver inverts the usage for TSO offloads, where @@ -956,7 +953,7 @@ static int set_offload(struct tap_queue *q, unsigned long arg) * When user space turns off TSO, we turn off GSO/LRO so that * user-space will not receive TSO frames. */ - if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) + if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6)) features |= RX_OFFLOADS; else features &= ~RX_OFFLOADS; @@ -1078,7 +1075,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN | TUN_F_UFO)) + TUN_F_TSO_ECN)) return -EINVAL; rtnl_lock(); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3d4c24572ecd..a93392d7a340 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -199,7 +199,7 @@ struct tun_struct { struct net_device *dev; netdev_features_t set_features; #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ - NETIF_F_TSO6|NETIF_F_UFO) + NETIF_F_TSO6) int align; int vnet_hdr_sz; @@ -1921,11 +1921,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } - - if (arg & TUN_F_UFO) { - features |= NETIF_F_UFO; - arg &= ~TUN_F_UFO; - } } /* This gives the user a way to test for new features in future by -- cgit v1.2.3-55-g7522 From 2082499a95ad31b88466e50f4c61513e3873ba9e Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 06:36:07 -0700 Subject: dummy: Remove references to NETIF_F_UFO. It is going away. Signed-off-by: David S. Miller --- drivers/net/dummy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index d0c165d2086e..d0a1f9ce3168 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -345,7 +345,7 @@ static void dummy_setup(struct net_device *dev) dev->flags &= ~IFF_MULTICAST; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST; - dev->features |= NETIF_F_ALL_TSO | NETIF_F_UFO; + dev->features |= NETIF_F_ALL_TSO; dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX; dev->features |= NETIF_F_GSO_ENCAP_ALL; dev->hw_features |= dev->features; -- cgit v1.2.3-55-g7522 From e078de03788353b220f3d501fc3607cc92db28c1 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 06:37:32 -0700 Subject: virtio_net: Remove references to NETIF_F_UFO. It is going away. Signed-off-by: David S. 
Miller --- drivers/net/virtio_net.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 99a26a9efec1..99830167ea2f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2429,7 +2429,7 @@ static int virtnet_probe(struct virtio_device *vdev) dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { - dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO + dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6; } /* Individual feature bits: what can host handle? */ @@ -2439,13 +2439,11 @@ static int virtnet_probe(struct virtio_device *vdev) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; - if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) - dev->hw_features |= NETIF_F_UFO; dev->features |= NETIF_F_GSO_ROBUST; if (gso) - dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); + dev->features |= dev->hw_features & NETIF_F_ALL_TSO; /* (!csum && gso) case will be fixed by register_netdev() */ } if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) -- cgit v1.2.3-55-g7522 From d4c023f4f3dd96734ef53d4b588136a872300046 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 07:04:22 -0700 Subject: net: Remove references to NETIF_F_UFO in netdev_fix_features(). It is going away. Signed-off-by: David S. Miller --- net/core/dev.c | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 9f3f4083ada5..467420eda02e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7271,24 +7271,6 @@ static netdev_features_t netdev_fix_features(struct net_device *dev, features &= ~NETIF_F_GSO; } - /* UFO needs SG and checksumming */ - if (features & NETIF_F_UFO) { - /* maybe split UFO into V4 and V6? */ - if (!(features & NETIF_F_HW_CSUM) && - ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) != - (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) { - netdev_dbg(dev, - "Dropping NETIF_F_UFO since no checksum offload features.\n"); - features &= ~NETIF_F_UFO; - } - - if (!(features & NETIF_F_SG)) { - netdev_dbg(dev, - "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n"); - features &= ~NETIF_F_UFO; - } - } - /* GSO partial features require GSO partial be set */ if ((features & dev->gso_partial_features) && !(features & NETIF_F_GSO_PARTIAL)) { -- cgit v1.2.3-55-g7522 From 08a00fea6de277df12ccfadc21bf779df2e83705 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 07:04:34 -0700 Subject: net: Remove references to NETIF_F_UFO from ethtool. It is going away. Signed-off-by: David S. 
Miller --- net/core/ethtool.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 674b6c9cec18..78408ab77a10 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -76,7 +76,6 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_LRO_BIT] = "rx-lro", [NETIF_F_TSO_BIT] = "tx-tcp-segmentation", - [NETIF_F_UFO_BIT] = "tx-udp-fragmentation", [NETIF_F_GSO_ROBUST_BIT] = "tx-gso-robust", [NETIF_F_TSO_ECN_BIT] = "tx-tcp-ecn-segmentation", [NETIF_F_TSO_MANGLEID_BIT] = "tx-tcp-mangleid-segmentation", @@ -299,9 +298,6 @@ static netdev_features_t ethtool_get_feature_mask(u32 eth_cmd) case ETHTOOL_GTSO: case ETHTOOL_STSO: return NETIF_F_ALL_TSO; - case ETHTOOL_GUFO: - case ETHTOOL_SUFO: - return NETIF_F_UFO; case ETHTOOL_GGSO: case ETHTOOL_SGSO: return NETIF_F_GSO; @@ -2555,7 +2551,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GPHYSTATS: case ETHTOOL_GTSO: case ETHTOOL_GPERMADDR: - case ETHTOOL_GUFO: case ETHTOOL_GGSO: case ETHTOOL_GGRO: case ETHTOOL_GFLAGS: @@ -2723,7 +2718,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GRXCSUM: case ETHTOOL_GSG: case ETHTOOL_GTSO: - case ETHTOOL_GUFO: case ETHTOOL_GGSO: case ETHTOOL_GGRO: rc = ethtool_get_one_feature(dev, useraddr, ethcmd); @@ -2732,7 +2726,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_SRXCSUM: case ETHTOOL_SSG: case ETHTOOL_STSO: - case ETHTOOL_SUFO: case ETHTOOL_SGSO: case ETHTOOL_SGRO: rc = ethtool_set_one_feature(dev, useraddr, ethcmd); -- cgit v1.2.3-55-g7522 From 988cf74deb45bd6ee27433b7b5d1be6004d842b8 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 07:07:18 -0700 Subject: inet: Stop generating UFO packets. Signed-off-by: David S. 
Miller --- net/ipv4/ip_output.c | 76 --------------------------------------------------- net/ipv6/ip6_output.c | 76 --------------------------------------------------- 2 files changed, 152 deletions(-) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 7eb252dcecee..d338f865951a 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -853,61 +853,6 @@ csum_page(struct page *page, int offset, int copy) return csum; } -static inline int ip_ufo_append_data(struct sock *sk, - struct sk_buff_head *queue, - int getfrag(void *from, char *to, int offset, int len, - int odd, struct sk_buff *skb), - void *from, int length, int hh_len, int fragheaderlen, - int transhdrlen, int maxfraglen, unsigned int flags) -{ - struct sk_buff *skb; - int err; - - /* There is support for UDP fragmentation offload by network - * device, so create one single skb packet containing complete - * udp datagram - */ - skb = skb_peek_tail(queue); - if (!skb) { - skb = sock_alloc_send_skb(sk, - hh_len + fragheaderlen + transhdrlen + 20, - (flags & MSG_DONTWAIT), &err); - - if (!skb) - return err; - - /* reserve space for Hardware header */ - skb_reserve(skb, hh_len); - - /* create space for UDP/IP header */ - skb_put(skb, fragheaderlen + transhdrlen); - - /* initialize network header pointer */ - skb_reset_network_header(skb); - - /* initialize protocol header pointer */ - skb->transport_header = skb->network_header + fragheaderlen; - - skb->csum = 0; - - if (flags & MSG_CONFIRM) - skb_set_dst_pending_confirm(skb, 1); - - __skb_queue_tail(queue, skb); - } else if (skb_is_gso(skb)) { - goto append; - } - - skb->ip_summed = CHECKSUM_PARTIAL; - /* specify the length of each IP datagram fragment */ - skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen; - skb_shinfo(skb)->gso_type = SKB_GSO_UDP; - -append: - return skb_append_datato_frags(sk, skb, getfrag, from, - (length - transhdrlen)); -} - static int __ip_append_data(struct sock *sk, struct flowi4 *fl4, struct sk_buff_head *queue, @@ -965,18 +910,6 @@ static int __ip_append_data(struct sock *sk, csummode = CHECKSUM_PARTIAL; cork->length += length; - if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) || - (skb && skb_is_gso(skb))) && - (sk->sk_protocol == IPPROTO_UDP) && - (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && - (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) { - err = ip_ufo_append_data(sk, queue, getfrag, from, length, - hh_len, fragheaderlen, transhdrlen, - maxfraglen, flags); - if (err) - goto error; - return 0; - } /* So, what's going on in the loop below? 
* @@ -1287,15 +1220,6 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, if (!skb) return -EINVAL; - if ((size + skb->len > mtu) && - (sk->sk_protocol == IPPROTO_UDP) && - (rt->dst.dev->features & NETIF_F_UFO)) { - if (skb->ip_summed != CHECKSUM_PARTIAL) - return -EOPNOTSUPP; - - skb_shinfo(skb)->gso_size = mtu - fragheaderlen; - skb_shinfo(skb)->gso_type = SKB_GSO_UDP; - } cork->length += size; while (size > 0) { diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 1422d6c08377..c6ec06465ce0 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1114,69 +1114,6 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, } EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); -static inline int ip6_ufo_append_data(struct sock *sk, - struct sk_buff_head *queue, - int getfrag(void *from, char *to, int offset, int len, - int odd, struct sk_buff *skb), - void *from, int length, int hh_len, int fragheaderlen, - int exthdrlen, int transhdrlen, int mtu, - unsigned int flags, const struct flowi6 *fl6) - -{ - struct sk_buff *skb; - int err; - - /* There is support for UDP large send offload by network - * device, so create one single skb packet containing complete - * udp datagram - */ - skb = skb_peek_tail(queue); - if (!skb) { - skb = sock_alloc_send_skb(sk, - hh_len + fragheaderlen + transhdrlen + 20, - (flags & MSG_DONTWAIT), &err); - if (!skb) - return err; - - /* reserve space for Hardware header */ - skb_reserve(skb, hh_len); - - /* create space for UDP/IP header */ - skb_put(skb, fragheaderlen + transhdrlen); - - /* initialize network header pointer */ - skb_set_network_header(skb, exthdrlen); - - /* initialize protocol header pointer */ - skb->transport_header = skb->network_header + fragheaderlen; - - skb->protocol = htons(ETH_P_IPV6); - skb->csum = 0; - - if (flags & MSG_CONFIRM) - skb_set_dst_pending_confirm(skb, 1); - - __skb_queue_tail(queue, skb); - } else if (skb_is_gso(skb)) { - goto append; - } - - skb->ip_summed = CHECKSUM_PARTIAL; - /* Specify the length of each IPv6 datagram fragment. - * It has to be a multiple of 8. - */ - skb_shinfo(skb)->gso_size = (mtu - fragheaderlen - - sizeof(struct frag_hdr)) & ~7; - skb_shinfo(skb)->gso_type = SKB_GSO_UDP; - skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk), - &fl6->daddr, - &fl6->saddr); - -append: - return skb_append_datato_frags(sk, skb, getfrag, from, - (length - transhdrlen)); -} - static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, gfp_t gfp) { @@ -1385,19 +1322,6 @@ emsgsize: */ cork->length += length; - if ((((length + (skb ? skb->len : headersize)) > mtu) || - (skb && skb_is_gso(skb))) && - (sk->sk_protocol == IPPROTO_UDP) && - (rt->dst.dev->features & NETIF_F_UFO) && !dst_xfrm(&rt->dst) && - (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { - err = ip6_ufo_append_data(sk, queue, getfrag, from, length, - hh_len, fragheaderlen, exthdrlen, - transhdrlen, mtu, flags, fl6); - if (err) - goto error; - return 0; - } - if (!skb) goto alloc_new_skb; -- cgit v1.2.3-55-g7522 From 880388aa3c07fdea4f9b85e35641753017b1852f Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 07:29:12 -0700 Subject: net: Remove all references to SKB_GSO_UDP. Such packets are no longer possible. Signed-off-by: David S. 
Miller --- include/linux/virtio_net.h | 5 ----- net/core/filter.c | 8 ++++---- net/ipv4/af_inet.c | 12 ++---------- net/ipv4/gre_offload.c | 14 +------------- net/ipv4/udp_offload.c | 6 ++---- net/openvswitch/datapath.c | 14 -------------- net/openvswitch/flow.c | 6 +----- net/sched/act_csum.c | 6 ------ 8 files changed, 10 insertions(+), 61 deletions(-) diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 5209b5ed2a64..32fb046f2173 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -18,9 +18,6 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, case VIRTIO_NET_HDR_GSO_TCPV6: gso_type = SKB_GSO_TCPV6; break; - case VIRTIO_NET_HDR_GSO_UDP: - gso_type = SKB_GSO_UDP; - break; default: return -EINVAL; } @@ -73,8 +70,6 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb, hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; else if (sinfo->gso_type & SKB_GSO_TCPV6) hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; - else if (sinfo->gso_type & SKB_GSO_UDP) - hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP; else return -EINVAL; if (sinfo->gso_type & SKB_GSO_TCP_ECN) diff --git a/net/core/filter.c b/net/core/filter.c index e23aa6fa1119..29e690cbe820 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2051,8 +2051,8 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) return ret; if (skb_is_gso(skb)) { - /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV4 needs to - * be changed into SKB_GSO_TCPV6. + /* SKB_GSO_TCPV4 needs to be changed into + * SKB_GSO_TCPV6. */ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4; @@ -2087,8 +2087,8 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) return ret; if (skb_is_gso(skb)) { - /* SKB_GSO_UDP stays as is. SKB_GSO_TCPV6 needs to - * be changed into SKB_GSO_TCPV4. + /* SKB_GSO_TCPV6 needs to be changed into + * SKB_GSO_TCPV4. 
*/ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 76c2077c3f5b..5ce44fb7d498 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1219,10 +1219,9 @@ EXPORT_SYMBOL(inet_sk_rebuild_header); struct sk_buff *inet_gso_segment(struct sk_buff *skb, netdev_features_t features) { - bool udpfrag = false, fixedid = false, gso_partial, encap; + bool fixedid = false, gso_partial, encap; struct sk_buff *segs = ERR_PTR(-EINVAL); const struct net_offload *ops; - unsigned int offset = 0; struct iphdr *iph; int proto, tot_len; int nhoff; @@ -1257,7 +1256,6 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, segs = ERR_PTR(-EPROTONOSUPPORT); if (!skb->encapsulation || encap) { - udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID); /* fixed ID is invalid if DF bit is not set */ @@ -1277,13 +1275,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, skb = segs; do { iph = (struct iphdr *)(skb_mac_header(skb) + nhoff); - if (udpfrag) { - iph->frag_off = htons(offset >> 3); - if (skb->next) - iph->frag_off |= htons(IP_MF); - offset += skb->len - nhoff - ihl; - tot_len = skb->len - nhoff; - } else if (skb_is_gso(skb)) { + if (skb_is_gso(skb)) { if (!fixedid) { iph->id = htons(id); id += skb_shinfo(skb)->gso_segs; diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index d5cac99170b1..416bb304a281 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -24,7 +24,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, __be16 protocol = skb->protocol; u16 mac_len = skb->mac_len; int gre_offset, outer_hlen; - bool need_csum, ufo, gso_partial; + bool need_csum, gso_partial; if (!skb->encapsulation) goto out; @@ -47,20 +47,8 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM); skb->encap_hdr_csum = need_csum; - ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); - features &= skb->dev->hw_enc_features; - /* The only checksum offload we care about from here on out is the - * outer one so strip the existing checksum feature flags based - * on the fact that we will be computing our checksum in software. - */ - if (ufo) { - features &= ~NETIF_F_CSUM_MASK; - if (!need_csum) - features |= NETIF_F_HW_CSUM; - } - /* segment inner packet. 
*/ segs = skb_mac_gso_segment(skb, features); if (IS_ERR_OR_NULL(segs)) { diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 781250151d40..4fedce3d5733 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -21,7 +21,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, __be16 new_protocol, bool is_ipv6) { int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); - bool remcsum, need_csum, offload_csum, ufo, gso_partial; + bool remcsum, need_csum, offload_csum, gso_partial; struct sk_buff *segs = ERR_PTR(-EINVAL); struct udphdr *uh = udp_hdr(skb); u16 mac_offset = skb->mac_header; @@ -61,8 +61,6 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); skb->remcsum_offload = remcsum; - ufo = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); - need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb)); /* Try to offload checksum if possible */ offload_csum = !!(need_csum && @@ -77,7 +75,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, * outer one so strip the existing checksum feature flags and * instead set the flag based on our outer checksum offload value. */ - if (remcsum || ufo) { + if (remcsum) { features &= ~NETIF_F_CSUM_MASK; if (!need_csum || offload_csum) features |= NETIF_F_HW_CSUM; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 45fe8c8a884d..f6e229b51dfb 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -335,8 +335,6 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info, uint32_t cutlen) { - unsigned short gso_type = skb_shinfo(skb)->gso_type; - struct sw_flow_key later_key; struct sk_buff *segs, *nskb; int err; @@ -347,21 +345,9 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, if (segs == NULL) return -EINVAL; - if (gso_type & SKB_GSO_UDP) { - /* The initial flow key extracted by ovs_flow_key_extract() - * in this case is for a first fragment, so we need to - * properly mark later fragments. - */ - later_key = *key; - later_key.ip.frag = OVS_FRAG_TYPE_LATER; - } - /* Queue all of the segments. */ skb = segs; do { - if (gso_type & SKB_GSO_UDP && skb != segs) - key = &later_key; - err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen); if (err) break; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 3f76cb765e5b..597d96faca45 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -584,8 +584,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) key->ip.frag = OVS_FRAG_TYPE_LATER; return 0; } - if (nh->frag_off & htons(IP_MF) || - skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + if (nh->frag_off & htons(IP_MF)) key->ip.frag = OVS_FRAG_TYPE_FIRST; else key->ip.frag = OVS_FRAG_TYPE_NONE; @@ -701,9 +700,6 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) if (key->ip.frag == OVS_FRAG_TYPE_LATER) return 0; - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) - key->ip.frag = OVS_FRAG_TYPE_FIRST; - /* Transport layer. 
*/ if (key->ip.proto == NEXTHDR_TCP) { if (tcphdr_ok(skb)) { diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 3317a2f579da..67afc12df88b 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -231,9 +231,6 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl, const struct iphdr *iph; u16 ul; - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) - return 1; - /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, @@ -287,9 +284,6 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl, const struct ipv6hdr *ip6h; u16 ul; - if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) - return 1; - /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, -- cgit v1.2.3-55-g7522 From 6800b2e040edda01f593aba28203c2ebf1679f4c Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Fri, 7 Jul 2017 10:30:55 +0100 Subject: inet: Remove software UFO fragmenting code. Rename udp{4,6}_ufo_fragment() to udp{4,6}_tunnel_segment() and only handle tunnel segmentation. Signed-off-by: David S. Miller --- net/ipv4/udp_offload.c | 58 ++-------------------------- net/ipv6/udp_offload.c | 100 ++----------------------------------------------- 2 files changed, 7 insertions(+), 151 deletions(-) diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 4fedce3d5733..97658bfc1b58 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -187,66 +187,16 @@ out_unlock: } EXPORT_SYMBOL(skb_udp_tunnel_segment); -static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *udp4_tunnel_segment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); - unsigned int mss; - __wsum csum; - struct udphdr *uh; - struct iphdr *iph; if (skb->encapsulation && (skb_shinfo(skb)->gso_type & - (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) { + (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) segs = skb_udp_tunnel_segment(skb, features, false); - goto out; - } - - if (!pskb_may_pull(skb, sizeof(struct udphdr))) - goto out; - - mss = skb_shinfo(skb)->gso_size; - if (unlikely(skb->len <= mss)) - goto out; - - if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { - /* Packet is from an untrusted source, reset gso_segs. */ - - skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); - - segs = NULL; - goto out; - } - - /* Do software UFO. Complete and fill in the UDP checksum as - * HW cannot do checksum of UDP packets sent as multiple - * IP fragments. - */ - - uh = udp_hdr(skb); - iph = ip_hdr(skb); - - uh->check = 0; - csum = skb_checksum(skb, 0, skb->len, 0); - uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum); - if (uh->check == 0) - uh->check = CSUM_MANGLED_0; - skb->ip_summed = CHECKSUM_NONE; - - /* If there is no outer header we can fake a checksum offload - * due to the fact that we have already done the checksum in - * software prior to segmenting the frame. - */ - if (!skb->encap_hdr_csum) - features |= NETIF_F_HW_CSUM; - - /* Fragment the skb. 
IP headers of the fragments are updated in - * inet_gso_segment() - */ - segs = skb_segment(skb, features); -out: return segs; } @@ -380,7 +330,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff) static const struct net_offload udpv4_offload = { .callbacks = { - .gso_segment = udp4_ufo_fragment, + .gso_segment = udp4_tunnel_segment, .gro_receive = udp4_gro_receive, .gro_complete = udp4_gro_complete, }, diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index a2267f80febb..455fd4e39333 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -17,109 +17,15 @@ #include #include "ip6_offload.h" -static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *udp6_tunnel_segment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); - unsigned int mss; - unsigned int unfrag_ip6hlen, unfrag_len; - struct frag_hdr *fptr; - u8 *packet_start, *prevhdr; - u8 nexthdr; - u8 frag_hdr_sz = sizeof(struct frag_hdr); - __wsum csum; - int tnl_hlen; - int err; - - mss = skb_shinfo(skb)->gso_size; - if (unlikely(skb->len <= mss)) - goto out; - - if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { - /* Packet is from an untrusted source, reset gso_segs. */ - - skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); - - /* Set the IPv6 fragment id if not set yet */ - if (!skb_shinfo(skb)->ip6_frag_id) - ipv6_proxy_select_ident(dev_net(skb->dev), skb); - - segs = NULL; - goto out; - } if (skb->encapsulation && skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)) segs = skb_udp_tunnel_segment(skb, features, true); - else { - const struct ipv6hdr *ipv6h; - struct udphdr *uh; - - if (!pskb_may_pull(skb, sizeof(struct udphdr))) - goto out; - - /* Do software UFO. Complete and fill in the UDP checksum as HW cannot - * do checksum of UDP packets sent as multiple IP fragments. - */ - - uh = udp_hdr(skb); - ipv6h = ipv6_hdr(skb); - - uh->check = 0; - csum = skb_checksum(skb, 0, skb->len, 0); - uh->check = udp_v6_check(skb->len, &ipv6h->saddr, - &ipv6h->daddr, csum); - if (uh->check == 0) - uh->check = CSUM_MANGLED_0; - - skb->ip_summed = CHECKSUM_NONE; - - /* If there is no outer header we can fake a checksum offload - * due to the fact that we have already done the checksum in - * software prior to segmenting the frame. - */ - if (!skb->encap_hdr_csum) - features |= NETIF_F_HW_CSUM; - - /* Check if there is enough headroom to insert fragment header. */ - tnl_hlen = skb_tnl_header_len(skb); - if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) { - if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz)) - goto out; - } - - /* Find the unfragmentable header and shift it left by frag_hdr_sz - * bytes to insert fragment header. 
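[Aside] The software-UFO branch being deleted here filled in uh->check by hand because hardware cannot checksum a UDP datagram that is about to be split into IP fragments. A rough standalone sketch of that RFC 768/1071 one's-complement computation over the IPv4 pseudo-header plus UDP header and payload (illustrative userspace code, not the kernel's csum helpers):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fold a buffer into a running one's-complement sum (RFC 1071). */
static uint32_t csum_add(uint32_t sum, const void *buf, size_t len)
{
    const uint8_t *p = buf;

    while (len > 1) {
        sum += (uint32_t)p[0] << 8 | p[1];
        p += 2;
        len -= 2;
    }
    if (len)                       /* odd trailing byte, padded with zero */
        sum += (uint32_t)p[0] << 8;
    return sum;
}

/* UDP checksum over pseudo-header + UDP header + payload. saddr/daddr are
 * IPv4 addresses in network order, udp points at the UDP header with its
 * check field already zeroed, udp_len is header plus payload in bytes. */
static uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
                              const void *udp, uint16_t udp_len)
{
    uint8_t pseudo[12];
    uint32_t sum = 0;

    memcpy(&pseudo[0], &saddr, 4);
    memcpy(&pseudo[4], &daddr, 4);
    pseudo[8]  = 0;
    pseudo[9]  = 17;               /* IPPROTO_UDP */
    pseudo[10] = udp_len >> 8;
    pseudo[11] = udp_len & 0xff;

    sum = csum_add(sum, pseudo, sizeof(pseudo));
    sum = csum_add(sum, udp, udp_len);

    while (sum >> 16)              /* fold the carries back in */
        sum = (sum & 0xffff) + (sum >> 16);

    sum = ~sum & 0xffff;
    return sum ? htons(sum) : 0xffff;   /* 0 means "no checksum" for UDP */
}

int main(void)
{
    struct { uint16_t sport, dport, len, check; char data[4]; } udp =
        { htons(1234), htons(5678), htons(12), 0, "abcd" };
    uint32_t s = htonl(0xc0a80001), d = htonl(0xc0a80002);

    printf("check=0x%04x\n", ntohs(udp4_checksum(s, d, &udp, 12)));
    return 0;
}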
- */ - err = ip6_find_1stfragopt(skb, &prevhdr); - if (err < 0) - return ERR_PTR(err); - unfrag_ip6hlen = err; - nexthdr = *prevhdr; - *prevhdr = NEXTHDR_FRAGMENT; - unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + - unfrag_ip6hlen + tnl_hlen; - packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset; - memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len); - - SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz; - skb->mac_header -= frag_hdr_sz; - skb->network_header -= frag_hdr_sz; - - fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); - fptr->nexthdr = nexthdr; - fptr->reserved = 0; - if (!skb_shinfo(skb)->ip6_frag_id) - ipv6_proxy_select_ident(dev_net(skb->dev), skb); - fptr->identification = skb_shinfo(skb)->ip6_frag_id; - - /* Fragment the skb. ipv6 header and the remaining fields of the - * fragment header are updated in ipv6_gso_segment() - */ - segs = skb_segment(skb, features); - } -out: return segs; } @@ -169,7 +75,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff) static const struct net_offload udpv6_offload = { .callbacks = { - .gso_segment = udp6_ufo_fragment, + .gso_segment = udp6_tunnel_segment, .gro_receive = udp6_gro_receive, .gro_complete = udp6_gro_complete, }, -- cgit v1.2.3-55-g7522 From d9d30adf56777c402c0027c0e6ae21f17cc0a365 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 3 Jul 2017 07:31:57 -0700 Subject: net: Kill NETIF_F_UFO and SKB_GSO_UDP. No longer used. Signed-off-by: David S. Miller --- include/linux/netdev_features.h | 4 +--- include/linux/netdevice.h | 1 - include/linux/skbuff.h | 31 +++++++++++++++---------------- 3 files changed, 16 insertions(+), 20 deletions(-) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 1d4737cffc71..ebd273627334 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -36,7 +36,6 @@ enum { /**/NETIF_F_GSO_SHIFT, /* keep the order of SKB_GSO_* bits */ NETIF_F_TSO_BIT /* ... TCPv4 segmentation */ = NETIF_F_GSO_SHIFT, - NETIF_F_UFO_BIT, /* ... UDPv4 fragmentation */ NETIF_F_GSO_ROBUST_BIT, /* ... ->SKB_GSO_DODGY */ NETIF_F_TSO_ECN_BIT, /* ... TCP ECN support */ NETIF_F_TSO_MANGLEID_BIT, /* ... IPV4 ID mangling allowed */ @@ -118,7 +117,6 @@ enum { #define NETIF_F_TSO6 __NETIF_F(TSO6) #define NETIF_F_TSO_ECN __NETIF_F(TSO_ECN) #define NETIF_F_TSO __NETIF_F(TSO) -#define NETIF_F_UFO __NETIF_F(UFO) #define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) #define NETIF_F_RXFCS __NETIF_F(RXFCS) #define NETIF_F_RXALL __NETIF_F(RXALL) @@ -172,7 +170,7 @@ enum { NETIF_F_FSO) /* List of features with software fallbacks. 
*/ -#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_UFO | \ +#define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | \ NETIF_F_GSO_SCTP) /* diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 03b104908235..c60351b84323 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4098,7 +4098,6 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) /* check flags correspondence */ BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); - BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_TCP_FIXEDID != (NETIF_F_TSO_MANGLEID >> NETIF_F_GSO_SHIFT)); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index dbe29b6c9bd6..4d7a284ba3ee 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -463,39 +463,38 @@ enum { enum { SKB_GSO_TCPV4 = 1 << 0, - SKB_GSO_UDP = 1 << 1, /* This indicates the skb is from an untrusted source. */ - SKB_GSO_DODGY = 1 << 2, + SKB_GSO_DODGY = 1 << 1, /* This indicates the tcp segment has CWR set. */ - SKB_GSO_TCP_ECN = 1 << 3, + SKB_GSO_TCP_ECN = 1 << 2, - SKB_GSO_TCP_FIXEDID = 1 << 4, + SKB_GSO_TCP_FIXEDID = 1 << 3, - SKB_GSO_TCPV6 = 1 << 5, + SKB_GSO_TCPV6 = 1 << 4, - SKB_GSO_FCOE = 1 << 6, + SKB_GSO_FCOE = 1 << 5, - SKB_GSO_GRE = 1 << 7, + SKB_GSO_GRE = 1 << 6, - SKB_GSO_GRE_CSUM = 1 << 8, + SKB_GSO_GRE_CSUM = 1 << 7, - SKB_GSO_IPXIP4 = 1 << 9, + SKB_GSO_IPXIP4 = 1 << 8, - SKB_GSO_IPXIP6 = 1 << 10, + SKB_GSO_IPXIP6 = 1 << 9, - SKB_GSO_UDP_TUNNEL = 1 << 11, + SKB_GSO_UDP_TUNNEL = 1 << 10, - SKB_GSO_UDP_TUNNEL_CSUM = 1 << 12, + SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, - SKB_GSO_PARTIAL = 1 << 13, + SKB_GSO_PARTIAL = 1 << 12, - SKB_GSO_TUNNEL_REMCSUM = 1 << 14, + SKB_GSO_TUNNEL_REMCSUM = 1 << 13, - SKB_GSO_SCTP = 1 << 15, + SKB_GSO_SCTP = 1 << 14, - SKB_GSO_ESP = 1 << 16, + SKB_GSO_ESP = 1 << 15, }; #if BITS_PER_LONG > 32 -- cgit v1.2.3-55-g7522 From 06dc75ab06943fcc126a951a0680980ad5cb75c6 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 18:56:54 +0200 Subject: net: Revert "net: add function to allocate sk_buff head without data area" It was added for netlink mmap tx, there are no callers in the tree. The commit also added a check for skb->head != NULL in kfree_skb path, remove that too -- all skbs ought to have skb->head set. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 6 ------ net/core/skbuff.c | 31 ++----------------------------- 2 files changed, 2 insertions(+), 35 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4d7a284ba3ee..4093552be1de 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -944,12 +944,6 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size, return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE); } -struct sk_buff *__alloc_skb_head(gfp_t priority, int node); -static inline struct sk_buff *alloc_skb_head(gfp_t priority) -{ - return __alloc_skb_head(priority, -1); -} - struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask); struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b352c6bcfb31..6bc19c80c210 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -158,31 +158,6 @@ out: * */ -struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node) -{ - struct sk_buff *skb; - - /* Get the HEAD */ - skb = kmem_cache_alloc_node(skbuff_head_cache, - gfp_mask & ~__GFP_DMA, node); - if (!skb) - goto out; - - /* - * Only clear those fields we need to clear, not those that we will - * actually initialise below. Hence, don't put any more fields after - * the tail pointer in struct sk_buff! - */ - memset(skb, 0, offsetof(struct sk_buff, tail)); - skb->head = NULL; - skb->truesize = sizeof(struct sk_buff); - refcount_set(&skb->users, 1); - - skb->mac_header = (typeof(skb->mac_header))~0U; -out: - return skb; -} - /** * __alloc_skb - allocate a network buffer * @size: size to allocate @@ -663,8 +638,7 @@ void skb_release_head_state(struct sk_buff *skb) static void skb_release_all(struct sk_buff *skb) { skb_release_head_state(skb); - if (likely(skb->head)) - skb_release_data(skb); + skb_release_data(skb); } /** @@ -762,8 +736,7 @@ void consume_stateless_skb(struct sk_buff *skb) return; trace_consume_skb(skb); - if (likely(skb->head)) - skb_release_data(skb); + skb_release_data(skb); kfree_skbmem(skb); } -- cgit v1.2.3-55-g7522 From c744cf5b9dea1da57a6fdb314c8b403cb22075c0 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:41:52 +0530 Subject: net: cadence: macb: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 791 336 0 1127 467 net/ethernet/cadence/macb_pci.o File size After adding 'const': text data bss dec hex filename 855 272 0 1127 467 net/ethernet/cadence/macb_pci.o Signed-off-by: Arvind Yadav Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cadence/macb_pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c index 9906fda76087..248a8fc45069 100644 --- a/drivers/net/ethernet/cadence/macb_pci.c +++ b/drivers/net/ethernet/cadence/macb_pci.c @@ -128,7 +128,7 @@ static void macb_remove(struct pci_dev *pdev) clk_unregister(plat_data->hclk); } -static struct pci_device_id dev_id_table[] = { +static const struct pci_device_id dev_id_table[] = { { PCI_DEVICE(CDNS_VENDOR_ID, CDNS_DEVICE_ID), }, { 0, } }; -- cgit v1.2.3-55-g7522 From 7924a42133324d1cbec1c217eca598712707bd03 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:42:34 +0530 Subject: net: ec_bhf: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 5113 384 0 5497 1579 drivers/net/ethernet/ec_bhf.o File size After adding 'const': text data bss dec hex filename 5177 320 0 5497 1579 drivers/net/ethernet/ec_bhf.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/ethernet/ec_bhf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 4ee042c034a1..1b79a6defd56 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -73,7 +73,7 @@ #define ETHERCAT_MASTER_ID 0x14 -static struct pci_device_id ids[] = { +static const struct pci_device_id ids[] = { { PCI_DEVICE(0x15ec, 0x5000), }, { 0, } }; -- cgit v1.2.3-55-g7522 From 24251c264798ac5a72667245c2650676d7ac2108 Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Mon, 17 Jul 2017 16:14:19 -0400 Subject: samples/bpf: add option for native and skb mode for redirect apps When testing with a driver that has both native and generic redirect support: $ sudo ./samples/bpf/xdp_redirect -N 5 6 input: 5 output: 6 ifindex 6: 4961879 pkt/s ifindex 6: 6391319 pkt/s ifindex 6: 6419468 pkt/s $ sudo ./samples/bpf/xdp_redirect -S 5 6 input: 5 output: 6 ifindex 6: 1845435 pkt/s ifindex 6: 3882850 pkt/s ifindex 6: 3893974 pkt/s $ sudo ./samples/bpf/xdp_redirect_map -N 5 6 input: 5 output: 6 map[0] (vports) = 4, map[1] (map) = 5, map[2] (count) = 0 ifindex 6: 2207374 pkt/s ifindex 6: 6212869 pkt/s ifindex 6: 6286515 pkt/s $ sudo ./samples/bpf/xdp_redirect_map -S 5 6 input: 5 output: 6 map[0] (vports) = 4, map[1] (map) = 5, map[2] (count) = 0 ifindex 6: 5052528 pkt/s ifindex 6: 5736631 pkt/s ifindex 6: 5739962 pkt/s Signed-off-by: Andy Gospodarek Acked-by: John Fastabend Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- samples/bpf/xdp_redirect_map_user.c | 50 ++++++++++++++++++++++++++++++------- samples/bpf/xdp_redirect_user.c | 50 ++++++++++++++++++++++++++++++------- 2 files changed, 82 insertions(+), 18 deletions(-) diff --git a/samples/bpf/xdp_redirect_map_user.c b/samples/bpf/xdp_redirect_map_user.c index 0b8009a85415..a1ad00fdaa8a 100644 --- a/samples/bpf/xdp_redirect_map_user.c +++ b/samples/bpf/xdp_redirect_map_user.c @@ -10,6 +10,7 @@ * General Public License for more details. 
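[Aside] The text/data deltas quoted in the two constify patches above come from the linker placing a const-qualified table in .rodata, which size(1) folds into its "text" column while the writable copy stays in .data. A toy translation unit showing the effect (nothing driver-specific; the IDs are just the ones from the ec_bhf table):

/* size-demo.c — build and inspect with:
 *     cc -c size-demo.c && size size-demo.o
 * The const table lands in .rodata (counted as "text" by size(1)),
 * the mutable one in .data — same total, different columns, and the
 * const copy can be shared and write-protected. */
struct id { unsigned vendor, device; };

static const struct id const_table[] = {
    { 0x15ec, 0x5000 },
    { 0, 0 },
};

static struct id mutable_table[] = {
    { 0x15ec, 0x5000 },
    { 0, 0 },
};

const struct id *lookup_const(void)   { return const_table; }
struct id       *lookup_mutable(void) { return mutable_table; }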
*/ #include +#include #include #include #include @@ -17,6 +18,7 @@ #include #include #include +#include #include "bpf_load.h" #include "bpf_util.h" @@ -25,9 +27,11 @@ static int ifindex_in; static int ifindex_out; +static __u32 xdp_flags; + static void int_exit(int sig) { - set_link_xdp_fd(ifindex_in, -1, 0); + set_link_xdp_fd(ifindex_in, -1, xdp_flags); exit(0); } @@ -56,20 +60,47 @@ static void poll_stats(int interval, int ifindex) } } -int main(int ac, char **argv) +static void usage(const char *prog) { - char filename[256]; - int ret, key = 0; + fprintf(stderr, + "usage: %s [OPTS] IFINDEX_IN IFINDEX_OUT\n\n" + "OPTS:\n" + " -S use skb-mode\n" + " -N enforce native mode\n", + prog); +} - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - if (ac != 3) { +int main(int argc, char **argv) +{ + const char *optstr = "SN"; + char filename[256]; + int ret, opt, key = 0; + + while ((opt = getopt(argc, argv, optstr)) != -1) { + switch (opt) { + case 'S': + xdp_flags |= XDP_FLAGS_SKB_MODE; + break; + case 'N': + xdp_flags |= XDP_FLAGS_DRV_MODE; + break; + default: + usage(basename(argv[0])); + return 1; + } + } + + if (optind == argc) { printf("usage: %s IFINDEX_IN IFINDEX_OUT\n", argv[0]); return 1; } - ifindex_in = strtoul(argv[1], NULL, 0); - ifindex_out = strtoul(argv[2], NULL, 0); + ifindex_in = strtoul(argv[optind], NULL, 0); + ifindex_out = strtoul(argv[optind + 1], NULL, 0); + printf("input: %d output: %d\n", ifindex_in, ifindex_out); + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); if (load_bpf_file(filename)) { printf("%s", bpf_log_buf); @@ -82,8 +113,9 @@ int main(int ac, char **argv) } signal(SIGINT, int_exit); + signal(SIGTERM, int_exit); - if (set_link_xdp_fd(ifindex_in, prog_fd[0], 0) < 0) { + if (set_link_xdp_fd(ifindex_in, prog_fd[0], xdp_flags) < 0) { printf("link set xdp fd failed\n"); return 1; } diff --git a/samples/bpf/xdp_redirect_user.c b/samples/bpf/xdp_redirect_user.c index 761a91d5d7b4..f705a1905d2d 100644 --- a/samples/bpf/xdp_redirect_user.c +++ b/samples/bpf/xdp_redirect_user.c @@ -10,6 +10,7 @@ * General Public License for more details. 
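[Aside] The same -S/-N handling is added to xdp_redirect_user.c below. A condensed, standalone sketch of just the option parsing (this assumes a uapi linux/if_link.h providing XDP_FLAGS_SKB_MODE and XDP_FLAGS_DRV_MODE, as the patch's new include suggests; the samples then hand xdp_flags to their set_link_xdp_fd() helper when attaching and detaching):

#include <getopt.h>
#include <libgen.h>
#include <linux/if_link.h>   /* XDP_FLAGS_SKB_MODE, XDP_FLAGS_DRV_MODE */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    __u32 xdp_flags = 0;
    int opt;

    while ((opt = getopt(argc, argv, "SN")) != -1) {
        switch (opt) {
        case 'S': xdp_flags |= XDP_FLAGS_SKB_MODE; break;  /* generic XDP */
        case 'N': xdp_flags |= XDP_FLAGS_DRV_MODE; break;  /* native/driver XDP */
        default:
            fprintf(stderr, "usage: %s [-S|-N] IFINDEX_IN IFINDEX_OUT\n",
                    basename(argv[0]));
            return 1;
        }
    }

    if (argc - optind < 2) {
        fprintf(stderr, "expected IFINDEX_IN and IFINDEX_OUT\n");
        return 1;
    }

    /* Only the parsing is shown here; the real samples load the BPF
     * object and attach it with the chosen xdp_flags. */
    printf("in=%s out=%s flags=%#x\n",
           argv[optind], argv[optind + 1], xdp_flags);
    return 0;
}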
*/ #include +#include #include #include #include @@ -17,6 +18,7 @@ #include #include #include +#include #include "bpf_load.h" #include "bpf_util.h" @@ -25,9 +27,11 @@ static int ifindex_in; static int ifindex_out; +static __u32 xdp_flags; + static void int_exit(int sig) { - set_link_xdp_fd(ifindex_in, -1, 0); + set_link_xdp_fd(ifindex_in, -1, xdp_flags); exit(0); } @@ -56,20 +60,47 @@ static void poll_stats(int interval, int ifindex) } } -int main(int ac, char **argv) +static void usage(const char *prog) { - char filename[256]; - int ret, key = 0; + fprintf(stderr, + "usage: %s [OPTS] IFINDEX_IN IFINDEX_OUT\n\n" + "OPTS:\n" + " -S use skb-mode\n" + " -N enforce native mode\n", + prog); +} - snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); - if (ac != 3) { +int main(int argc, char **argv) +{ + const char *optstr = "SN"; + char filename[256]; + int ret, opt, key = 0; + + while ((opt = getopt(argc, argv, optstr)) != -1) { + switch (opt) { + case 'S': + xdp_flags |= XDP_FLAGS_SKB_MODE; + break; + case 'N': + xdp_flags |= XDP_FLAGS_DRV_MODE; + break; + default: + usage(basename(argv[0])); + return 1; + } + } + + if (optind == argc) { printf("usage: %s IFINDEX_IN IFINDEX_OUT\n", argv[0]); return 1; } - ifindex_in = strtoul(argv[1], NULL, 0); - ifindex_out = strtoul(argv[2], NULL, 0); + ifindex_in = strtoul(argv[optind], NULL, 0); + ifindex_out = strtoul(argv[optind + 1], NULL, 0); + printf("input: %d output: %d\n", ifindex_in, ifindex_out); + + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); if (load_bpf_file(filename)) { printf("%s", bpf_log_buf); @@ -82,8 +113,9 @@ int main(int ac, char **argv) } signal(SIGINT, int_exit); + signal(SIGTERM, int_exit); - if (set_link_xdp_fd(ifindex_in, prog_fd[0], 0) < 0) { + if (set_link_xdp_fd(ifindex_in, prog_fd[0], xdp_flags) < 0) { printf("link set xdp fd failed\n"); return 1; } -- cgit v1.2.3-55-g7522 From 95b80bf3db03c2bf572a357cf74b9a6aefef0a4a Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 17 Jul 2017 18:09:09 -0300 Subject: mdio_bus: Remove unneeded gpiod NULL check The gpiod API checks for NULL descriptors, so there is no need to duplicate the check in the driver. Signed-off-by: Fabio Estevam Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mdio_bus.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 2df7b62c1a36..b6f9fa670168 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -399,8 +399,7 @@ error: } /* Put PHYs in RESET to save power */ - if (bus->reset_gpiod) - gpiod_set_value_cansleep(bus->reset_gpiod, 1); + gpiod_set_value_cansleep(bus->reset_gpiod, 1); device_del(&bus->dev); return err; @@ -425,8 +424,7 @@ void mdiobus_unregister(struct mii_bus *bus) } /* Put PHYs in RESET to save power */ - if (bus->reset_gpiod) - gpiod_set_value_cansleep(bus->reset_gpiod, 1); + gpiod_set_value_cansleep(bus->reset_gpiod, 1); device_del(&bus->dev); } -- cgit v1.2.3-55-g7522 From 46f55cffa47330b99537985a50d92945d4b34658 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 17 Jul 2017 21:56:48 -0700 Subject: net: fix build error in devmap helper calls Initial patches missed case with CONFIG_BPF_SYSCALL not set. Fixes: 11393cc9b9be ("xdp: Add batching support to redirect map") Fixes: 97f91a7cf04f ("bpf: add bpf_redirect_map helper routine") Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- include/linux/bpf.h | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6850a760dc94..6353c7474dba 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -318,6 +318,12 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) /* verify correctness of eBPF program */ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); + +/* Map specifics */ +struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); +void __dev_map_insert_ctx(struct bpf_map *map, u32 index); +void __dev_map_flush(struct bpf_map *map); + #else static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -356,6 +362,20 @@ static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) { } + +static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, + u32 key) +{ + return NULL; +} + +static inline void __dev_map_insert_ctx(struct bpf_map *map, u32 index) +{ +} + +static inline void __dev_map_flush(struct bpf_map *map) +{ +} #endif /* CONFIG_BPF_SYSCALL */ /* verifier prototypes for helper functions called from eBPF programs */ @@ -379,9 +399,4 @@ extern const struct bpf_func_proto bpf_get_stackid_proto; void bpf_user_rnd_init_once(void); u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); -/* Map specifics */ -struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); -void __dev_map_insert_ctx(struct bpf_map *map, u32 index); -void __dev_map_flush(struct bpf_map *map); - #endif /* _LINUX_BPF_H */ -- cgit v1.2.3-55-g7522 From 7b23268c9d81a3603ccd9e1bde64e2a1cfe548b7 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:35 -0400 Subject: net: dsa: mv88e6xxx: remove unneeded dsa header phy.c does not need to include the DSA public header. Remove it. Signed-off-by: Vivien Didelot Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/phy.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 3500ac0ea848..436668bd50dc 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -13,7 +13,6 @@ #include #include -#include #include "chip.h" #include "phy.h" -- cgit v1.2.3-55-g7522 From c56a71a92114e3198e249593841cb744abaadcb7 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:36 -0400 Subject: net: dsa: mv88e6xxx: remove LED control register We don't support LED control yet, remove its register definition. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/port.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 8f3991bf1851..b16d5f0e6e9c 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -216,9 +216,6 @@ /* Offset 0x13: OutFiltered Counter */ #define MV88E6XXX_PORT_OUT_FILTERED 0x13 -/* Offset 0x16: LED Control */ -#define MV88E6XXX_PORT_LED_CONTROL 0x16 - /* Offset 0x18: IEEE Priority Mapping Table */ #define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE 0x18 #define MV88E6390_PORT_IEEE_PRIO_MAP_TABLE_UPDATE 0x8000 -- cgit v1.2.3-55-g7522 From bd80720468c46f4c712aaf0f3b319ec1440e0cf9 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:37 -0400 Subject: net: dsa: mv88e6xxx: fix 88E6321 family comment MV88E6XXX_FAMILY_6321 is undefined, 88E6321's family is 88E6320, fix this. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 53b088166c28..51f2797ecb52 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2959,7 +2959,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { }; static const struct mv88e6xxx_ops mv88e6321_ops = { - /* MV88E6XXX_FAMILY_6321 */ + /* MV88E6XXX_FAMILY_6320 */ .irl_init_all = mv88e6352_g2_irl_init_all, .get_eeprom = mv88e6xxx_g2_get_eeprom16, .set_eeprom = mv88e6xxx_g2_set_eeprom16, -- cgit v1.2.3-55-g7522 From 2466f64ae4e9d0bb80ffd73e5529911a535d6bc4 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:38 -0400 Subject: net: dsa: mv88e6xxx: remove unused capabilities Remove the forgotten capabilities and related flags from previous cleanups. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.h | 29 +++-------------------------- 1 file changed, 3 insertions(+), 26 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 086444016352..9ccf5d03346a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -109,11 +109,6 @@ enum mv88e6xxx_cap { MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */ MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */ - /* Switch Global (1) Registers. - */ - MV88E6XXX_CAP_G1_ATU_FID, /* (0x01) ATU FID Register */ - MV88E6XXX_CAP_G1_VTU_FID, /* (0x02) VTU FID Register */ - /* Switch Global 2 Registers. * The device contains a second set of global 16-bit registers. */ @@ -122,17 +117,6 @@ enum mv88e6xxx_cap { MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */ MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */ MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ - - /* Per VLAN Spanning Tree Unit (STU). - * The Port State database, if present, is accessed through VTU - * operations and dedicated SID registers. See MV88E6352_G1_VTU_SID. - */ - MV88E6XXX_CAP_STU, - - /* VLAN Table Unit. - * The VTU is used to program 802.1Q VLANs. See MV88E6XXX_G1_VTU_OP. 
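[Aside] The flags removed here belong to the driver's enum-plus-BIT_ULL capability scheme, which this series is gradually retiring in favour of ops callbacks and info fields. A minimal standalone rendition of that scheme, for readers following the chip.h hunks (toy capability names, not the real table):

#include <stdbool.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

enum chip_cap {               /* one enumerator per optional feature */
    CAP_EEE,
    CAP_GLOBAL2,
    CAP_G2_POT,
};

#define FLAG_EEE      BIT_ULL(CAP_EEE)
#define FLAG_GLOBAL2  BIT_ULL(CAP_GLOBAL2)
#define FLAG_G2_POT   BIT_ULL(CAP_G2_POT)

struct chip_info {
    const char *name;
    unsigned long long flags;   /* OR of FLAG_* for this family */
};

/* True only if every bit in mask is advertised by the family. */
static bool chip_has(const struct chip_info *info, unsigned long long mask)
{
    return (info->flags & mask) == mask;
}

int main(void)
{
    static const struct chip_info info = {
        .name  = "toy-family",
        .flags = FLAG_GLOBAL2 | FLAG_G2_POT,
    };

    printf("%s: pot=%d eee=%d\n", info.name,
           chip_has(&info, FLAG_G2_POT), chip_has(&info, FLAG_EEE));
    return 0;
}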
- */ - MV88E6XXX_CAP_VTU, }; /* Bitmask of capabilities */ @@ -141,8 +125,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD) #define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA) -#define MV88E6XXX_FLAG_G1_VTU_FID BIT_ULL(MV88E6XXX_CAP_G1_VTU_FID) - #define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2) #define MV88E6XXX_FLAG_G2_INT BIT_ULL(MV88E6XXX_CAP_G2_INT) #define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X) @@ -160,8 +142,7 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6097 \ - (MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ + (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ @@ -169,8 +150,7 @@ enum mv88e6xxx_cap { MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6165 \ - (MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ + (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ @@ -193,15 +173,13 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6341 \ (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_G1_VTU_FID | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6351 \ - (MV88E6XXX_FLAG_G1_VTU_FID | \ - MV88E6XXX_FLAG_GLOBAL2 | \ + (MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ @@ -210,7 +188,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6352 \ (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_G1_VTU_FID | \ MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ -- cgit v1.2.3-55-g7522 From 74e60241ce140ac8c312cde1dc823681c6a86f99 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:39 -0400 Subject: net: dsa: mv88e6xxx: remove 88E6185 G2 interrupt The 88E6185 family has no Global 2 Interrupt Source or Mask registers. Remove the MV88E6XXX_FLAG_G2_INT from MV88E6XXX_FLAGS_FAMILY_6185. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 9ccf5d03346a..8eab123f0fed 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -159,7 +159,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6185 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAGS_MULTI_CHIP) -- cgit v1.2.3-55-g7522 From d6c5e6aff50cadeab0b7e381ce50be836ae55097 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:40 -0400 Subject: net: dsa: mv88e6xxx: add number of Global 2 IRQs Similarly to g1_irqs, add a g2_irqs member to the info structure to indicates the presence of the Global 2 Interrupt Source and Mask registers. At the same time, provide helpers and document the registers since they differ a bit between 88E6352 and 88E6390 families. This allows us to get rid of the MV88E6XXX_FLAG_G2_INT flag. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 27 ++++++++++++++++++++++++--- drivers/net/dsa/mv88e6xxx/chip.h | 9 +-------- drivers/net/dsa/mv88e6xxx/global2.c | 22 ++++++++++++++++++++-- drivers/net/dsa/mv88e6xxx/global2.h | 19 +++++++++++++++++-- 4 files changed, 62 insertions(+), 15 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 51f2797ecb52..1be0bc5e7c3f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3207,6 +3207,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 8, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, @@ -3242,6 +3243,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 8, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3260,6 +3262,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3295,6 +3298,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 3750, .atu_move_port_mask = 0x1f, + .g2_irqs = 10, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6341, @@ -3312,6 +3316,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3330,6 +3335,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, @@ -3348,6 +3354,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3366,6 +3373,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3384,6 +3392,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3402,6 +3411,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3438,6 +3448,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .tag_protocol = DSA_TAG_PROTO_DSA, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .pvt = true, .atu_move_port_mask = 0x1f, .flags = MV88E6XXX_FLAGS_FAMILY_6390, @@ -3455,6 +3466,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, @@ -3473,6 +3485,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, .tag_protocol = 
DSA_TAG_PROTO_DSA, @@ -3491,6 +3504,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3509,6 +3523,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, @@ -3562,6 +3577,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 3750, .atu_move_port_mask = 0x1f, + .g2_irqs = 10, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, .flags = MV88E6XXX_FLAGS_FAMILY_6341, @@ -3579,6 +3595,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3597,6 +3614,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3615,6 +3633,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 15000, .g1_irqs = 9, + .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, .tag_protocol = DSA_TAG_PROTO_EDSA, @@ -3632,6 +3651,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, @@ -3649,6 +3669,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .age_time_coeff = 3750, .g1_irqs = 9, + .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, .tag_protocol = DSA_TAG_PROTO_DSA, @@ -3970,7 +3991,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) if (err) goto out; - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT)) { + if (chip->info->g2_irqs > 0) { err = mv88e6xxx_g2_irq_setup(chip); if (err) goto out_g1_irq; @@ -3990,7 +4011,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) out_mdio: mv88e6xxx_mdios_unregister(chip); out_g2_irq: - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT) && chip->irq > 0) + if (chip->info->g2_irqs > 0 && chip->irq > 0) mv88e6xxx_g2_irq_free(chip); out_g1_irq: if (chip->irq > 0) { @@ -4012,7 +4033,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_mdios_unregister(chip); if (chip->irq > 0) { - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT)) + if (chip->info->g2_irqs > 0) mv88e6xxx_g2_irq_free(chip); mv88e6xxx_g1_irq_free(chip); } diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 8eab123f0fed..2e760fd0ad24 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -113,7 +113,6 @@ enum mv88e6xxx_cap { * The device contains a second set of global 16-bit registers. 
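[Aside] Replacing the interrupt capability flag with a g2_irqs count lets a single info field both describe the interrupt controller and gate its setup, as the probe/remove hunks above show. A schematic userspace sketch of that per-chip-info pattern (stand-in names, not the driver code; the counts echo the table entries above):

#include <stdio.h>

struct chip_info {
    const char *name;
    unsigned int g1_irqs;   /* number of Global 1 interrupt sources */
    unsigned int g2_irqs;   /* 0 means: no Global 2 interrupt registers */
};

struct chip {
    const struct chip_info *info;
    int irq;                /* parent interrupt line, <= 0 if not wired */
};

/* Illustrative stand-in for the real Global 2 IRQ domain setup. */
static int g2_irq_setup(struct chip *chip)
{
    printf("%s: registering %u G2 interrupt sources\n",
           chip->info->name, chip->info->g2_irqs);
    return 0;
}

static int chip_irq_setup(struct chip *chip)
{
    /* Only chips that advertise G2 interrupts get the extra domain,
     * mirroring the chip->info->g2_irqs > 0 checks added above. */
    if (chip->irq > 0 && chip->info->g2_irqs > 0)
        return g2_irq_setup(chip);
    return 0;
}

int main(void)
{
    static const struct chip_info like_6352 = { "6352-like", 9, 10 };
    static const struct chip_info like_6185 = { "6185-like", 8, 0 };
    struct chip a = { &like_6352, 42 };
    struct chip b = { &like_6185, 42 };

    chip_irq_setup(&a);     /* sets up the Global 2 domain */
    chip_irq_setup(&b);     /* skipped: g2_irqs == 0       */
    return 0;
}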
*/ MV88E6XXX_CAP_GLOBAL2, - MV88E6XXX_CAP_G2_INT, /* (0x00) Interrupt Status */ MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */ MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */ MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ @@ -126,7 +125,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA) #define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2) -#define MV88E6XXX_FLAG_G2_INT BIT_ULL(MV88E6XXX_CAP_G2_INT) #define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X) #define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X) #define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT) @@ -143,7 +141,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6097 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ @@ -151,7 +148,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6165 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ @@ -173,13 +169,11 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6341 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6351 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ @@ -188,7 +182,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6352 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ @@ -197,7 +190,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6390 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_INT | \ MV88E6XXX_FLAGS_MULTI_CHIP) struct mv88e6xxx_ops; @@ -213,6 +205,7 @@ struct mv88e6xxx_info { unsigned int global1_addr; unsigned int age_time_coeff; unsigned int g1_irqs; + unsigned int g2_irqs; bool pvt; enum dsa_tag_protocol tag_protocol; unsigned long long flags; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 158d0f499874..be704c98dcbb 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -40,6 +40,21 @@ static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask) return mv88e6xxx_wait(chip, MV88E6XXX_G2, reg, mask); } +/* Offset 0x00: Interrupt Source Register */ + +static int mv88e6xxx_g2_int_source(struct mv88e6xxx_chip *chip, u16 *src) +{ + /* Read (and clear most of) the Interrupt Source bits */ + return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_INT_SRC, src); +} + +/* Offset 0x01: Interrupt Mask Register */ + +static int mv88e6xxx_g2_int_mask(struct mv88e6xxx_chip *chip, u16 mask) +{ + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_INT_MASK, mask); +} + /* Offset 0x02: Management Enable 2x */ /* Offset 0x03: Management Enable 0x */ @@ -933,7 +948,7 @@ static irqreturn_t mv88e6xxx_g2_irq_thread_fn(int irq, void *dev_id) u16 reg; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_INT_SOURCE, ®); + err = mv88e6xxx_g2_int_source(chip, ®); mutex_unlock(&chip->reg_lock); if (err) goto out; @@ -959,8 +974,11 @@ static void mv88e6xxx_g2_irq_bus_lock(struct irq_data *d) static void mv88e6xxx_g2_irq_bus_sync_unlock(struct irq_data *d) { struct 
mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d); + int err; - mv88e6xxx_g2_write(chip, MV88E6XXX_G2_INT_MASK, ~chip->g2_irq.masked); + err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked); + if (err) + dev_err(chip->dev, "failed to mask interrupts\n"); mutex_unlock(&chip->reg_lock); } diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 317ffd8f323d..7b21b2556af2 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -20,11 +20,26 @@ #define MV88E6XXX_G2 0x1c /* Offset 0x00: Interrupt Source Register */ -#define MV88E6XXX_G2_INT_SOURCE 0x00 +#define MV88E6XXX_G2_INT_SRC 0x00 +#define MV88E6XXX_G2_INT_SRC_WDOG 0x8000 +#define MV88E6XXX_G2_INT_SRC_JAM_LIMIT 0x4000 +#define MV88E6XXX_G2_INT_SRC_DUPLEX_MISMATCH 0x2000 +#define MV88E6XXX_G2_INT_SRC_WAKE_EVENT 0x1000 +#define MV88E6352_G2_INT_SRC_SERDES 0x0800 +#define MV88E6352_G2_INT_SRC_PHY 0x001f +#define MV88E6390_G2_INT_SRC_PHY 0x07fe + #define MV88E6XXX_G2_INT_SOURCE_WATCHDOG 15 /* Offset 0x01: Interrupt Mask Register */ -#define MV88E6XXX_G2_INT_MASK 0x01 +#define MV88E6XXX_G2_INT_MASK 0x01 +#define MV88E6XXX_G2_INT_MASK_WDOG 0x8000 +#define MV88E6XXX_G2_INT_MASK_JAM_LIMIT 0x4000 +#define MV88E6XXX_G2_INT_MASK_DUPLEX_MISMATCH 0x2000 +#define MV88E6XXX_G2_INT_MASK_WAKE_EVENT 0x1000 +#define MV88E6352_G2_INT_MASK_SERDES 0x0800 +#define MV88E6352_G2_INT_MASK_PHY 0x001f +#define MV88E6390_G2_INT_MASK_PHY 0x07fe /* Offset 0x02: MGMT Enable Register 2x */ #define MV88E6XXX_G2_MGMT_EN_2X 0x02 -- cgit v1.2.3-55-g7522 From 51c901a7756215561b331226f23054da056b8e42 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:41 -0400 Subject: net: dsa: mv88e6xxx: distinguish Global 2 Rsvd2CPU The 88E6185 family only has one 16-bit register to mark the 16 802.1D reserved multicast addresses in the range of 01:80:C2:00:00:0x as MGMT. The 88E6352 family also has one 16-bit register to mark the 16 GARP reserved multicast addresses in the range of 01:80:C2:00:00:2x as MGMT. Split the existing mv88e6095 prefixed mgmt_rsvd2cpu operation into two distinct mv88e6185 and mv88e6352 prefixed operations, and wrap its call into a mv88e6xxx_rsvd2cpu_setup helper. This allows us to also get rid of the MV88E6XXX_CAP_G2_MGMT_EN_* flags. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 55 ++++++++++++++++---------------- drivers/net/dsa/mv88e6xxx/chip.h | 17 ---------- drivers/net/dsa/mv88e6xxx/global2.c | 63 ++++++++++++++++++++++++++++--------- drivers/net/dsa/mv88e6xxx/global2.h | 11 +++++-- 4 files changed, 85 insertions(+), 61 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 1be0bc5e7c3f..874e2a154834 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -926,6 +926,14 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to update state\n", port); } +static int mv88e6xxx_rsvd2cpu_setup(struct mv88e6xxx_chip *chip) +{ + if (chip->info->ops->mgmt_rsvd2cpu) + return chip->info->ops->mgmt_rsvd2cpu(chip); + + return 0; +} + static int mv88e6xxx_atu_setup(struct mv88e6xxx_chip *chip) { int err; @@ -2142,16 +2150,9 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; - /* Some generations have the configuration of sending reserved - * management frames to the CPU in global2, others in - * global1. Hence it does not fit the two setup functions - * above. 
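[Aside] mv88e6xxx_rsvd2cpu_setup() above is the driver's usual shape for optional per-family hooks: the ops table may leave the pointer NULL and the wrapper treats that as a no-op. A condensed standalone version of the idiom (toy types, not the real chip structures):

#include <stdio.h>

struct chip;

struct chip_ops {
    /* Optional: families without the register set leave this NULL. */
    int (*mgmt_rsvd2cpu)(struct chip *chip);
};

struct chip {
    const char *name;
    const struct chip_ops *ops;
};

static int rsvd2cpu_setup(struct chip *chip)
{
    if (chip->ops->mgmt_rsvd2cpu)
        return chip->ops->mgmt_rsvd2cpu(chip);
    return 0;                      /* hook absent: silently succeed */
}

/* One example implementation, standing in for a family-specific variant. */
static int family_a_rsvd2cpu(struct chip *chip)
{
    printf("%s: marking reserved multicast addresses as MGMT\n", chip->name);
    return 0;
}

int main(void)
{
    const struct chip_ops with_hook    = { .mgmt_rsvd2cpu = family_a_rsvd2cpu };
    const struct chip_ops without_hook = { 0 };
    struct chip a = { "family-a", &with_hook };
    struct chip b = { "family-b", &without_hook };

    return rsvd2cpu_setup(&a) || rsvd2cpu_setup(&b);
}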
- */ - if (chip->info->ops->mgmt_rsvd2cpu) { - err = chip->info->ops->mgmt_rsvd2cpu(chip); - if (err) - goto unlock; - } + err = mv88e6xxx_rsvd2cpu_setup(chip); + if (err) + goto unlock; unlock: mutex_unlock(&chip->reg_lock); @@ -2385,7 +2386,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2408,7 +2409,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = { .stats_get_sset_count = mv88e6095_stats_get_sset_count, .stats_get_strings = mv88e6095_stats_get_strings, .stats_get_stats = mv88e6095_stats_get_stats, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2441,7 +2442,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2467,7 +2468,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2496,7 +2497,7 @@ static const struct mv88e6xxx_ops mv88e6131_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2563,7 +2564,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2587,7 +2588,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2619,7 +2620,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, 
@@ -2653,7 +2654,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2686,7 +2687,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2720,7 +2721,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2746,7 +2747,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6185_g2_mgmt_rsvd2cpu, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2884,7 +2885,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2952,7 +2953,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .stats_get_stats = mv88e6320_stats_get_stats, .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, @@ -3049,7 +3050,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3081,7 +3082,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3115,7 +3116,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, - .mgmt_rsvd2cpu = 
mv88e6095_g2_mgmt_rsvd2cpu, + .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 2e760fd0ad24..48233971759e 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -113,8 +113,6 @@ enum mv88e6xxx_cap { * The device contains a second set of global 16-bit registers. */ MV88E6XXX_CAP_GLOBAL2, - MV88E6XXX_CAP_G2_MGMT_EN_2X, /* (0x02) MGMT Enable Register 2x */ - MV88E6XXX_CAP_G2_MGMT_EN_0X, /* (0x03) MGMT Enable Register 0x */ MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ }; @@ -125,8 +123,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA) #define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2) -#define MV88E6XXX_FLAG_G2_MGMT_EN_2X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_2X) -#define MV88E6XXX_FLAG_G2_MGMT_EN_0X BIT_ULL(MV88E6XXX_CAP_G2_MGMT_EN_0X) #define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT) /* Multi-chip Addressing Mode */ @@ -136,33 +132,25 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6095 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6097 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6165 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6185 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6320 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) @@ -174,16 +162,12 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6351 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6352 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_MGMT_EN_2X | \ - MV88E6XXX_FLAG_G2_MGMT_EN_0X | \ MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) @@ -418,7 +402,6 @@ struct mv88e6xxx_ops { int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port); const struct mv88e6xxx_irq_ops *watchdog_ops; - /* Can be either in g1 or g2, so don't use a prefix */ int (*mgmt_rsvd2cpu)(struct mv88e6xxx_chip *chip); /* Power on/off a SERDES interface */ diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index be704c98dcbb..6b6ebbd6d322 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -56,29 +56,65 @@ static int mv88e6xxx_g2_int_mask(struct mv88e6xxx_chip *chip, u16 mask) } /* Offset 0x02: Management Enable 2x */ + +static int mv88e6xxx_g2_mgmt_enable_2x(struct mv88e6xxx_chip *chip, u16 en2x) +{ + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_2X, en2x); +} + /* Offset 0x03: Management Enable 0x */ -int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +static int mv88e6xxx_g2_mgmt_enable_0x(struct mv88e6xxx_chip *chip, u16 en0x) +{ + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_0X, en0x); +} + +/* Offset 0x05: Switch Management 
Register */ + +static int mv88e6xxx_g2_switch_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip, + bool enable) +{ + u16 val; + int err; + + err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SWITCH_MGMT, &val); + if (err) + return err; + + if (enable) + val |= MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU; + else + val &= ~MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU; + + return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, val); +} + +int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) { int err; /* Consider the frames with reserved multicast destination - * addresses matching 01:80:c2:00:00:2x as MGMT. + * addresses matching 01:80:c2:00:00:0x as MGMT. */ - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) { - err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_2X, 0xffff); - if (err) - return err; - } + err = mv88e6xxx_g2_mgmt_enable_0x(chip, 0xffff); + if (err) + return err; + + return mv88e6xxx_g2_switch_mgmt_rsvd2cpu(chip, true); +} + +int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + int err; /* Consider the frames with reserved multicast destination - * addresses matching 01:80:c2:00:00:0x as MGMT. + * addresses matching 01:80:c2:00:00:2x as MGMT. */ - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X)) - return mv88e6xxx_g2_write(chip, MV88E6XXX_G2_MGMT_EN_0X, - 0xffff); + err = mv88e6xxx_g2_mgmt_enable_2x(chip, 0xffff); + if (err) + return err; - return 0; + return mv88e6185_g2_mgmt_rsvd2cpu(chip); } /* Offset 0x06: Device Mapping Table register */ @@ -1081,9 +1117,6 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) * port at the highest priority. */ reg = MV88E6XXX_G2_SWITCH_MGMT_FORCE_FLOW_CTL_PRI | (0x7 << 4); - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_0X) || - mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_MGMT_EN_2X)) - reg |= MV88E6XXX_G2_SWITCH_MGMT_RSVD2CPU | 0x7; err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SWITCH_MGMT, reg); if (err) return err; diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 7b21b2556af2..487a81146c31 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -260,7 +260,9 @@ int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip); int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); -int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); + +int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; @@ -362,7 +364,12 @@ static inline void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip) { } -static inline int mv88e6095_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +static inline int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) +{ + return -EOPNOTSUPP; +} + +static inline int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) { return -EOPNOTSUPP; } -- cgit v1.2.3-55-g7522 From a2a05db8a5ed5f97d269155508da801646683ad6 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:42 -0400 Subject: net: dsa: mv88e6xxx: add POT flag to 88E6390 The 88E6390 family clear the Priority Override Table the same way as 88E6352, thus add MV88E6XXX_FLAG_G2_POT to MV88E6XXX_FLAGS_FAMILY_6390. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 48233971759e..52b52423df1f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -174,6 +174,7 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6390 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ + MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) struct mv88e6xxx_ops; -- cgit v1.2.3-55-g7522 From 9e907d739cc3caf3afa0af45835cb82ff929207c Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:43 -0400 Subject: net: dsa: mv88e6xxx: add POT operation Add a pot_clear operation to clear the Priority Override Table and wrap its call into a mv88e6xxx_pot_setup helper. This allows us to get rid of the MV88E6XXX_FLAG_G2_POT flag. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 34 ++++++++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/chip.h | 12 +++--------- drivers/net/dsa/mv88e6xxx/global2.c | 9 +-------- drivers/net/dsa/mv88e6xxx/global2.h | 7 +++++++ 4 files changed, 45 insertions(+), 17 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 874e2a154834..6351230f82ad 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -926,6 +926,14 @@ static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port, dev_err(ds->dev, "p%d: failed to update state\n", port); } +static int mv88e6xxx_pot_setup(struct mv88e6xxx_chip *chip) +{ + if (chip->info->ops->pot_clear) + return chip->info->ops->pot_clear(chip); + + return 0; +} + static int mv88e6xxx_rsvd2cpu_setup(struct mv88e6xxx_chip *chip) { if (chip->info->ops->mgmt_rsvd2cpu) @@ -2150,6 +2158,10 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) if (err) goto unlock; + err = mv88e6xxx_pot_setup(chip); + if (err) + goto unlock; + err = mv88e6xxx_rsvd2cpu_setup(chip); if (err) goto unlock; @@ -2387,6 +2399,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .ppu_enable = mv88e6185_g1_ppu_enable, .ppu_disable = mv88e6185_g1_ppu_disable, .reset = mv88e6185_g1_reset, @@ -2443,6 +2456,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2469,6 +2483,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2534,6 +2549,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2565,6 +2581,7 @@ static const struct 
mv88e6xxx_ops mv88e6161_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2589,6 +2606,7 @@ static const struct mv88e6xxx_ops mv88e6165_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2621,6 +2639,7 @@ static const struct mv88e6xxx_ops mv88e6171_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2655,6 +2674,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2688,6 +2708,7 @@ static const struct mv88e6xxx_ops mv88e6175_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2722,6 +2743,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2783,6 +2805,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2817,6 +2840,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2851,6 +2875,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2886,6 +2911,7 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, 
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -2921,6 +2947,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -2954,6 +2981,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, @@ -3019,6 +3047,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3051,6 +3080,7 @@ static const struct mv88e6xxx_ops mv88e6350_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3083,6 +3113,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3117,6 +3148,7 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .set_egress_port = mv88e6095_g1_set_egress_port, .watchdog_ops = &mv88e6097_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, @@ -3154,6 +3186,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, @@ -3190,6 +3223,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .set_egress_port = mv88e6390_g1_set_egress_port, .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, + .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 52b52423df1f..822286250aff 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -113,7 +113,6 @@ enum mv88e6xxx_cap { * The device contains a second set of global 16-bit registers. 
*/ MV88E6XXX_CAP_GLOBAL2, - MV88E6XXX_CAP_G2_POT, /* (0x0f) Priority Override Table */ }; /* Bitmask of capabilities */ @@ -123,7 +122,6 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA) #define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2) -#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT) /* Multi-chip Addressing Mode */ #define MV88E6XXX_FLAGS_MULTI_CHIP \ @@ -136,12 +134,10 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6097 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6165 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6185 \ @@ -151,30 +147,25 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAGS_FAMILY_6320 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6341 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6351 \ (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6352 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6390 \ (MV88E6XXX_FLAG_EEE | \ MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAG_G2_POT | \ MV88E6XXX_FLAGS_MULTI_CHIP) struct mv88e6xxx_ops; @@ -313,6 +304,9 @@ struct mv88e6xxx_ops { struct mii_bus *bus, int addr, int reg, u16 val); + /* Priority Override Table operations */ + int (*pot_clear)(struct mv88e6xxx_chip *chip); + /* PHY Polling Unit (PPU) operations */ int (*ppu_enable)(struct mv88e6xxx_chip *chip); int (*ppu_disable)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 6b6ebbd6d322..aaf98e818d0d 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -311,7 +311,7 @@ static int mv88e6xxx_g2_pot_write(struct mv88e6xxx_chip *chip, int pointer, return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_PRIO_OVERRIDE, val); } -static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip) +int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) { int i, err; @@ -1131,12 +1131,5 @@ int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip) if (err) return err; - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_POT)) { - /* Clear the priority override table. 
*/ - err = mv88e6xxx_g2_clear_pot(chip); - if (err) - return err; - } - return 0; } diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 487a81146c31..d89d7b810a45 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -264,6 +264,8 @@ void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip); int mv88e6185_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip); +int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip); + extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops; extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; @@ -374,6 +376,11 @@ static inline int mv88e6352_g2_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip) return -EOPNOTSUPP; } +static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip) +{ + return -EOPNOTSUPP; +} + static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {}; static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {}; -- cgit v1.2.3-55-g7522 From 9069c13a48675001c59e9864b25429aa7fb1c96a Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:44 -0400 Subject: net: dsa: mv88e6xxx: add a global2_addr info flag Similarly to global1_addr, add a global2_addr member in the info structure to describe the presence of the Global 2 Registers. This allows us to get rid of the MV88E6XXX_FLAG_GLOBAL2 flag. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 28 +++++++++++++++++++++++++++- drivers/net/dsa/mv88e6xxx/chip.h | 27 ++++++--------------------- drivers/net/dsa/mv88e6xxx/global2.c | 8 ++++---- drivers/net/dsa/mv88e6xxx/global2.h | 4 +--- 4 files changed, 38 insertions(+), 29 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 6351230f82ad..eb4871a66076 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2132,7 +2132,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds) goto unlock; /* Setup Switch Global 2 Registers */ - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_GLOBAL2)) { + if (chip->info->global2_addr) { err = mv88e6xxx_g2_setup(chip); if (err) goto unlock; @@ -3240,6 +3240,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .g2_irqs = 10, @@ -3259,6 +3260,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, @@ -3276,6 +3278,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .g2_irqs = 10, @@ -3295,6 +3298,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3314,6 +3318,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .atu_move_port_mask = 0xf, @@ -3331,6 +3336,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, 
.age_time_coeff = 3750, .atu_move_port_mask = 0x1f, .g2_irqs = 10, @@ -3349,6 +3355,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3368,6 +3375,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3387,6 +3395,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3406,6 +3415,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3425,6 +3435,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3444,6 +3455,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3463,6 +3475,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, @@ -3480,6 +3493,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .tag_protocol = DSA_TAG_PROTO_DSA, .age_time_coeff = 3750, .g1_irqs = 9, @@ -3499,6 +3513,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, @@ -3518,6 +3533,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, @@ -3537,6 +3553,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3556,6 +3573,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, @@ -3575,6 +3593,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, @@ -3593,6 +3612,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, @@ -3610,6 +3630,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .atu_move_port_mask = 0x1f, .g2_irqs = 10, @@ -3628,6 +3649,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, 
.global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3647,6 +3669,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3666,6 +3689,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 4095, .port_base_addr = 0x10, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 15000, .g1_irqs = 9, .g2_irqs = 10, @@ -3684,6 +3708,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, @@ -3702,6 +3727,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .max_vid = 8191, .port_base_addr = 0x0, .global1_addr = 0x1b, + .global2_addr = 0x1c, .age_time_coeff = 3750, .g1_irqs = 9, .g2_irqs = 14, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 822286250aff..15b793446400 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -108,11 +108,6 @@ enum mv88e6xxx_cap { */ MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */ MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */ - - /* Switch Global 2 Registers. - * The device contains a second set of global 16-bit registers. - */ - MV88E6XXX_CAP_GLOBAL2, }; /* Bitmask of capabilities */ @@ -121,51 +116,40 @@ enum mv88e6xxx_cap { #define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD) #define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA) -#define MV88E6XXX_FLAG_GLOBAL2 BIT_ULL(MV88E6XXX_CAP_GLOBAL2) - /* Multi-chip Addressing Mode */ #define MV88E6XXX_FLAGS_MULTI_CHIP \ (MV88E6XXX_FLAG_SMI_CMD | \ MV88E6XXX_FLAG_SMI_DATA) #define MV88E6XXX_FLAGS_FAMILY_6095 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAGS_MULTI_CHIP) + (MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6097 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAGS_MULTI_CHIP) + (MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6165 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAGS_MULTI_CHIP) + (MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6185 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAGS_MULTI_CHIP) + (MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6320 \ (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6341 \ (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6351 \ - (MV88E6XXX_FLAG_GLOBAL2 | \ - MV88E6XXX_FLAGS_MULTI_CHIP) + (MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6352 \ (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6390 \ (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAG_GLOBAL2 | \ MV88E6XXX_FLAGS_MULTI_CHIP) struct mv88e6xxx_ops; @@ -179,6 +163,7 @@ struct mv88e6xxx_info { unsigned int max_vid; unsigned int port_base_addr; unsigned int global1_addr; + unsigned int global2_addr; unsigned int age_time_coeff; unsigned int g1_irqs; unsigned int g2_irqs; diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index aaf98e818d0d..16f556261022 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -22,22 +22,22 @@ static int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val) { - return mv88e6xxx_read(chip, MV88E6XXX_G2, 
reg, val); + return mv88e6xxx_read(chip, chip->info->global2_addr, reg, val); } static int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val) { - return mv88e6xxx_write(chip, MV88E6XXX_G2, reg, val); + return mv88e6xxx_write(chip, chip->info->global2_addr, reg, val); } static int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update) { - return mv88e6xxx_update(chip, MV88E6XXX_G2, reg, update); + return mv88e6xxx_update(chip, chip->info->global2_addr, reg, update); } static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask) { - return mv88e6xxx_wait(chip, MV88E6XXX_G2, reg, mask); + return mv88e6xxx_wait(chip, chip->info->global2_addr, reg, mask); } /* Offset 0x00: Interrupt Source Register */ diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index d89d7b810a45..669f59017b12 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h @@ -17,8 +17,6 @@ #include "chip.h" -#define MV88E6XXX_G2 0x1c - /* Offset 0x00: Interrupt Source Register */ #define MV88E6XXX_G2_INT_SRC 0x00 #define MV88E6XXX_G2_INT_SRC_WDOG 0x8000 @@ -273,7 +271,7 @@ extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops; static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip) { - if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_GLOBAL2)) { + if (chip->info->global2_addr) { dev_err(chip->dev, "this chip requires CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 enabled\n"); return -EOPNOTSUPP; } -- cgit v1.2.3-55-g7522 From 68b8f60cf70d57459a75c25ccd78cf0cbd4637f9 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:45 -0400 Subject: net: dsa: mv88e6xxx: add Energy Detect ops The 88E6352 family supports Energy Detect and has one bit for Sense and one bit for periodically transmit NLP (Energy Detect+TM). The 88E6390 family adds another bit to distinguish Auto or SW wake-up. Chips supporting EEE all have an EEE Enabled bit in the Port Status Register. This patch adds new ops for the PHY Energy Detect accesses. This also allows us to get rid of the MV88E6XXX_FLAG_EEE flag. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 87 ++++++++++++++++++++++-------------- drivers/net/dsa/mv88e6xxx/chip.h | 24 +++++----- drivers/net/dsa/mv88e6xxx/phy.c | 96 ++++++++++++++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx/phy.h | 22 +++++++++ drivers/net/dsa/mv88e6xxx/port.c | 17 +++++++ drivers/net/dsa/mv88e6xxx/port.h | 3 ++ 6 files changed, 202 insertions(+), 47 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index eb4871a66076..be61983dfed4 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -810,31 +810,40 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, mutex_unlock(&chip->reg_lock); } -static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) +static int mv88e6xxx_energy_detect_read(struct mv88e6xxx_chip *chip, int port, + struct ethtool_eee *eee) { - struct mv88e6xxx_chip *chip = ds->priv; - u16 reg; int err; - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) + if (!chip->info->ops->phy_energy_detect_read) return -EOPNOTSUPP; - mutex_lock(&chip->reg_lock); - - err = mv88e6xxx_phy_read(chip, port, 16, ®); + /* assign eee->eee_enabled and eee->tx_lpi_enabled */ + err = chip->info->ops->phy_energy_detect_read(chip, port, eee); if (err) - goto out; + return err; - e->eee_enabled = !!(reg & 0x0200); - e->tx_lpi_enabled = !!(reg & 0x0100); + /* assign eee->eee_active */ + return mv88e6xxx_port_status_eee(chip, port, eee); +} - err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, ®); - if (err) - goto out; +static int mv88e6xxx_energy_detect_write(struct mv88e6xxx_chip *chip, int port, + struct ethtool_eee *eee) +{ + if (!chip->info->ops->phy_energy_detect_write) + return -EOPNOTSUPP; - e->eee_active = !!(reg & MV88E6352_PORT_STS_EEE); -out: + return chip->info->ops->phy_energy_detect_write(chip, port, eee); +} + +static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err; + + mutex_lock(&chip->reg_lock); + err = mv88e6xxx_energy_detect_read(chip, port, e); mutex_unlock(&chip->reg_lock); return err; @@ -844,26 +853,10 @@ static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e) { struct mv88e6xxx_chip *chip = ds->priv; - u16 reg; int err; - if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_EEE)) - return -EOPNOTSUPP; - mutex_lock(&chip->reg_lock); - - err = mv88e6xxx_phy_read(chip, port, 16, ®); - if (err) - goto out; - - reg &= ~0x0300; - if (e->eee_enabled) - reg |= 0x0200; - if (e->tx_lpi_enabled) - reg |= 0x0100; - - err = mv88e6xxx_phy_write(chip, port, 16, reg); -out: + err = mv88e6xxx_energy_detect_write(chip, port, e); mutex_unlock(&chip->reg_lock); return err; @@ -2528,6 +2521,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -2653,6 +2648,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, + .phy_energy_detect_write = 
mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -2722,6 +2719,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -2785,6 +2784,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -2820,6 +2821,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -2855,6 +2858,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -2890,6 +2895,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -2926,6 +2933,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -2962,6 +2971,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -2995,6 +3006,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .set_switch_mac 
= mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3026,6 +3039,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -3127,6 +3142,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -3163,6 +3180,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -3201,6 +3220,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, + .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, + .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 15b793446400..3fbee01d2d84 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -98,10 +98,6 @@ enum mv88e6xxx_family { }; enum mv88e6xxx_cap { - /* Energy Efficient Ethernet. - */ - MV88E6XXX_CAP_EEE, - /* Multi-chip Addressing Mode. * Some chips respond to only 2 registers of its own SMI device address * when it is non-zero, and use indirect access to internal registers. 
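For illustration, here is a minimal, self-contained sketch of the conversion pattern this series applies; the example_* names below are invented stand-ins rather than the driver's real types. The idea is that testing whether a per-chip op pointer is populated replaces a capability bit such as MV88E6XXX_FLAG_EEE, and the same test doubles as the -EOPNOTSUPP check. The earlier mgmt_rsvd2cpu and pot_clear conversions in this series follow the same shape.

#include <errno.h>

struct example_chip;

/* Per-chip operations: a NULL op now expresses "capability absent",
 * replacing a MV88E6XXX_FLAG_* bit in a flags bitmap. */
struct example_ops {
	int (*phy_energy_detect_read)(struct example_chip *chip, int phy);
};

struct example_info {
	const struct example_ops *ops;
};

struct example_chip {
	const struct example_info *info;
};

/* Caller-side dispatch: chips whose ops table leaves the pointer unset
 * simply report "not supported"; no separate capability flag is needed. */
static int example_energy_detect_read(struct example_chip *chip, int phy)
{
	if (!chip->info->ops->phy_energy_detect_read)
		return -EOPNOTSUPP;

	return chip->info->ops->phy_energy_detect_read(chip, phy);
}
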
@@ -111,8 +107,6 @@ enum mv88e6xxx_cap { }; /* Bitmask of capabilities */ -#define MV88E6XXX_FLAG_EEE BIT_ULL(MV88E6XXX_CAP_EEE) - #define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD) #define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA) @@ -134,23 +128,19 @@ enum mv88e6xxx_cap { (MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6320 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAGS_MULTI_CHIP) + (MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6341 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAGS_MULTI_CHIP) + (MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6351 \ (MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6352 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAGS_MULTI_CHIP) + (MV88E6XXX_FLAGS_MULTI_CHIP) #define MV88E6XXX_FLAGS_FAMILY_6390 \ - (MV88E6XXX_FLAG_EEE | \ - MV88E6XXX_FLAGS_MULTI_CHIP) + (MV88E6XXX_FLAGS_MULTI_CHIP) struct mv88e6xxx_ops; @@ -289,6 +279,12 @@ struct mv88e6xxx_ops { struct mii_bus *bus, int addr, int reg, u16 val); + /* Copper Energy Detect operations */ + int (*phy_energy_detect_read)(struct mv88e6xxx_chip *chip, int phy, + struct ethtool_eee *eee); + int (*phy_energy_detect_write)(struct mv88e6xxx_chip *chip, int phy, + struct ethtool_eee *eee); + /* Priority Override Table operations */ int (*pot_clear)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 436668bd50dc..317ae89cfa68 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -246,3 +246,99 @@ int mv88e6xxx_phy_setup(struct mv88e6xxx_chip *chip) { return mv88e6xxx_phy_ppu_enable(chip); } + +/* Page 0, Register 16: Copper Specific Control Register 1 */ + +int mv88e6352_phy_energy_detect_read(struct mv88e6xxx_chip *chip, int phy, + struct ethtool_eee *eee) +{ + u16 val; + int err; + + err = mv88e6xxx_phy_read(chip, phy, MV88E6XXX_PHY_CSCTL1, &val); + if (err) + return err; + + val &= MV88E6352_PHY_CSCTL1_ENERGY_DETECT_MASK; + + eee->eee_enabled = false; + eee->tx_lpi_enabled = false; + + switch (val) { + case MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP: + eee->tx_lpi_enabled = true; + /* fall through... */ + case MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV: + eee->eee_enabled = true; + } + + return 0; +} + +int mv88e6352_phy_energy_detect_write(struct mv88e6xxx_chip *chip, int phy, + struct ethtool_eee *eee) +{ + u16 val; + int err; + + err = mv88e6xxx_phy_read(chip, phy, MV88E6XXX_PHY_CSCTL1, &val); + if (err) + return err; + + val &= ~MV88E6352_PHY_CSCTL1_ENERGY_DETECT_MASK; + + if (eee->eee_enabled) + val |= MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV; + if (eee->tx_lpi_enabled) + val |= MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP; + + return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_CSCTL1, val); +} + +int mv88e6390_phy_energy_detect_read(struct mv88e6xxx_chip *chip, int phy, + struct ethtool_eee *eee) +{ + u16 val; + int err; + + err = mv88e6xxx_phy_read(chip, phy, MV88E6XXX_PHY_CSCTL1, &val); + if (err) + return err; + + val &= MV88E6390_PHY_CSCTL1_ENERGY_DETECT_MASK; + + eee->eee_enabled = false; + eee->tx_lpi_enabled = false; + + switch (val) { + case MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP_AUTO: + case MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP_SW: + eee->tx_lpi_enabled = true; + /* fall through... 
*/ + case MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV_AUTO: + case MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV_SW: + eee->eee_enabled = true; + } + + return 0; +} + +int mv88e6390_phy_energy_detect_write(struct mv88e6xxx_chip *chip, int phy, + struct ethtool_eee *eee) +{ + u16 val; + int err; + + err = mv88e6xxx_phy_read(chip, phy, MV88E6XXX_PHY_CSCTL1, &val); + if (err) + return err; + + val &= ~MV88E6390_PHY_CSCTL1_ENERGY_DETECT_MASK; + + if (eee->eee_enabled) + val |= MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV_AUTO; + if (eee->tx_lpi_enabled) + val |= MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP_AUTO; + + return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_CSCTL1, val); +} diff --git a/drivers/net/dsa/mv88e6xxx/phy.h b/drivers/net/dsa/mv88e6xxx/phy.h index 556b74a0502a..988802799ad6 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.h +++ b/drivers/net/dsa/mv88e6xxx/phy.h @@ -17,6 +17,19 @@ #define MV88E6XXX_PHY_PAGE 0x16 #define MV88E6XXX_PHY_PAGE_COPPER 0x00 +/* Page 0, Register 16: Copper Specific Control Register 1 */ +#define MV88E6XXX_PHY_CSCTL1 16 +#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_MASK 0x0300 +#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_OFF_MASK 0x0100 /* 0x */ +#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV 0x0200 +#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP 0x0300 +#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_MASK 0x0380 +#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_OFF_MASK 0x0180 /* 0xx */ +#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV_AUTO 0x0200 +#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV_SW 0x0280 +#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP_AUTO 0x0300 +#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP_SW 0x0380 + /* PHY Registers accesses implementations */ int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus, int addr, int reg, u16 *val); @@ -40,4 +53,13 @@ void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip); void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip); int mv88e6xxx_phy_setup(struct mv88e6xxx_chip *chip); +int mv88e6352_phy_energy_detect_read(struct mv88e6xxx_chip *chip, int phy, + struct ethtool_eee *eee); +int mv88e6352_phy_energy_detect_write(struct mv88e6xxx_chip *chip, int phy, + struct ethtool_eee *eee); +int mv88e6390_phy_energy_detect_read(struct mv88e6xxx_chip *chip, int phy, + struct ethtool_eee *eee); +int mv88e6390_phy_energy_detect_write(struct mv88e6xxx_chip *chip, int phy, + struct ethtool_eee *eee); + #endif /*_MV88E6XXX_PHY_H */ diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index a7801f6668a5..2837a9128557 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -35,6 +35,23 @@ int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, return mv88e6xxx_write(chip, addr, reg, val); } +/* Offset 0x00: Port Status Register */ + +int mv88e6xxx_port_status_eee(struct mv88e6xxx_chip *chip, int port, + struct ethtool_eee *eee) +{ + u16 val; + int err; + + err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &val); + if (err) + return err; + + eee->eee_active = !!(val & MV88E6352_PORT_STS_EEE); + + return 0; +} + /* Offset 0x01: MAC (or PCS or Physical) Control Register * * Link, Duplex and Flow Control have one force bit, one value bit. 
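For reference, the 88E6352 Energy Detect decode above can be summarized in a small stand-alone sketch. The MV88E6352_* constants are copied from the phy.h hunk; example_eee and example_csctl1_decode are invented stand-ins for ethtool_eee and mv88e6352_phy_energy_detect_read(), so this is an illustration of the bit layout rather than kernel code.

#include <stdbool.h>
#include <stdint.h>

/* Constants as defined in the phy.h hunk above (bits 9:8 of CSCTL1). */
#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_MASK		0x0300
#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV	0x0200
#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP	0x0300

/* Stand-in for the two ethtool_eee fields the driver fills in. */
struct example_eee {
	bool eee_enabled;
	bool tx_lpi_enabled;
};

/* 00b/01b: energy detect off; 10b: sense on receive (EEE enabled);
 * 11b: sense plus periodic NLP transmit (EEE and TX LPI enabled). */
static void example_csctl1_decode(uint16_t csctl1, struct example_eee *eee)
{
	uint16_t val = csctl1 & MV88E6352_PHY_CSCTL1_ENERGY_DETECT_MASK;

	eee->eee_enabled = false;
	eee->tx_lpi_enabled = false;

	switch (val) {
	case MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP:
		eee->tx_lpi_enabled = true;
		/* fall through */
	case MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV:
		eee->eee_enabled = true;
	}
}

In other words, a raw CSCTL1 value of 0x0300 reports both eee_enabled and tx_lpi_enabled, which is what get_eee now returns through the new phy_energy_detect_read op together with the eee_active bit read from the Port Status register above.
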
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index b16d5f0e6e9c..6fcab309cd85 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -241,6 +241,9 @@ int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, u16 val); +int mv88e6xxx_port_status_eee(struct mv88e6xxx_chip *chip, int port, + struct ethtool_eee *eee); + int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port, -- cgit v1.2.3-55-g7522 From b3e05aa12319f01ce5db6cb80402b554de02cc3d Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 17 Jul 2017 13:03:46 -0400 Subject: net: dsa: mv88e6xxx: add a multi_chip info flag Instead of relying on a bitmap flag, add a new multi_chip info flag to describe the presence of the indirect SMI access though the two device registers 0x0 and 0x1. All remaining capabilities and flags are now unused. Remove the mv88e6xxx_cap enum and the info flags bitmaps. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 54 ++++++++++++++++++------------------- drivers/net/dsa/mv88e6xxx/chip.h | 58 +++++----------------------------------- 2 files changed, 33 insertions(+), 79 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index be61983dfed4..947ea352a57a 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3267,8 +3267,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6097, .ops = &mv88e6085_ops, }, @@ -3285,8 +3285,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6095, .ops = &mv88e6095_ops, }, @@ -3305,8 +3305,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6097, .ops = &mv88e6097_ops, }, @@ -3325,8 +3325,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6123_ops, }, @@ -3343,8 +3343,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 9, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6185, .ops = &mv88e6131_ops, }, @@ -3362,8 +3362,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0x1f, .g2_irqs = 10, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6341, .ops = &mv88e6141_ops, }, @@ -3382,8 +3382,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6161_ops, }, @@ -3402,8 +3402,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, 
.atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6165, .ops = &mv88e6165_ops, }, @@ -3422,8 +3422,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6171_ops, }, @@ -3442,8 +3442,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6172_ops, }, @@ -3462,8 +3462,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6175_ops, }, @@ -3482,8 +3482,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6176_ops, }, @@ -3500,8 +3500,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6185, .ops = &mv88e6185_ops, }, @@ -3520,8 +3520,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g1_irqs = 9, .g2_irqs = 14, .pvt = true, + .multi_chip = true, .atu_move_port_mask = 0x1f, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6190_ops, }, @@ -3540,8 +3540,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6190x_ops, }, @@ -3560,8 +3560,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6191_ops, }, @@ -3580,8 +3580,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6240_ops, }, @@ -3600,8 +3600,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6290_ops, }, @@ -3619,8 +3619,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g1_irqs = 8, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6320, .ops = &mv88e6320_ops, }, @@ -3637,8 +3637,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .age_time_coeff = 15000, .g1_irqs = 8, .atu_move_port_mask = 0xf, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6320, .ops = &mv88e6321_ops, }, @@ -3656,8 +3656,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .atu_move_port_mask = 0x1f, .g2_irqs = 10, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6341, .ops = &mv88e6341_ops, }, @@ -3676,8 +3676,8 @@ static const 
struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6350_ops, }, @@ -3696,8 +3696,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6351, .ops = &mv88e6351_ops, }, @@ -3716,8 +3716,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 10, .atu_move_port_mask = 0xf, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_EDSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6352, .ops = &mv88e6352_ops, }, [MV88E6390] = { @@ -3735,8 +3735,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6390_ops, }, [MV88E6390X] = { @@ -3754,8 +3754,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g2_irqs = 14, .atu_move_port_mask = 0x1f, .pvt = true, + .multi_chip = true, .tag_protocol = DSA_TAG_PROTO_DSA, - .flags = MV88E6XXX_FLAGS_FAMILY_6390, .ops = &mv88e6390x_ops, }, }; @@ -3825,7 +3825,7 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip, { if (sw_addr == 0) chip->smi_ops = &mv88e6xxx_smi_single_chip_ops; - else if (mv88e6xxx_has(chip, MV88E6XXX_FLAGS_MULTI_CHIP)) + else if (chip->info->multi_chip) chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops; else return -EINVAL; diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 3fbee01d2d84..9111e1316250 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -97,51 +97,6 @@ enum mv88e6xxx_family { MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */ }; -enum mv88e6xxx_cap { - /* Multi-chip Addressing Mode. - * Some chips respond to only 2 registers of its own SMI device address - * when it is non-zero, and use indirect access to internal registers. - */ - MV88E6XXX_CAP_SMI_CMD, /* (0x00) SMI Command */ - MV88E6XXX_CAP_SMI_DATA, /* (0x01) SMI Data */ -}; - -/* Bitmask of capabilities */ -#define MV88E6XXX_FLAG_SMI_CMD BIT_ULL(MV88E6XXX_CAP_SMI_CMD) -#define MV88E6XXX_FLAG_SMI_DATA BIT_ULL(MV88E6XXX_CAP_SMI_DATA) - -/* Multi-chip Addressing Mode */ -#define MV88E6XXX_FLAGS_MULTI_CHIP \ - (MV88E6XXX_FLAG_SMI_CMD | \ - MV88E6XXX_FLAG_SMI_DATA) - -#define MV88E6XXX_FLAGS_FAMILY_6095 \ - (MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6097 \ - (MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6165 \ - (MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6185 \ - (MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6320 \ - (MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6341 \ - (MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6351 \ - (MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6352 \ - (MV88E6XXX_FLAGS_MULTI_CHIP) - -#define MV88E6XXX_FLAGS_FAMILY_6390 \ - (MV88E6XXX_FLAGS_MULTI_CHIP) - struct mv88e6xxx_ops; struct mv88e6xxx_info { @@ -158,8 +113,13 @@ struct mv88e6xxx_info { unsigned int g1_irqs; unsigned int g2_irqs; bool pvt; + + /* Multi-chip Addressing Mode. + * Some chips respond to only 2 registers of its own SMI device address + * when it is non-zero, and use indirect access to internal registers. 
+ */ + bool multi_chip; enum dsa_tag_protocol tag_protocol; - unsigned long long flags; /* Mask for FromPort and ToPort value of PortVec used in ATU Move * operation. 0 means that the ATU Move operation is not supported. @@ -410,12 +370,6 @@ struct mv88e6xxx_hw_stat { int type; }; -static inline bool mv88e6xxx_has(struct mv88e6xxx_chip *chip, - unsigned long flags) -{ - return (chip->info->flags & flags) == flags; -} - static inline bool mv88e6xxx_has_pvt(struct mv88e6xxx_chip *chip) { return chip->info->pvt; -- cgit v1.2.3-55-g7522 From 0ab10314747364e621ab95b528c2bd874ff3f528 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 13:57:18 +0200 Subject: drivers: net: add missing interrupt.h include these drivers use tasklets or irq apis, but don't include interrupt.h. Once flow cache is removed the implicit interrupt.h inclusion goes away which will break the build. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- drivers/net/arcnet/arcdevice.h | 2 +- drivers/net/ethernet/amd/xgbe/xgbe.h | 1 + drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 1 + drivers/net/ieee802154/ca8210.c | 1 + 4 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index cbb4f8566bbe..d09b2b46ab63 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -20,7 +20,7 @@ #include #ifdef __KERNEL__ -#include +#include /* * RECON_THRESHOLD is the maximum number of RECON messages to receive diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 0938294f640a..e9282c924621 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -129,6 +129,7 @@ #include #include #include +#include #define XGBE_DRV_NAME "amd-xgbe" #define XGBE_DRV_VERSION "1.0.3" diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 3b91257683bc..e1b55b8fb8e0 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -17,6 +17,7 @@ #include #include +#include #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index a626c539fb17..326243fae7e2 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -66,6 +66,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3-55-g7522 From 6b1c42e9726bdb00370342909d95efdc331d10ac Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 13:57:19 +0200 Subject: vti: revert flush x-netns xfrm cache when vti interface is removed flow cache is removed in next commit. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- net/ipv4/ip_vti.c | 31 ------------------------------- net/ipv6/ip6_vti.c | 31 ------------------------------- 2 files changed, 62 deletions(-) diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 0192c255e508..5ed63d250950 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -584,33 +584,6 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = { .get_link_net = ip_tunnel_get_link_net, }; -static bool is_vti_tunnel(const struct net_device *dev) -{ - return dev->netdev_ops == &vti_netdev_ops; -} - -static int vti_device_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct ip_tunnel *tunnel = netdev_priv(dev); - - if (!is_vti_tunnel(dev)) - return NOTIFY_DONE; - - switch (event) { - case NETDEV_DOWN: - if (!net_eq(tunnel->net, dev_net(dev))) - xfrm_garbage_collect(tunnel->net); - break; - } - return NOTIFY_DONE; -} - -static struct notifier_block vti_notifier_block __read_mostly = { - .notifier_call = vti_device_event, -}; - static int __init vti_init(void) { const char *msg; @@ -618,8 +591,6 @@ static int __init vti_init(void) pr_info("IPv4 over IPsec tunneling driver\n"); - register_netdevice_notifier(&vti_notifier_block); - msg = "tunnel device"; err = register_pernet_device(&vti_net_ops); if (err < 0) @@ -652,7 +623,6 @@ xfrm_proto_ah_failed: xfrm_proto_esp_failed: unregister_pernet_device(&vti_net_ops); pernet_dev_failed: - unregister_netdevice_notifier(&vti_notifier_block); pr_err("vti init: failed to register %s\n", msg); return err; } @@ -664,7 +634,6 @@ static void __exit vti_fini(void) xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP); unregister_pernet_device(&vti_net_ops); - unregister_netdevice_notifier(&vti_notifier_block); } module_init(vti_init); diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 486c2305f53c..79444a4bfd6d 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -1145,33 +1145,6 @@ static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = { .priority = 100, }; -static bool is_vti6_tunnel(const struct net_device *dev) -{ - return dev->netdev_ops == &vti6_netdev_ops; -} - -static int vti6_device_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct ip6_tnl *t = netdev_priv(dev); - - if (!is_vti6_tunnel(dev)) - return NOTIFY_DONE; - - switch (event) { - case NETDEV_DOWN: - if (!net_eq(t->net, dev_net(dev))) - xfrm_garbage_collect(t->net); - break; - } - return NOTIFY_DONE; -} - -static struct notifier_block vti6_notifier_block __read_mostly = { - .notifier_call = vti6_device_event, -}; - /** * vti6_tunnel_init - register protocol and reserve needed resources * @@ -1182,8 +1155,6 @@ static int __init vti6_tunnel_init(void) const char *msg; int err; - register_netdevice_notifier(&vti6_notifier_block); - msg = "tunnel device"; err = register_pernet_device(&vti6_net_ops); if (err < 0) @@ -1216,7 +1187,6 @@ xfrm_proto_ah_failed: xfrm_proto_esp_failed: unregister_pernet_device(&vti6_net_ops); pernet_dev_failed: - unregister_netdevice_notifier(&vti6_notifier_block); pr_err("vti6 init: failed to register %s\n", msg); return err; } @@ -1231,7 +1201,6 @@ static void __exit vti6_tunnel_cleanup(void) xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH); xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP); unregister_pernet_device(&vti6_net_ops); - 
unregister_netdevice_notifier(&vti6_notifier_block); } module_init(vti6_tunnel_init); -- cgit v1.2.3-55-g7522 From 3c2a89ddc11896cf5498115c0380ab54b1c424b7 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 13:57:20 +0200 Subject: net: xfrm: revert to lower xfrm dst gc limit revert c386578f1cdb4dac230395 ("xfrm: Let the flowcache handle its size by default."). Once we remove flow cache, we don't have a flow cache limit anymore. We must not allow (virtually) unlimited allocations of xfrm dst entries. Revert back to the old xfrm dst gc limits. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 6 ++---- net/ipv4/xfrm4_policy.c | 2 +- net/ipv6/xfrm6_policy.c | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 974ab47ae53a..f485d553e65c 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -1291,8 +1291,7 @@ tag - INTEGER xfrm4_gc_thresh - INTEGER The threshold at which we will start garbage collecting for IPv4 destination cache entries. At twice this value the system will - refuse new allocations. The value must be set below the flowcache - limit (4096 * number of online cpus) to take effect. + refuse new allocations. igmp_link_local_mcast_reports - BOOLEAN Enable IGMP reports for link local multicast groups in the @@ -1778,8 +1777,7 @@ ratelimit - INTEGER xfrm6_gc_thresh - INTEGER The threshold at which we will start garbage collecting for IPv6 destination cache entries. At twice this value the system will - refuse new allocations. The value must be set below the flowcache - limit (4096 * number of online cpus) to take effect. + refuse new allocations. IPv6 Update by: diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 71b4ecc195c7..19455a5fc328 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -266,7 +266,7 @@ static struct dst_ops xfrm4_dst_ops_template = { .destroy = xfrm4_dst_destroy, .ifdown = xfrm4_dst_ifdown, .local_out = __ip_local_out, - .gc_thresh = INT_MAX, + .gc_thresh = 32768, }; static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = { diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 79651bc71bf0..ae30dc4973e8 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -286,7 +286,7 @@ static struct dst_ops xfrm6_dst_ops_template = { .destroy = xfrm6_dst_destroy, .ifdown = xfrm6_dst_ifdown, .local_out = __ip6_local_out, - .gc_thresh = INT_MAX, + .gc_thresh = 32768, }; static const struct xfrm_policy_afinfo xfrm6_policy_afinfo = { -- cgit v1.2.3-55-g7522 From 3ca28286ea809685d273d41674da34f45111482a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 13:57:21 +0200 Subject: xfrm_policy: bypass flow_cache_lookup Instead of consulting flow cache, call the xfrm bundle/policy lookup functions directly. This pretends the flow cache had no entry. This helps to gradually remove flow cache integration, followup commit will remove the dead code that this change adds. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- net/xfrm/xfrm_policy.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index ff61d8557929..1c7126ab752c 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2052,13 +2052,12 @@ free_dst: } static struct flow_cache_object * -xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, - struct flow_cache_object *oldflo, void *ctx) +xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, struct xfrm_flo *xflo) { - struct xfrm_flo *xflo = (struct xfrm_flo *)ctx; struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; struct xfrm_dst *xdst, *new_xdst; int num_pols = 0, num_xfrms = 0, i, err, pol_dead; + struct flow_cache_object *oldflo = NULL; /* Check if the policies from old bundle are usable */ xdst = NULL; @@ -2128,8 +2127,6 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, dst_release_immediate(&xdst->u.dst); } - /* We do need to return one reference for original caller */ - dst_hold(&new_xdst->u.dst); return &new_xdst->flo; make_dummy_bundle: @@ -2242,8 +2239,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, !net->xfrm.policy_count[XFRM_POLICY_OUT]) goto nopol; - flo = flow_cache_lookup(net, fl, family, dir, - xfrm_bundle_lookup, &xflo); + flo = xfrm_bundle_lookup(net, fl, family, dir, &xflo); if (flo == NULL) goto nopol; if (IS_ERR(flo)) { @@ -2489,8 +2485,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, if (!pol) { struct flow_cache_object *flo; - flo = flow_cache_lookup(net, &fl, family, fl_dir, - xfrm_policy_lookup, NULL); + flo = xfrm_policy_lookup(net, &fl, family, dir, NULL, NULL); + if (IS_ERR_OR_NULL(flo)) pol = ERR_CAST(flo); else -- cgit v1.2.3-55-g7522 From 855dad99c07434065c0f21be6185e9cd8a6daab0 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 13:57:22 +0200 Subject: xfrm_policy: remove always true/false branches after previous change oldflo and xdst are always NULL. These branches were already removed by gcc, this doesn't change code. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- net/xfrm/xfrm_policy.c | 74 ++++++++++---------------------------------------- 1 file changed, 14 insertions(+), 60 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 1c7126ab752c..19d457db3a09 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2056,48 +2056,23 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, { struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; struct xfrm_dst *xdst, *new_xdst; - int num_pols = 0, num_xfrms = 0, i, err, pol_dead; - struct flow_cache_object *oldflo = NULL; + int num_pols = 0, num_xfrms = 0, err; /* Check if the policies from old bundle are usable */ xdst = NULL; - if (oldflo) { - xdst = container_of(oldflo, struct xfrm_dst, flo); - num_pols = xdst->num_pols; - num_xfrms = xdst->num_xfrms; - pol_dead = 0; - for (i = 0; i < num_pols; i++) { - pols[i] = xdst->pols[i]; - pol_dead |= pols[i]->walk.dead; - } - if (pol_dead) { - /* Mark DST_OBSOLETE_DEAD to fail the next - * xfrm_dst_check() - */ - xdst->u.dst.obsolete = DST_OBSOLETE_DEAD; - dst_release_immediate(&xdst->u.dst); - xdst = NULL; - num_pols = 0; - num_xfrms = 0; - oldflo = NULL; - } - } - /* Resolve policies to use if we couldn't get them from * previous cache entry */ - if (xdst == NULL) { - num_pols = 1; - pols[0] = __xfrm_policy_lookup(net, fl, family, - flow_to_policy_dir(dir)); - err = xfrm_expand_policies(fl, family, pols, + num_pols = 1; + pols[0] = __xfrm_policy_lookup(net, fl, family, + flow_to_policy_dir(dir)); + err = xfrm_expand_policies(fl, family, pols, &num_pols, &num_xfrms); - if (err < 0) - goto inc_error; - if (num_pols == 0) - return NULL; - if (num_xfrms <= 0) - goto make_dummy_bundle; - } + if (err < 0) + goto inc_error; + if (num_pols == 0) + return NULL; + if (num_xfrms <= 0) + goto make_dummy_bundle; new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, xflo->dst_orig); @@ -2105,26 +2080,10 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, err = PTR_ERR(new_xdst); if (err != -EAGAIN) goto error; - if (oldflo == NULL) - goto make_dummy_bundle; - dst_hold(&xdst->u.dst); - return oldflo; + goto make_dummy_bundle; } else if (new_xdst == NULL) { num_xfrms = 0; - if (oldflo == NULL) - goto make_dummy_bundle; - xdst->num_xfrms = 0; - dst_hold(&xdst->u.dst); - return oldflo; - } - - /* Kill the previous bundle */ - if (xdst) { - /* The policies were stolen for newly generated bundle */ - xdst->num_pols = 0; - /* Mark DST_OBSOLETE_DEAD to fail the next xfrm_dst_check() */ - xdst->u.dst.obsolete = DST_OBSOLETE_DEAD; - dst_release_immediate(&xdst->u.dst); + goto make_dummy_bundle; } return &new_xdst->flo; @@ -2148,12 +2107,7 @@ make_dummy_bundle: inc_error: XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); error: - if (xdst != NULL) { - /* Mark DST_OBSOLETE_DEAD to fail the next xfrm_dst_check() */ - xdst->u.dst.obsolete = DST_OBSOLETE_DEAD; - dst_release_immediate(&xdst->u.dst); - } else - xfrm_pols_put(pols, num_pols); + xfrm_pols_put(pols, num_pols); return ERR_PTR(err); } -- cgit v1.2.3-55-g7522 From aff669bc286eb3a459acb6e192ae7d2adc3967a3 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 13:57:23 +0200 Subject: xfrm_policy: kill flow to policy dir conversion XFRM_POLICY_IN/OUT/FWD are identical to FLOW_DIR_*, so gcc already removed this function as its just returns the argument. Again, no code change. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- net/xfrm/xfrm_policy.c | 46 ++++------------------------------------------ 1 file changed, 4 insertions(+), 42 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 19d457db3a09..9f724a688475 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1187,24 +1187,6 @@ __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); } -static int flow_to_policy_dir(int dir) -{ - if (XFRM_POLICY_IN == FLOW_DIR_IN && - XFRM_POLICY_OUT == FLOW_DIR_OUT && - XFRM_POLICY_FWD == FLOW_DIR_FWD) - return dir; - - switch (dir) { - default: - case FLOW_DIR_IN: - return XFRM_POLICY_IN; - case FLOW_DIR_OUT: - return XFRM_POLICY_OUT; - case FLOW_DIR_FWD: - return XFRM_POLICY_FWD; - } -} - static struct flow_cache_object * xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, struct flow_cache_object *old_obj, void *ctx) @@ -1214,7 +1196,7 @@ xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, if (old_obj) xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); - pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir)); + pol = __xfrm_policy_lookup(net, fl, family, dir); if (IS_ERR_OR_NULL(pol)) return ERR_CAST(pol); @@ -1225,23 +1207,6 @@ xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, return &pol->flo; } -static inline int policy_to_flow_dir(int dir) -{ - if (XFRM_POLICY_IN == FLOW_DIR_IN && - XFRM_POLICY_OUT == FLOW_DIR_OUT && - XFRM_POLICY_FWD == FLOW_DIR_FWD) - return dir; - switch (dir) { - default: - case XFRM_POLICY_IN: - return FLOW_DIR_IN; - case XFRM_POLICY_OUT: - return FLOW_DIR_OUT; - case XFRM_POLICY_FWD: - return FLOW_DIR_FWD; - } -} - static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, const struct flowi *fl, u16 family) { @@ -1261,7 +1226,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, } err = security_xfrm_policy_lookup(pol->security, fl->flowi_secid, - policy_to_flow_dir(dir)); + dir); if (!err) { if (!xfrm_pol_hold_rcu(pol)) goto again; @@ -2063,8 +2028,7 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, /* Resolve policies to use if we couldn't get them from * previous cache entry */ num_pols = 1; - pols[0] = __xfrm_policy_lookup(net, fl, family, - flow_to_policy_dir(dir)); + pols[0] = __xfrm_policy_lookup(net, fl, family, dir); err = xfrm_expand_policies(fl, family, pols, &num_pols, &num_xfrms); if (err < 0) @@ -2142,7 +2106,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, struct xfrm_dst *xdst; struct dst_entry *dst, *route; u16 family = dst_orig->ops->family; - u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); + u8 dir = XFRM_POLICY_OUT; int i, err, num_pols, num_xfrms = 0, drop_pols = 0; dst = NULL; @@ -2399,12 +2363,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, int pi; int reverse; struct flowi fl; - u8 fl_dir; int xerr_idx = -1; reverse = dir & ~XFRM_POLICY_MASK; dir &= XFRM_POLICY_MASK; - fl_dir = policy_to_flow_dir(dir); if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); -- cgit v1.2.3-55-g7522 From 86dc8ee0b2c524d47864ee1bdf2b36ea157405a8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 13:57:24 +0200 Subject: xfrm_policy: remove xfrm_policy_lookup This removes the wrapper and renames the __xfrm_policy_lookup variant 
to get rid of another place that used flow cache objects. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/xfrm/xfrm_policy.c | 36 ++++-------------------------------- 1 file changed, 4 insertions(+), 32 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 9f724a688475..339bb3ac2797 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1175,7 +1175,7 @@ fail: } static struct xfrm_policy * -__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir) +xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir) { #ifdef CONFIG_XFRM_SUB_POLICY struct xfrm_policy *pol; @@ -1187,26 +1187,6 @@ __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); } -static struct flow_cache_object * -xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, - u8 dir, struct flow_cache_object *old_obj, void *ctx) -{ - struct xfrm_policy *pol; - - if (old_obj) - xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); - - pol = __xfrm_policy_lookup(net, fl, family, dir); - if (IS_ERR_OR_NULL(pol)) - return ERR_CAST(pol); - - /* Resolver returns two references: - * one for cache and one for caller of flow_cache_lookup() */ - xfrm_pol_hold(pol); - - return &pol->flo; -} - static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, const struct flowi *fl, u16 family) { @@ -2028,7 +2008,7 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, /* Resolve policies to use if we couldn't get them from * previous cache entry */ num_pols = 1; - pols[0] = __xfrm_policy_lookup(net, fl, family, dir); + pols[0] = xfrm_policy_lookup(net, fl, family, dir); err = xfrm_expand_policies(fl, family, pols, &num_pols, &num_xfrms); if (err < 0) @@ -2398,16 +2378,8 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, } } - if (!pol) { - struct flow_cache_object *flo; - - flo = xfrm_policy_lookup(net, &fl, family, dir, NULL, NULL); - - if (IS_ERR_OR_NULL(flo)) - pol = ERR_CAST(flo); - else - pol = container_of(flo, struct xfrm_policy, flo); - } + if (!pol) + pol = xfrm_policy_lookup(net, &fl, family, dir); if (IS_ERR(pol)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); -- cgit v1.2.3-55-g7522 From bd45c539bf56650fb8fbab09c36f4b9afcbd4e1c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 13:57:25 +0200 Subject: xfrm_policy: make xfrm_bundle_lookup return xfrm dst object This allows to remove flow cache object embedded in struct xfrm_dst. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- net/xfrm/xfrm_policy.c | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 339bb3ac2797..145d2395f3c0 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1996,15 +1996,13 @@ free_dst: goto out; } -static struct flow_cache_object * +static struct xfrm_dst * xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, struct xfrm_flo *xflo) { struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; - struct xfrm_dst *xdst, *new_xdst; int num_pols = 0, num_xfrms = 0, err; + struct xfrm_dst *xdst; - /* Check if the policies from old bundle are usable */ - xdst = NULL; /* Resolve policies to use if we couldn't get them from * previous cache entry */ num_pols = 1; @@ -2018,19 +2016,19 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, if (num_xfrms <= 0) goto make_dummy_bundle; - new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, + xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, xflo->dst_orig); - if (IS_ERR(new_xdst)) { - err = PTR_ERR(new_xdst); + if (IS_ERR(xdst)) { + err = PTR_ERR(xdst); if (err != -EAGAIN) goto error; goto make_dummy_bundle; - } else if (new_xdst == NULL) { + } else if (xdst == NULL) { num_xfrms = 0; goto make_dummy_bundle; } - return &new_xdst->flo; + return xdst; make_dummy_bundle: /* We found policies, but there's no bundles to instantiate: @@ -2046,7 +2044,7 @@ make_dummy_bundle: memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); dst_hold(&xdst->u.dst); - return &xdst->flo; + return xdst; inc_error: XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); @@ -2082,7 +2080,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, const struct sock *sk, int flags) { struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; - struct flow_cache_object *flo; struct xfrm_dst *xdst; struct dst_entry *dst, *route; u16 family = dst_orig->ops->family; @@ -2137,14 +2134,13 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, !net->xfrm.policy_count[XFRM_POLICY_OUT]) goto nopol; - flo = xfrm_bundle_lookup(net, fl, family, dir, &xflo); - if (flo == NULL) + xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo); + if (xdst == NULL) goto nopol; - if (IS_ERR(flo)) { - err = PTR_ERR(flo); + if (IS_ERR(xdst)) { + err = PTR_ERR(xdst); goto dropdst; } - xdst = container_of(flo, struct xfrm_dst, flo); num_pols = xdst->num_pols; num_xfrms = xdst->num_xfrms; -- cgit v1.2.3-55-g7522 From 09c7570480f7544ffbf8e6db365208b0b0c154c6 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 13:57:26 +0200 Subject: xfrm: remove flow cache After rcu conversions performance degradation in forward tests isn't that noticeable anymore. See next patch for some numbers. A followup patcg could then also remove genid from the policies as we do not cache bundles anymore. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- include/net/flow.h | 34 --- include/net/flowcache.h | 25 -- include/net/netns/xfrm.h | 11 - include/net/xfrm.h | 8 - net/core/Makefile | 1 - net/core/flow.c | 516 ---------------------------------------- net/ipv4/xfrm4_policy.c | 9 - net/ipv6/xfrm6_policy.c | 9 - net/key/af_key.c | 6 - net/xfrm/xfrm_device.c | 2 - net/xfrm/xfrm_policy.c | 108 +-------- net/xfrm/xfrm_user.c | 3 - security/selinux/include/xfrm.h | 4 +- 13 files changed, 2 insertions(+), 734 deletions(-) delete mode 100644 include/net/flowcache.h delete mode 100644 net/core/flow.c diff --git a/include/net/flow.h b/include/net/flow.h index bae198b3039e..f3dc61b29bb5 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -218,40 +218,6 @@ static inline unsigned int flow_key_size(u16 family) return 0; } -#define FLOW_DIR_IN 0 -#define FLOW_DIR_OUT 1 -#define FLOW_DIR_FWD 2 - -struct net; -struct sock; -struct flow_cache_ops; - -struct flow_cache_object { - const struct flow_cache_ops *ops; -}; - -struct flow_cache_ops { - struct flow_cache_object *(*get)(struct flow_cache_object *); - int (*check)(struct flow_cache_object *); - void (*delete)(struct flow_cache_object *); -}; - -typedef struct flow_cache_object *(*flow_resolve_t)( - struct net *net, const struct flowi *key, u16 family, - u8 dir, struct flow_cache_object *oldobj, void *ctx); - -struct flow_cache_object *flow_cache_lookup(struct net *net, - const struct flowi *key, u16 family, - u8 dir, flow_resolve_t resolver, - void *ctx); -int flow_cache_init(struct net *net); -void flow_cache_fini(struct net *net); -void flow_cache_hp_init(void); - -void flow_cache_flush(struct net *net); -void flow_cache_flush_deferred(struct net *net); -extern atomic_t flow_cache_genid; - __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys); static inline __u32 get_hash_from_flowi6(const struct flowi6 *fl6) diff --git a/include/net/flowcache.h b/include/net/flowcache.h deleted file mode 100644 index 51eb971e8973..000000000000 --- a/include/net/flowcache.h +++ /dev/null @@ -1,25 +0,0 @@ -#ifndef _NET_FLOWCACHE_H -#define _NET_FLOWCACHE_H - -#include -#include -#include -#include - -struct flow_cache_percpu { - struct hlist_head *hash_table; - unsigned int hash_count; - u32 hash_rnd; - int hash_rnd_recalc; - struct tasklet_struct flush_tasklet; -}; - -struct flow_cache { - u32 hash_shift; - struct flow_cache_percpu __percpu *percpu; - struct hlist_node node; - unsigned int low_watermark; - unsigned int high_watermark; - struct timer_list rnd_timer; -}; -#endif /* _NET_FLOWCACHE_H */ diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 27bb9633c69d..611521646dd4 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -6,7 +6,6 @@ #include #include #include -#include struct ctl_table_header; @@ -73,16 +72,6 @@ struct netns_xfrm { spinlock_t xfrm_state_lock; spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; - - /* flow cache part */ - struct flow_cache flow_cache_global; - atomic_t flow_cache_genid; - struct list_head flow_cache_gc_list; - atomic_t flow_cache_gc_count; - spinlock_t flow_cache_gc_lock; - struct work_struct flow_cache_gc_work; - struct work_struct flow_cache_flush_work; - struct mutex flow_flush_sem; }; #endif diff --git a/include/net/xfrm.h b/include/net/xfrm.h index c0916ab18d32..e0feba2ce76a 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -563,7 +563,6 @@ struct xfrm_policy { refcount_t refcnt; struct timer_list timer; - struct flow_cache_object flo; atomic_t genid; u32 priority; u32 
index; @@ -978,7 +977,6 @@ struct xfrm_dst { struct rt6_info rt6; } u; struct dst_entry *route; - struct flow_cache_object flo; struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; int num_pols, num_xfrms; u32 xfrm_genid; @@ -1226,9 +1224,6 @@ static inline void xfrm_sk_free_policy(struct sock *sk) } } -void xfrm_garbage_collect(struct net *net); -void xfrm_garbage_collect_deferred(struct net *net); - #else static inline void xfrm_sk_free_policy(struct sock *sk) {} @@ -1263,9 +1258,6 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, { return 1; } -static inline void xfrm_garbage_collect(struct net *net) -{ -} #endif static __inline__ diff --git a/net/core/Makefile b/net/core/Makefile index 79f9479e9658..d501c4278015 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -11,7 +11,6 @@ obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ sock_diag.o dev_ioctl.o tso.o sock_reuseport.o -obj-$(CONFIG_XFRM) += flow.o obj-y += net-sysfs.o obj-$(CONFIG_PROC_FS) += net-procfs.o obj-$(CONFIG_NET_PKTGEN) += pktgen.o diff --git a/net/core/flow.c b/net/core/flow.c deleted file mode 100644 index f7f5d1932a27..000000000000 --- a/net/core/flow.c +++ /dev/null @@ -1,516 +0,0 @@ -/* flow.c: Generic flow cache. - * - * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru) - * Copyright (C) 2003 David S. Miller (davem@redhat.com) - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct flow_cache_entry { - union { - struct hlist_node hlist; - struct list_head gc_list; - } u; - struct net *net; - u16 family; - u8 dir; - u32 genid; - struct flowi key; - struct flow_cache_object *object; -}; - -struct flow_flush_info { - struct flow_cache *cache; - atomic_t cpuleft; - struct completion completion; -}; - -static struct kmem_cache *flow_cachep __read_mostly; - -#define flow_cache_hash_size(cache) (1U << (cache)->hash_shift) -#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ) - -static void flow_cache_new_hashrnd(unsigned long arg) -{ - struct flow_cache *fc = (void *) arg; - int i; - - for_each_possible_cpu(i) - per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1; - - fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; - add_timer(&fc->rnd_timer); -} - -static int flow_entry_valid(struct flow_cache_entry *fle, - struct netns_xfrm *xfrm) -{ - if (atomic_read(&xfrm->flow_cache_genid) != fle->genid) - return 0; - if (fle->object && !fle->object->ops->check(fle->object)) - return 0; - return 1; -} - -static void flow_entry_kill(struct flow_cache_entry *fle, - struct netns_xfrm *xfrm) -{ - if (fle->object) - fle->object->ops->delete(fle->object); - kmem_cache_free(flow_cachep, fle); -} - -static void flow_cache_gc_task(struct work_struct *work) -{ - struct list_head gc_list; - struct flow_cache_entry *fce, *n; - struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm, - flow_cache_gc_work); - - INIT_LIST_HEAD(&gc_list); - spin_lock_bh(&xfrm->flow_cache_gc_lock); - list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list); - spin_unlock_bh(&xfrm->flow_cache_gc_lock); - - list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) { - flow_entry_kill(fce, xfrm); - atomic_dec(&xfrm->flow_cache_gc_count); - } -} - -static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp, - unsigned int deleted, - struct list_head *gc_list, - struct netns_xfrm *xfrm) -{ - if 
(deleted) { - atomic_add(deleted, &xfrm->flow_cache_gc_count); - fcp->hash_count -= deleted; - spin_lock_bh(&xfrm->flow_cache_gc_lock); - list_splice_tail(gc_list, &xfrm->flow_cache_gc_list); - spin_unlock_bh(&xfrm->flow_cache_gc_lock); - schedule_work(&xfrm->flow_cache_gc_work); - } -} - -static void __flow_cache_shrink(struct flow_cache *fc, - struct flow_cache_percpu *fcp, - unsigned int shrink_to) -{ - struct flow_cache_entry *fle; - struct hlist_node *tmp; - LIST_HEAD(gc_list); - unsigned int deleted = 0; - struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm, - flow_cache_global); - unsigned int i; - - for (i = 0; i < flow_cache_hash_size(fc); i++) { - unsigned int saved = 0; - - hlist_for_each_entry_safe(fle, tmp, - &fcp->hash_table[i], u.hlist) { - if (saved < shrink_to && - flow_entry_valid(fle, xfrm)) { - saved++; - } else { - deleted++; - hlist_del(&fle->u.hlist); - list_add_tail(&fle->u.gc_list, &gc_list); - } - } - } - - flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm); -} - -static void flow_cache_shrink(struct flow_cache *fc, - struct flow_cache_percpu *fcp) -{ - unsigned int shrink_to = fc->low_watermark / flow_cache_hash_size(fc); - - __flow_cache_shrink(fc, fcp, shrink_to); -} - -static void flow_new_hash_rnd(struct flow_cache *fc, - struct flow_cache_percpu *fcp) -{ - get_random_bytes(&fcp->hash_rnd, sizeof(u32)); - fcp->hash_rnd_recalc = 0; - __flow_cache_shrink(fc, fcp, 0); -} - -static u32 flow_hash_code(struct flow_cache *fc, - struct flow_cache_percpu *fcp, - const struct flowi *key, - unsigned int keysize) -{ - const u32 *k = (const u32 *) key; - const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32); - - return jhash2(k, length, fcp->hash_rnd) - & (flow_cache_hash_size(fc) - 1); -} - -/* I hear what you're saying, use memcmp. But memcmp cannot make - * important assumptions that we can here, such as alignment. - */ -static int flow_key_compare(const struct flowi *key1, const struct flowi *key2, - unsigned int keysize) -{ - const flow_compare_t *k1, *k1_lim, *k2; - - k1 = (const flow_compare_t *) key1; - k1_lim = k1 + keysize; - - k2 = (const flow_compare_t *) key2; - - do { - if (*k1++ != *k2++) - return 1; - } while (k1 < k1_lim); - - return 0; -} - -struct flow_cache_object * -flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir, - flow_resolve_t resolver, void *ctx) -{ - struct flow_cache *fc = &net->xfrm.flow_cache_global; - struct flow_cache_percpu *fcp; - struct flow_cache_entry *fle, *tfle; - struct flow_cache_object *flo; - unsigned int keysize; - unsigned int hash; - - local_bh_disable(); - fcp = this_cpu_ptr(fc->percpu); - - fle = NULL; - flo = NULL; - - keysize = flow_key_size(family); - if (!keysize) - goto nocache; - - /* Packet really early in init? Making flow_cache_init a - * pre-smp initcall would solve this. 
--RR */ - if (!fcp->hash_table) - goto nocache; - - if (fcp->hash_rnd_recalc) - flow_new_hash_rnd(fc, fcp); - - hash = flow_hash_code(fc, fcp, key, keysize); - hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) { - if (tfle->net == net && - tfle->family == family && - tfle->dir == dir && - flow_key_compare(key, &tfle->key, keysize) == 0) { - fle = tfle; - break; - } - } - - if (unlikely(!fle)) { - if (fcp->hash_count > fc->high_watermark) - flow_cache_shrink(fc, fcp); - - if (atomic_read(&net->xfrm.flow_cache_gc_count) > - 2 * num_online_cpus() * fc->high_watermark) { - flo = ERR_PTR(-ENOBUFS); - goto ret_object; - } - - fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); - if (fle) { - fle->net = net; - fle->family = family; - fle->dir = dir; - memcpy(&fle->key, key, keysize * sizeof(flow_compare_t)); - fle->object = NULL; - hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]); - fcp->hash_count++; - } - } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) { - flo = fle->object; - if (!flo) - goto ret_object; - flo = flo->ops->get(flo); - if (flo) - goto ret_object; - } else if (fle->object) { - flo = fle->object; - flo->ops->delete(flo); - fle->object = NULL; - } - -nocache: - flo = NULL; - if (fle) { - flo = fle->object; - fle->object = NULL; - } - flo = resolver(net, key, family, dir, flo, ctx); - if (fle) { - fle->genid = atomic_read(&net->xfrm.flow_cache_genid); - if (!IS_ERR(flo)) - fle->object = flo; - else - fle->genid--; - } else { - if (!IS_ERR_OR_NULL(flo)) - flo->ops->delete(flo); - } -ret_object: - local_bh_enable(); - return flo; -} -EXPORT_SYMBOL(flow_cache_lookup); - -static void flow_cache_flush_tasklet(unsigned long data) -{ - struct flow_flush_info *info = (void *)data; - struct flow_cache *fc = info->cache; - struct flow_cache_percpu *fcp; - struct flow_cache_entry *fle; - struct hlist_node *tmp; - LIST_HEAD(gc_list); - unsigned int deleted = 0; - struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm, - flow_cache_global); - unsigned int i; - - fcp = this_cpu_ptr(fc->percpu); - for (i = 0; i < flow_cache_hash_size(fc); i++) { - hlist_for_each_entry_safe(fle, tmp, - &fcp->hash_table[i], u.hlist) { - if (flow_entry_valid(fle, xfrm)) - continue; - - deleted++; - hlist_del(&fle->u.hlist); - list_add_tail(&fle->u.gc_list, &gc_list); - } - } - - flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm); - - if (atomic_dec_and_test(&info->cpuleft)) - complete(&info->completion); -} - -/* - * Return whether a cpu needs flushing. Conservatively, we assume - * the presence of any entries means the core may require flushing, - * since the flow_cache_ops.check() function may assume it's running - * on the same core as the per-cpu cache component. - */ -static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu) -{ - struct flow_cache_percpu *fcp; - unsigned int i; - - fcp = per_cpu_ptr(fc->percpu, cpu); - for (i = 0; i < flow_cache_hash_size(fc); i++) - if (!hlist_empty(&fcp->hash_table[i])) - return 0; - return 1; -} - -static void flow_cache_flush_per_cpu(void *data) -{ - struct flow_flush_info *info = data; - struct tasklet_struct *tasklet; - - tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet; - tasklet->data = (unsigned long)info; - tasklet_schedule(tasklet); -} - -void flow_cache_flush(struct net *net) -{ - struct flow_flush_info info; - cpumask_var_t mask; - int i, self; - - /* Track which cpus need flushing to avoid disturbing all cores. 
*/ - if (!alloc_cpumask_var(&mask, GFP_KERNEL)) - return; - cpumask_clear(mask); - - /* Don't want cpus going down or up during this. */ - get_online_cpus(); - mutex_lock(&net->xfrm.flow_flush_sem); - info.cache = &net->xfrm.flow_cache_global; - for_each_online_cpu(i) - if (!flow_cache_percpu_empty(info.cache, i)) - cpumask_set_cpu(i, mask); - atomic_set(&info.cpuleft, cpumask_weight(mask)); - if (atomic_read(&info.cpuleft) == 0) - goto done; - - init_completion(&info.completion); - - local_bh_disable(); - self = cpumask_test_and_clear_cpu(smp_processor_id(), mask); - on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0); - if (self) - flow_cache_flush_tasklet((unsigned long)&info); - local_bh_enable(); - - wait_for_completion(&info.completion); - -done: - mutex_unlock(&net->xfrm.flow_flush_sem); - put_online_cpus(); - free_cpumask_var(mask); -} - -static void flow_cache_flush_task(struct work_struct *work) -{ - struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm, - flow_cache_flush_work); - struct net *net = container_of(xfrm, struct net, xfrm); - - flow_cache_flush(net); -} - -void flow_cache_flush_deferred(struct net *net) -{ - schedule_work(&net->xfrm.flow_cache_flush_work); -} - -static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu) -{ - struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); - unsigned int sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc); - - if (!fcp->hash_table) { - fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); - if (!fcp->hash_table) { - pr_err("NET: failed to allocate flow cache sz %u\n", sz); - return -ENOMEM; - } - fcp->hash_rnd_recalc = 1; - fcp->hash_count = 0; - tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0); - } - return 0; -} - -static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node) -{ - struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node); - - return flow_cache_cpu_prepare(fc, cpu); -} - -static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node) -{ - struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node); - struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); - - __flow_cache_shrink(fc, fcp, 0); - return 0; -} - -int flow_cache_init(struct net *net) -{ - int i; - struct flow_cache *fc = &net->xfrm.flow_cache_global; - - if (!flow_cachep) - flow_cachep = kmem_cache_create("flow_cache", - sizeof(struct flow_cache_entry), - 0, SLAB_PANIC, NULL); - spin_lock_init(&net->xfrm.flow_cache_gc_lock); - INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list); - INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task); - INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task); - mutex_init(&net->xfrm.flow_flush_sem); - atomic_set(&net->xfrm.flow_cache_gc_count, 0); - - fc->hash_shift = 10; - fc->low_watermark = 2 * flow_cache_hash_size(fc); - fc->high_watermark = 4 * flow_cache_hash_size(fc); - - fc->percpu = alloc_percpu(struct flow_cache_percpu); - if (!fc->percpu) - return -ENOMEM; - - if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node)) - goto err; - - setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd, - (unsigned long) fc); - fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; - add_timer(&fc->rnd_timer); - - return 0; - -err: - for_each_possible_cpu(i) { - struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i); - kfree(fcp->hash_table); - fcp->hash_table = NULL; - } - - free_percpu(fc->percpu); - fc->percpu = NULL; - - return -ENOMEM; -} -EXPORT_SYMBOL(flow_cache_init); - 
-void flow_cache_fini(struct net *net) -{ - int i; - struct flow_cache *fc = &net->xfrm.flow_cache_global; - - del_timer_sync(&fc->rnd_timer); - - cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node); - - for_each_possible_cpu(i) { - struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i); - kfree(fcp->hash_table); - fcp->hash_table = NULL; - } - - free_percpu(fc->percpu); - fc->percpu = NULL; -} -EXPORT_SYMBOL(flow_cache_fini); - -void __init flow_cache_hp_init(void) -{ - int ret; - - ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE, - "net/flow:prepare", - flow_cache_cpu_up_prep, - flow_cache_cpu_dead); - WARN_ON(ret < 0); -} diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 19455a5fc328..4aefb149fe0a 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -213,14 +213,6 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) fl4->flowi4_tos = iph->tos; } -static inline int xfrm4_garbage_collect(struct dst_ops *ops) -{ - struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops); - - xfrm_garbage_collect_deferred(net); - return (dst_entries_get_slow(ops) > ops->gc_thresh * 2); -} - static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu) { @@ -259,7 +251,6 @@ static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev, static struct dst_ops xfrm4_dst_ops_template = { .family = AF_INET, - .gc = xfrm4_garbage_collect, .update_pmtu = xfrm4_update_pmtu, .redirect = xfrm4_redirect, .cow_metrics = dst_cow_metrics_generic, diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ae30dc4973e8..f44b25a48478 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -214,14 +214,6 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) } } -static inline int xfrm6_garbage_collect(struct dst_ops *ops) -{ - struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops); - - xfrm_garbage_collect_deferred(net); - return dst_entries_get_fast(ops) > ops->gc_thresh * 2; -} - static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu) { @@ -279,7 +271,6 @@ static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, static struct dst_ops xfrm6_dst_ops_template = { .family = AF_INET6, - .gc = xfrm6_garbage_collect, .update_pmtu = xfrm6_update_pmtu, .redirect = xfrm6_redirect, .cow_metrics = dst_cow_metrics_generic, diff --git a/net/key/af_key.c b/net/key/af_key.c index ca9d3ae665e7..10d7133e4fe9 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2398,8 +2398,6 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa out: xfrm_pol_put(xp); - if (err == 0) - xfrm_garbage_collect(net); return err; } @@ -2650,8 +2648,6 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_ out: xfrm_pol_put(xp); - if (delete && err == 0) - xfrm_garbage_collect(net); return err; } @@ -2751,8 +2747,6 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad int err, err2; err = xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, true); - if (!err) - xfrm_garbage_collect(net); err2 = unicast_flush_resp(sk, hdr); if (err || err2) { if (err == -ESRCH) /* empty table - old silent behavior */ diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 5f7e8bfa0c2d..1f9a079e08b0 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -175,8 +175,6 @@ static int xfrm_dev_down(struct net_device *dev) 
if (dev->features & NETIF_F_HW_ESP) xfrm_dev_state_flush(dev_net(dev), dev, true); - xfrm_garbage_collect(dev_net(dev)); - return NOTIFY_DONE; } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 145d2395f3c0..0f1db4c18b22 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -246,36 +246,6 @@ expired: xfrm_pol_put(xp); } -static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo) -{ - struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); - - if (unlikely(pol->walk.dead)) - flo = NULL; - else - xfrm_pol_hold(pol); - - return flo; -} - -static int xfrm_policy_flo_check(struct flow_cache_object *flo) -{ - struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); - - return !pol->walk.dead; -} - -static void xfrm_policy_flo_delete(struct flow_cache_object *flo) -{ - xfrm_pol_put(container_of(flo, struct xfrm_policy, flo)); -} - -static const struct flow_cache_ops xfrm_policy_fc_ops = { - .get = xfrm_policy_flo_get, - .check = xfrm_policy_flo_check, - .delete = xfrm_policy_flo_delete, -}; - /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 * SPD calls. */ @@ -298,7 +268,6 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) (unsigned long)policy); setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process, (unsigned long)policy); - policy->flo.ops = &xfrm_policy_fc_ops; } return policy; } @@ -798,7 +767,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) else hlist_add_head(&policy->bydst, chain); __xfrm_policy_link(policy, dir); - atomic_inc(&net->xfrm.flow_cache_genid); /* After previous checking, family can either be AF_INET or AF_INET6 */ if (policy->family == AF_INET) @@ -1490,58 +1458,6 @@ static int xfrm_get_tos(const struct flowi *fl, int family) return tos; } -static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo) -{ - struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); - struct dst_entry *dst = &xdst->u.dst; - - if (xdst->route == NULL) { - /* Dummy bundle - if it has xfrms we were not - * able to build bundle as template resolution failed. - * It means we need to try again resolving. 
*/ - if (xdst->num_xfrms > 0) - return NULL; - } else if (dst->flags & DST_XFRM_QUEUE) { - return NULL; - } else { - /* Real bundle */ - if (stale_bundle(dst)) - return NULL; - } - - dst_hold(dst); - return flo; -} - -static int xfrm_bundle_flo_check(struct flow_cache_object *flo) -{ - struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); - struct dst_entry *dst = &xdst->u.dst; - - if (!xdst->route) - return 0; - if (stale_bundle(dst)) - return 0; - - return 1; -} - -static void xfrm_bundle_flo_delete(struct flow_cache_object *flo) -{ - struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); - struct dst_entry *dst = &xdst->u.dst; - - /* Mark DST_OBSOLETE_DEAD to fail the next xfrm_dst_check() */ - dst->obsolete = DST_OBSOLETE_DEAD; - dst_release_immediate(dst); -} - -static const struct flow_cache_ops xfrm_bundle_fc_ops = { - .get = xfrm_bundle_flo_get, - .check = xfrm_bundle_flo_check, - .delete = xfrm_bundle_flo_delete, -}; - static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) { const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); @@ -1569,7 +1485,6 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) struct dst_entry *dst = &xdst->u.dst; memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)); - xdst->flo.ops = &xfrm_bundle_fc_ops; } else xdst = ERR_PTR(-ENOBUFS); @@ -2521,11 +2436,9 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) * notice. That's what we are validating here via the * stale_bundle() check. * - * When an xdst is removed from flow cache, DST_OBSOLETE_DEAD will - * be marked on it. * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will * be marked on it. - * Both will force stable_bundle() to fail on any xdst bundle with + * This will force stale_bundle() to fail on any xdst bundle with * this dst linked in it. 
*/ if (dst->obsolete < 0 && !stale_bundle(dst)) @@ -2565,18 +2478,6 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) return dst; } -void xfrm_garbage_collect(struct net *net) -{ - flow_cache_flush(net); -} -EXPORT_SYMBOL(xfrm_garbage_collect); - -void xfrm_garbage_collect_deferred(struct net *net) -{ - flow_cache_flush_deferred(net); -} -EXPORT_SYMBOL(xfrm_garbage_collect_deferred); - static void xfrm_init_pmtu(struct dst_entry *dst) { do { @@ -2914,14 +2815,9 @@ static int __net_init xfrm_net_init(struct net *net) rv = xfrm_sysctl_init(net); if (rv < 0) goto out_sysctl; - rv = flow_cache_init(net); - if (rv < 0) - goto out; return 0; -out: - xfrm_sysctl_fini(net); out_sysctl: xfrm_policy_fini(net); out_policy: @@ -2934,7 +2830,6 @@ out_statistics: static void __net_exit xfrm_net_exit(struct net *net) { - flow_cache_fini(net); xfrm_sysctl_fini(net); xfrm_policy_fini(net); xfrm_state_fini(net); @@ -2948,7 +2843,6 @@ static struct pernet_operations __net_initdata xfrm_net_ops = { void __init xfrm_init(void) { - flow_cache_hp_init(); register_pernet_subsys(&xfrm_net_ops); seqcount_init(&xfrm_policy_hash_generation); xfrm_input_init(); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 2be4c6af008a..1b539b7dcfab 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1815,8 +1815,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, out: xfrm_pol_put(xp); - if (delete && err == 0) - xfrm_garbage_collect(net); return err; } @@ -2027,7 +2025,6 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, return 0; return err; } - xfrm_garbage_collect(net); c.data.type = type; c.event = nlh->nlmsg_type; diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h index 1450f85b946d..36a7ce9e11ff 100644 --- a/security/selinux/include/xfrm.h +++ b/security/selinux/include/xfrm.h @@ -47,10 +47,8 @@ static inline void selinux_xfrm_notify_policyload(void) struct net *net; rtnl_lock(); - for_each_net(net) { - atomic_inc(&net->xfrm.flow_cache_genid); + for_each_net(net) rt_genid_bump_all(net); - } rtnl_unlock(); } #else -- cgit v1.2.3-55-g7522 From ec30d78c14a813db39a647b6a348b4286ba4abf5 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 17 Jul 2017 13:57:27 +0200 Subject: xfrm: add xdst pcpu cache retain last used xfrm_dst in a pcpu cache. On next request, reuse this dst if the policies are the same. The cache will not help with strict RR workloads as there is no hit. The cache packet-path part is reasonably small, the notifier part is needed so we do not add long hangs when a device is dismantled but some pcpu xdst still holds a reference, there are also calls to the flush operation when userspace deletes SAs so modules can be removed (there is no hit. We need to run the dst_release on the correct cpu to avoid races with packet path. This is done by adding a work_struct for each cpu and then doing the actual test/release on each affected cpu via schedule_work_on(). Test results using 4 network namespaces and null encryption: ns1 ns2 -> ns3 -> ns4 netperf -> xfrm/null enc -> xfrm/null dec -> netserver what TCP_STREAM UDP_STREAM UDP_RR Flow cache: 14644.61 294.35 327231.64 No flow cache: 14349.81 242.64 202301.72 Pcpu cache: 14629.70 292.21 205595.22 UDP tests used 64byte packets, tests ran for one minute each, value is average over ten iterations. 'Flow cache' is 'net-next', 'No flow cache' is net-next plus this series but without this patch. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- include/net/xfrm.h | 1 + net/xfrm/xfrm_device.c | 2 + net/xfrm/xfrm_policy.c | 127 ++++++++++++++++++++++++++++++++++++++++++++++++- net/xfrm/xfrm_state.c | 5 +- 4 files changed, 132 insertions(+), 3 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index e0feba2ce76a..afb4929d7232 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -317,6 +317,7 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo); void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c); +void xfrm_policy_cache_flush(void); void km_state_notify(struct xfrm_state *x, const struct km_event *c); struct xfrm_tmpl; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 1f9a079e08b0..5cd7a244e88d 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -153,6 +153,7 @@ static int xfrm_dev_register(struct net_device *dev) static int xfrm_dev_unregister(struct net_device *dev) { + xfrm_policy_cache_flush(); return NOTIFY_DONE; } @@ -175,6 +176,7 @@ static int xfrm_dev_down(struct net_device *dev) if (dev->features & NETIF_F_HW_ESP) xfrm_dev_state_flush(dev_net(dev), dev, true); + xfrm_policy_cache_flush(); return NOTIFY_DONE; } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 0f1db4c18b22..06c3bf7ab86b 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -44,6 +45,8 @@ struct xfrm_flo { u8 flags; }; +static DEFINE_PER_CPU(struct xfrm_dst *, xfrm_last_dst); +static struct work_struct *xfrm_pcpu_work __read_mostly; static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock); static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1] __read_mostly; @@ -972,6 +975,8 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid) } if (!cnt) err = -ESRCH; + else + xfrm_policy_cache_flush(); out: spin_unlock_bh(&net->xfrm.xfrm_policy_lock); return err; @@ -1700,6 +1705,102 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family, } +static void xfrm_last_dst_update(struct xfrm_dst *xdst, struct xfrm_dst *old) +{ + this_cpu_write(xfrm_last_dst, xdst); + if (old) + dst_release(&old->u.dst); +} + +static void __xfrm_pcpu_work_fn(void) +{ + struct xfrm_dst *old; + + old = this_cpu_read(xfrm_last_dst); + if (old && !xfrm_bundle_ok(old)) + xfrm_last_dst_update(NULL, old); +} + +static void xfrm_pcpu_work_fn(struct work_struct *work) +{ + local_bh_disable(); + rcu_read_lock(); + __xfrm_pcpu_work_fn(); + rcu_read_unlock(); + local_bh_enable(); +} + +void xfrm_policy_cache_flush(void) +{ + struct xfrm_dst *old; + bool found = 0; + int cpu; + + local_bh_disable(); + rcu_read_lock(); + for_each_possible_cpu(cpu) { + old = per_cpu(xfrm_last_dst, cpu); + if (old && !xfrm_bundle_ok(old)) { + if (smp_processor_id() == cpu) { + __xfrm_pcpu_work_fn(); + continue; + } + found = true; + break; + } + } + + rcu_read_unlock(); + local_bh_enable(); + + if (!found) + return; + + get_online_cpus(); + + for_each_possible_cpu(cpu) { + bool bundle_release; + + rcu_read_lock(); + old = per_cpu(xfrm_last_dst, cpu); + bundle_release = old && !xfrm_bundle_ok(old); + rcu_read_unlock(); + + if (!bundle_release) + continue; + + if (cpu_online(cpu)) { + schedule_work_on(cpu, &xfrm_pcpu_work[cpu]); + continue; + } + + rcu_read_lock(); + old = per_cpu(xfrm_last_dst, cpu); + if (old && !xfrm_bundle_ok(old)) { + per_cpu(xfrm_last_dst, cpu) = NULL; + 
dst_release(&old->u.dst); + } + rcu_read_unlock(); + } + + put_online_cpus(); +} + +static bool xfrm_pol_dead(struct xfrm_dst *xdst) +{ + unsigned int num_pols = xdst->num_pols; + unsigned int pol_dead = 0, i; + + for (i = 0; i < num_pols; i++) + pol_dead |= xdst->pols[i]->walk.dead; + + /* Mark DST_OBSOLETE_DEAD to fail the next xfrm_dst_check() */ + if (pol_dead) + xdst->u.dst.obsolete = DST_OBSOLETE_DEAD; + + return pol_dead; +} + static struct xfrm_dst * xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, const struct flowi *fl, u16 family, @@ -1707,10 +1808,22 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, { struct net *net = xp_net(pols[0]); struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; + struct xfrm_dst *xdst, *old; struct dst_entry *dst; - struct xfrm_dst *xdst; int err; + xdst = this_cpu_read(xfrm_last_dst); + if (xdst && + xdst->u.dst.dev == dst_orig->dev && + xdst->num_pols == num_pols && + !xfrm_pol_dead(xdst) && + memcmp(xdst->pols, pols, + sizeof(struct xfrm_policy *) * num_pols) == 0) { + dst_hold(&xdst->u.dst); + return xdst; + } + + old = xdst; /* Try to instantiate a bundle */ err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); if (err <= 0) { @@ -1731,6 +1844,9 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); xdst->policy_genid = atomic_read(&pols[0]->genid); + atomic_set(&xdst->u.dst.__refcnt, 2); + xfrm_last_dst_update(xdst, old); + return xdst; } @@ -2843,6 +2959,15 @@ static struct pernet_operations __net_initdata xfrm_net_ops = { void __init xfrm_init(void) { + int i; + + xfrm_pcpu_work = kmalloc_array(NR_CPUS, sizeof(*xfrm_pcpu_work), + GFP_KERNEL); + BUG_ON(!xfrm_pcpu_work); + + for (i = 0; i < NR_CPUS; i++) + INIT_WORK(&xfrm_pcpu_work[i], xfrm_pcpu_work_fn); + register_pernet_subsys(&xfrm_net_ops); seqcount_init(&xfrm_policy_hash_generation); xfrm_input_init(); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 6c0956d10db6..82cbbce69b79 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -724,9 +724,10 @@ restart: } } } - if (cnt) + if (cnt) { err = 0; - + xfrm_policy_cache_flush(); + } out: spin_unlock_bh(&net->xfrm.xfrm_state_lock); return err; -- cgit v1.2.3-55-g7522 From e29237e7bb4ad79f5011cd0af9c8baeca16dce5c Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Tue, 18 Jul 2017 10:10:09 +0200 Subject: mlxsw: spectrum_router: Enable IPv6 router Before we add IPv6 constructs like traps and router interfaces, we first need to enable IPv6 routing in the device. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 4 +++- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 0ca196899e18..ce6c63d0cf2a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3953,10 +3953,12 @@ MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2); */ MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8); -static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en) +static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en, + bool ipv6_en) { MLXSW_REG_ZERO(rgcr, payload); mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en); + mlxsw_reg_rgcr_ipv6_en_set(payload, ipv6_en); } /* RITR - Router Interface Table Register diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 383fef5a8e24..060134d7db1c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3697,7 +3697,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) return -EIO; max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); - mlxsw_reg_rgcr_pack(rgcr_pl, true); + mlxsw_reg_rgcr_pack(rgcr_pl, true, true); mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); if (err) @@ -3709,7 +3709,7 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) { char rgcr_pl[MLXSW_REG_RGCR_LEN]; - mlxsw_reg_rgcr_pack(rgcr_pl, false); + mlxsw_reg_rgcr_pack(rgcr_pl, false, false); mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl); } -- cgit v1.2.3-55-g7522 From e717e011ff5238a991e69aa0c62468b625fc29c7 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Tue, 18 Jul 2017 10:10:10 +0200 Subject: mlxsw: reg: Enable IPv6 on router interfaces Enable IPv6 and IPv6 forwarding on router interfaces (RIFs), so that they will be able to receive and forward IPv6 traffic. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index ce6c63d0cf2a..8282acb8f18f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4206,10 +4206,12 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable, MLXSW_REG_ZERO(ritr, payload); mlxsw_reg_ritr_enable_set(payload, enable); mlxsw_reg_ritr_ipv4_set(payload, 1); + mlxsw_reg_ritr_ipv6_set(payload, 1); mlxsw_reg_ritr_type_set(payload, type); mlxsw_reg_ritr_op_set(payload, op); mlxsw_reg_ritr_rif_set(payload, rif); mlxsw_reg_ritr_ipv4_fe_set(payload, 1); + mlxsw_reg_ritr_ipv6_fe_set(payload, 1); mlxsw_reg_ritr_lb_en_set(payload, 1); mlxsw_reg_ritr_virtual_router_set(payload, vr_id); mlxsw_reg_ritr_mtu_set(payload, mtu); -- cgit v1.2.3-55-g7522 From 8d54814e5233f92bdc2e5185c5dbf4978f7c2742 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Tue, 18 Jul 2017 10:10:11 +0200 Subject: mlxsw: spectrum: Add support for IPv6 traps Before we can start using IPv6, we need to trap certain control packets to the CPU. Among others, these include Neighbour Discovery, DHCP and neighbour misses. 
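For orientation before the diff below: each new IPv6 trap ID gains a listener entry that steers it to a trap group, and the regrouped BGP, HOST_MISS and IPV6_ND groups share the "rate = 1024, burst_size = 7" policer case. The following C sketch is only a condensed reading of that steering table -- the struct, loop and program are hypothetical, while the hexadecimal IDs, trap names and group assignments are copied from the trap.h and spectrum.c hunks that follow.

/*
 * Illustrative only: maps a few of the IPv6 trap IDs added below to the
 * trap group their listener selects.  All of these groups fall into the
 * rate = 1024 / burst_size = 7 case in mlxsw_sp_cpu_policers_set().
 */
#include <stdio.h>

struct ipv6_trap {
	unsigned int id;   /* MLXSW_TRAP_ID_* value from trap.h */
	const char *name;
	const char *group; /* MLXSW_REG_HTGT_TRAP_GROUP_SP_* suffix */
};

static const struct ipv6_trap traps[] = {
	{ 0x69, "IPV6_DHCP",                     "DHCP"       },
	{ 0x89, "IPV6_BGP",                      "BGP"        },
	{ 0x8A, "L3_IPV6_ROUTER_SOLICITATION",   "IPV6_ND"    },
	{ 0x8C, "L3_IPV6_NEIGHBOR_SOLICITATION", "IPV6_ND"    },
	{ 0x92, "HOST_MISS_IPV6",                "HOST_MISS"  },
	{ 0xD7, "ROUTER_ALERT_IPV6",             "ROUTER_EXP" },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(traps) / sizeof(traps[0]); i++)
		printf("0x%02X %-32s -> %s\n",
		       traps[i].id, traps[i].name, traps[i].group);
	return 0;
}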
Signed-off-by: Arkadi Sharshevsky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 5 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 39 +++++++++++++++++++++----- drivers/net/ethernet/mellanox/mlxsw/trap.h | 20 +++++++++++-- 3 files changed, 53 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 8282acb8f18f..fe196fd84491 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3679,16 +3679,17 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP, MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP, MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP, - MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4, + MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP, MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF, MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP, - MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS, + MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS, MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP, MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE, MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME, MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP, MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD, + MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND, }; /* reg_htgt_trap_group diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1aa6298ea6cd..d6dc3dc4f319 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3345,12 +3345,35 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(OSPF, TRAP_TO_CPU, OSPF, false), MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), + MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, + false), + MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP, + false), + MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false), + MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false), + MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false), MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false), - MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false), - MLXSW_SP_RXL_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false), + MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false), + MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false), + MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND, + false), + MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false), + MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, + false), + MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false), + MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false), MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), /* PKT Sample trap 
*/ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, false, SP_IP2ME, DISCARD), @@ -3389,12 +3412,13 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) rate = 16 * 1024; burst_size = 10; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: - case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: rate = 1024; burst_size = 7; break; @@ -3443,7 +3467,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) priority = 5; tc = 5; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP_IPV4: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP: priority = 4; tc = 4; @@ -3455,10 +3479,11 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) tc = 3; break; case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND: priority = 2; tc = 2; break; - case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP_MISS: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS: case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: priority = 1; diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h index 891b4ee6eeb2..61652396bf75 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/trap.h +++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h @@ -61,16 +61,32 @@ enum { MLXSW_TRAP_ID_MTUERROR = 0x52, MLXSW_TRAP_ID_TTLERROR = 0x53, MLXSW_TRAP_ID_LBERROR = 0x54, - MLXSW_TRAP_ID_OSPF = 0x55, + MLXSW_TRAP_ID_IPV4_OSPF = 0x55, MLXSW_TRAP_ID_IP2ME = 0x5F, + MLXSW_TRAP_ID_IPV6_UNSPECIFIED_ADDRESS = 0x60, + MLXSW_TRAP_ID_IPV6_LINK_LOCAL_DEST = 0x61, + MLXSW_TRAP_ID_IPV6_LINK_LOCAL_SRC = 0x62, + MLXSW_TRAP_ID_IPV6_ALL_NODES_LINK = 0x63, + MLXSW_TRAP_ID_IPV6_OSPF = 0x64, MLXSW_TRAP_ID_IPV6_MLDV12_LISTENER_QUERY = 0x65, MLXSW_TRAP_ID_IPV6_MLDV1_LISTENER_REPORT = 0x66, MLXSW_TRAP_ID_IPV6_MLDV1_LISTENER_DONE = 0x67, MLXSW_TRAP_ID_IPV6_MLDV2_LISTENER_REPORT = 0x68, + MLXSW_TRAP_ID_IPV6_DHCP = 0x69, + MLXSW_TRAP_ID_IPV6_ALL_ROUTERS_LINK = 0x6F, MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70, - MLXSW_TRAP_ID_BGP_IPV4 = 0x88, + MLXSW_TRAP_ID_IPV4_BGP = 0x88, + MLXSW_TRAP_ID_IPV6_BGP = 0x89, + MLXSW_TRAP_ID_L3_IPV6_ROUTER_SOLICITATION = 0x8A, + MLXSW_TRAP_ID_L3_IPV6_ROUTER_ADVERTISMENT = 0x8B, + MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_SOLICITATION = 0x8C, + MLXSW_TRAP_ID_L3_IPV6_NEIGHBOR_ADVERTISMENT = 0x8D, + MLXSW_TRAP_ID_L3_IPV6_REDIRECTION = 0x8E, MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90, + MLXSW_TRAP_ID_IPV6_MC_LINK_LOCAL_DEST = 0x91, + MLXSW_TRAP_ID_HOST_MISS_IPV6 = 0x92, MLXSW_TRAP_ID_ROUTER_ALERT_IPV4 = 0xD6, + MLXSW_TRAP_ID_ROUTER_ALERT_IPV6 = 0xD7, MLXSW_TRAP_ID_ACL0 = 0x1C0, MLXSW_TRAP_ID_MAX = 0x1FF -- cgit v1.2.3-55-g7522 From 0d284818aff7752c71f1dc5dd6ad8ee64a4adac6 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:12 +0200 Subject: mlxsw: spectrum_router: Flood unregistered multicast packets to router Up until now we only flooded broadcast packets to the router when an L3 interface was configured on top of a bridge. However, IPv6 Neighbour Discovery packets are trapped to the CPU inside the router and these can be sent with a multicast address. Flood unregistered multicast packets to the router port, so that relevant packets could be trapped there. 
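The spectrum_router.c diff below adds a second flood-type call to both RIF configure paths, and the interesting part is the ordering: multicast flooding is enabled before broadcast flooding, each error label undoes exactly the steps that already succeeded, and deconfigure tears them down in reverse. A minimal self-contained sketch of that unwind pattern follows -- set_mc_flood() and set_bc_flood() are hypothetical stubs standing in for the mlxsw_sp_fid_flood_set() calls, not driver functions.

/*
 * Illustrative two-step reduction of the goto-unwind pattern used in
 * mlxsw_sp_rif_vlan_configure() after the patch: configure forward,
 * roll back in reverse on failure, deconfigure in the opposite order.
 */
#include <stdbool.h>
#include <stdio.h>

static int set_mc_flood(bool enable) { printf("MC flood: %d\n", enable); return 0; }
static int set_bc_flood(bool enable) { printf("BC flood: %d\n", enable); return 0; }

static int rif_configure(void)
{
	int err;

	err = set_mc_flood(true);
	if (err)
		return err;        /* nothing to undo yet */

	err = set_bc_flood(true);
	if (err)
		goto err_bc_flood; /* undo only the MC step */

	return 0;

err_bc_flood:
	set_mc_flood(false);
	return err;
}

static void rif_deconfigure(void)
{
	/* reverse order of rif_configure() */
	set_bc_flood(false);
	set_mc_flood(false);
}

int main(void)
{
	if (rif_configure() == 0)
		rif_deconfigure();
	return 0;
}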
Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 060134d7db1c..690bb9fed57a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3558,6 +3558,11 @@ static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif) if (err) return err; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), true); + if (err) + goto err_fid_mc_flood_set; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), true); if (err) @@ -3566,6 +3571,9 @@ static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif) return 0; err_fid_bc_flood_set: + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); +err_fid_mc_flood_set: mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false); return err; } @@ -3577,6 +3585,8 @@ static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false); } @@ -3607,6 +3617,11 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) if (err) return err; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), true); + if (err) + goto err_fid_mc_flood_set; + err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), true); if (err) @@ -3615,6 +3630,9 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif) return 0; err_fid_bc_flood_set: + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); +err_fid_mc_flood_set: mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); return err; } @@ -3626,6 +3644,8 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif) mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC, mlxsw_sp_router_port(mlxsw_sp), false); + mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC, + mlxsw_sp_router_port(mlxsw_sp), false); mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false); } -- cgit v1.2.3-55-g7522 From 5ea1237f94906937ded990316c79d3b6d942da2a Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Tue, 18 Jul 2017 10:10:13 +0200 Subject: mlxsw: spectrum_router: Configure RIFs based on IPv6 addresses When a netdev is configured with an IP address a router interface (RIF) should be configured for it in the device. Allow configuration of RIFs based on IPv6 address notifications as well as IPv4. Note that the RIF exists as long as an IP address is configured on the netdev, regardless of the address family. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 8 +++ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 + .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 79 ++++++++++++++++++++-- 3 files changed, 84 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index d6dc3dc4f319..88b668ba0d8a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -58,6 +58,7 @@ #include #include #include +#include #include "spectrum.h" #include "pci.h" @@ -4393,6 +4394,10 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { .priority = 10, /* Must be called before FIB notifier block */ }; +static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { + .notifier_call = mlxsw_sp_inet6addr_event, +}; + static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = { .notifier_call = mlxsw_sp_router_netevent_event, }; @@ -4413,6 +4418,7 @@ static int __init mlxsw_sp_module_init(void) register_netdevice_notifier(&mlxsw_sp_netdevice_nb); register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); + register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); register_netevent_notifier(&mlxsw_sp_router_netevent_nb); err = mlxsw_core_driver_register(&mlxsw_sp_driver); @@ -4429,6 +4435,7 @@ err_pci_driver_register: mlxsw_core_driver_unregister(&mlxsw_sp_driver); err_core_driver_register: unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); + unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); return err; @@ -4439,6 +4446,7 @@ static void __exit mlxsw_sp_module_exit(void) mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver); mlxsw_core_driver_unregister(&mlxsw_sp_driver); unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb); + unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 5ef98d4d0ab6..e848f06e34e6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -384,6 +384,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); int mlxsw_sp_inetaddr_event(struct notifier_block *unused, unsigned long event, void *ptr); +int mlxsw_sp_inet6addr_event(struct notifier_block *unused, + unsigned long event, void *ptr); int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, struct netdev_notifier_changeupper_info *info); void diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 690bb9fed57a..7224066ac589 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -49,6 +49,7 @@ #include #include #include +#include #include "spectrum.h" #include "core.h" @@ -2941,17 +2942,30 @@ static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif); } -static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, - const struct in_device *in_dev, - unsigned long event) +static bool +mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev, + 
unsigned long event) { + struct inet6_dev *inet6_dev; + bool addr_list_empty = true; + struct in_device *idev; + switch (event) { case NETDEV_UP: if (!rif) return true; return false; case NETDEV_DOWN: - if (rif && !in_dev->ifa_list && + idev = __in_dev_get_rtnl(dev); + if (idev && idev->ifa_list) + addr_list_empty = false; + + inet6_dev = __in6_dev_get(dev); + if (addr_list_empty && inet6_dev && + !list_empty(&inet6_dev->addr_list)) + addr_list_empty = false; + + if (rif && addr_list_empty && !netif_is_l3_slave(rif->dev)) return true; /* It is possible we already removed the RIF ourselves @@ -3349,7 +3363,7 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused, goto out; rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event)) + if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; err = __mlxsw_sp_inetaddr_event(dev, event); @@ -3357,6 +3371,61 @@ out: return notifier_from_errno(err); } +struct mlxsw_sp_inet6addr_event_work { + struct work_struct work; + struct net_device *dev; + unsigned long event; +}; + +static void mlxsw_sp_inet6addr_event_work(struct work_struct *work) +{ + struct mlxsw_sp_inet6addr_event_work *inet6addr_work = + container_of(work, struct mlxsw_sp_inet6addr_event_work, work); + struct net_device *dev = inet6addr_work->dev; + unsigned long event = inet6addr_work->event; + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *rif; + + rtnl_lock(); + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + goto out; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!mlxsw_sp_rif_should_config(rif, dev, event)) + goto out; + + __mlxsw_sp_inetaddr_event(dev, event); +out: + rtnl_unlock(); + dev_put(dev); + kfree(inet6addr_work); +} + +/* Called with rcu_read_lock() */ +int mlxsw_sp_inet6addr_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr; + struct mlxsw_sp_inet6addr_event_work *inet6addr_work; + struct net_device *dev = if6->idev->dev; + + if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) + return NOTIFY_DONE; + + inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC); + if (!inet6addr_work) + return NOTIFY_BAD; + + INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work); + inet6addr_work->dev = dev; + inet6addr_work->event = event; + dev_hold(dev); + mlxsw_core_schedule_work(&inet6addr_work->work); + + return NOTIFY_DONE; +} + static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, const char *mac, int mtu) { -- cgit v1.2.3-55-g7522 From 6929e50736d909f6a61fb638cc4f58ec58feafd2 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Tue, 18 Jul 2017 10:10:14 +0200 Subject: mlxsw: reg: Update RAUHT register with IPv6 support Update the register, so the IPv6 neighbours could be programmed to the device's neighbour table. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index fe196fd84491..abfa63181ec1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4960,6 +4960,7 @@ MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16); * Access: Index */ MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32); +MLXSW_ITEM_BUF(reg, rauht, dip6, 0x10, 16); enum mlxsw_reg_rauht_trap_action { MLXSW_REG_RAUHT_TRAP_ACTION_NOP, @@ -5024,6 +5025,15 @@ static inline void mlxsw_reg_rauht_pack4(char *payload, mlxsw_reg_rauht_dip4_set(payload, dip); } +static inline void mlxsw_reg_rauht_pack6(char *payload, + enum mlxsw_reg_rauht_op op, u16 rif, + const char *mac, const char *dip) +{ + mlxsw_reg_rauht_pack(payload, op, rif, mac); + mlxsw_reg_rauht_type_set(payload, MLXSW_REG_RAUHT_TYPE_IPV6); + mlxsw_reg_rauht_dip6_memcpy_to(payload, dip); +} + /* RALEU - Router Algorithmic LPM ECMP Update Register * --------------------------------------------------- * The register enables updating the ECMP section in the action for multiple -- cgit v1.2.3-55-g7522 From d5eb89cf68d674d165e36c49b486b5a487172d1c Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Tue, 18 Jul 2017 10:10:15 +0200 Subject: mlxsw: spectrum_router: Reflect IPv6 neighbours to the device As with IPv4, listen to NEIGH_UPDATE events from the ndisc table and program relevant neighbours to the device's neighbour table. Note that neighbours with a link-local IP address aren't programmed, as packets with a link-local destination IP are trapped after LPM lookup and never reach the neighbour table. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 40 ++++++++++++++++++++-- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 7224066ac589..a0de7365d57e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -50,6 +50,8 @@ #include #include #include +#include +#include #include "spectrum.h" #include "core.h" @@ -1147,6 +1149,32 @@ mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); } +static void +mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_neigh_entry *neigh_entry, + enum mlxsw_reg_rauht_op op) +{ + struct neighbour *n = neigh_entry->key.n; + char rauht_pl[MLXSW_REG_RAUHT_LEN]; + const char *dip = n->primary_key; + + mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha, + dip); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); +} + +static bool mlxsw_sp_neigh_ipv6_ignore(struct neighbour *n) +{ + /* Packets with a link-local destination address are trapped + * after LPM lookup and never reach the neighbour table, so + * there is no need to program such neighbours to the device. 
+ */ + if (ipv6_addr_type((struct in6_addr *) &n->primary_key) & + IPV6_ADDR_LINKLOCAL) + return true; + return false; +} + static void mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_neigh_entry *neigh_entry, @@ -1155,11 +1183,17 @@ mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, if (!adding && !neigh_entry->connected) return; neigh_entry->connected = adding; - if (neigh_entry->key.n->tbl == &arp_tbl) + if (neigh_entry->key.n->tbl == &arp_tbl) { mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry, mlxsw_sp_rauht_op(adding)); - else + } else if (neigh_entry->key.n->tbl == &nd_tbl) { + if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry->key.n)) + return; + mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry, + mlxsw_sp_rauht_op(adding)); + } else { WARN_ON_ONCE(1); + } } struct mlxsw_sp_neigh_event_work { @@ -1247,7 +1281,7 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, case NETEVENT_NEIGH_UPDATE: n = ptr; - if (n->tbl != &arp_tbl) + if (n->tbl != &arp_tbl && n->tbl != &nd_tbl) return NOTIFY_DONE; mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev); -- cgit v1.2.3-55-g7522 From 72e8ebe1b3cc57b379eaeeab76b9652bd27e7bfb Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Tue, 18 Jul 2017 10:10:16 +0200 Subject: mlxsw: reg: Update RAUHTD register with IPv6 support Update the register so that the active IPv6 neighbours could be dumped from the device's neighbour table. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 32 +++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index abfa63181ec1..0fc2263dee3a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -5232,6 +5232,30 @@ MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0, MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0, 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false); +#define MLXSW_REG_RAUHTD_IPV6_ENT_LEN 0x20 + +/* reg_rauhtd_ipv6_ent_a + * Activity. Set for new entries. Set if a packet lookup has hit on the + * specific entry. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv6_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1, + MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x00, false); + +/* reg_rauhtd_ipv6_ent_rif + * Router interface. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv6_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0, + 16, MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x00, false); + +/* reg_rauhtd_ipv6_ent_dip + * Destination IPv6 address. + * Access: RO + */ +MLXSW_ITEM_BUF_INDEXED(reg, rauhtd, ipv6_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, + 16, MLXSW_REG_RAUHTD_IPV6_ENT_LEN, 0x10); + static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload, int ent_index, u16 *p_rif, u32 *p_dip) @@ -5240,6 +5264,14 @@ static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload, *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index); } +static inline void mlxsw_reg_rauhtd_ent_ipv6_unpack(char *payload, + int rec_index, u16 *p_rif, + char *p_dip) +{ + *p_rif = mlxsw_reg_rauhtd_ipv6_ent_rif_get(payload, rec_index); + mlxsw_reg_rauhtd_ipv6_ent_dip_memcpy_from(payload, rec_index, p_dip); +} + /* MFCR - Management Fan Control Register * -------------------------------------- * This register controls the settings of the Fan Speed PWM mechanism. 
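For orientation, what a consumer of one dumped IPv6 record ends up with is a 16-bit RIF plus a 16-byte destination address. A small stand-alone sketch of that, in ordinary user-space C rather than driver code — the record layout here is invented for illustration, the real field offsets come from the MLXSW_ITEM32_INDEXED()/MLXSW_ITEM_BUF_INDEXED() definitions above:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative record; not the on-wire RAUHTD layout. */
struct demo_rauhtd_ipv6_rec {
	uint16_t rif;
	uint8_t dip[16];
};

static void demo_unpack(const struct demo_rauhtd_ipv6_rec *rec)
{
	char buf[INET6_ADDRSTRLEN];
	struct in6_addr dip;

	/* Copy the 16-byte destination IP out of the record and print it. */
	memcpy(&dip, rec->dip, sizeof(dip));
	inet_ntop(AF_INET6, &dip, buf, sizeof(buf));
	printf("rif=%u dip=%s\n", (unsigned int)rec->rif, buf);
}

int main(void)
{
	struct demo_rauhtd_ipv6_rec rec = {
		.rif = 1,
		.dip = { 0xfe, 0x80, [15] = 0x01 },	/* fe80::1 */
	};

	demo_unpack(&rec);
	return 0;
}

mlxsw_sp_router_neigh_ent_ipv6_process() in the next patch does the kernel-side equivalent: it resolves the RIF to a netdev and looks the address up in nd_tbl before kicking the neighbour entry.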
-- cgit v1.2.3-55-g7522 From 60f040ca11b968371fc1cd910d4648e58e6b80d3 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Tue, 18 Jul 2017 10:10:17 +0200 Subject: mlxsw: spectrum_router: Periodically dump active IPv6 neighbours In addition to IPv4, periodically dump IPv6 neighbours and update the kernel about them. Signed-off-by: Arkadi Sharshevsky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 79 +++++++++++++++++++--- 1 file changed, 69 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index a0de7365d57e..312fb67277d7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -968,6 +968,36 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, neigh_release(n); } +static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ + struct net_device *dev; + struct neighbour *n; + struct in6_addr dip; + u16 rif; + + mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif, + (char *) &dip); + + if (!mlxsw_sp->router->rifs[rif]) { + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n"); + return; + } + + dev = mlxsw_sp->router->rifs[rif]->dev; + n = neigh_lookup(&nd_tbl, &dip, dev); + if (!n) { + netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n", + &dip); + return; + } + + netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip); + neigh_event_send(n, NULL); + neigh_release(n); +} + static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, char *rauhtd_pl, int rec_index) @@ -991,6 +1021,15 @@ static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, } +static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ + /* One record contains one entry. */ + mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl, + rec_index); +} + static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, char *rauhtd_pl, int rec_index) { @@ -1000,7 +1039,8 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, rec_index); break; case MLXSW_REG_RAUHTD_TYPE_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl, + rec_index); break; } } @@ -1025,22 +1065,20 @@ static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl) return false; } -static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) +static int +__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + enum mlxsw_reg_rauhtd_type type) { - char *rauhtd_pl; - u8 num_rec; - int i, err; - - rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL); - if (!rauhtd_pl) - return -ENOMEM; + int i, num_rec; + int err; /* Make sure the neighbour's netdev isn't removed in the * process. 
*/ rtnl_lock(); do { - mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4); + mlxsw_reg_rauhtd_pack(rauhtd_pl, type); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd), rauhtd_pl); if (err) { @@ -1054,6 +1092,27 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl)); rtnl_unlock(); + return err; +} + +static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) +{ + enum mlxsw_reg_rauhtd_type type; + char *rauhtd_pl; + int err; + + rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL); + if (!rauhtd_pl) + return -ENOMEM; + + type = MLXSW_REG_RAUHTD_TYPE_IPV4; + err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type); + if (err) + goto out; + + type = MLXSW_REG_RAUHTD_TYPE_IPV6; + err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type); +out: kfree(rauhtd_pl); return err; } -- cgit v1.2.3-55-g7522 From a6c9b5d1990577eed514110f4ba4d3bc032c57fd Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Tue, 18 Jul 2017 10:10:18 +0200 Subject: mlxsw: spectrum_router: Set activity interval according to both neighbour tables The neighbours' activity is currently dumped according to the ARP table's DELAY_PROBE time, but with the introduction of IPv6 offload we should set the interval according to the minimum between the ARP and ndisc tables. Signed-off-by: Arkadi Sharshvesky Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 312fb67277d7..cb797fb43b3d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -932,8 +932,11 @@ mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) static void mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp) { - unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); + unsigned long interval; + interval = min_t(unsigned long, + NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME), + NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME)); mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval); } @@ -1321,7 +1324,7 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, p = ptr; /* We don't care about changes in the default table. */ - if (!p->dev || p->tbl != &arp_tbl) + if (!p->dev || (p->tbl != &arp_tbl && p->tbl != &nd_tbl)) return NOTIFY_DONE; /* We are in atomic context and can't take RTNL mutex, -- cgit v1.2.3-55-g7522 From 58adf2c48003d1a7469fe0d0b438300ab6173031 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:19 +0200 Subject: mlxsw: spectrum_router: Don't assume neighbour type Thankfully, the neighbour subsystem is agnostic to the upper protocol and used by both IPv4 and IPv6. By removing assumptions regarding the neighbour type we can thus re-use much of the neighbour-related code for both IPv4 and IPv6. For each nexthop, store its gateway IP and for nexthop group store the neighbour table used by its nexthops. Use this information throughout the code and remove assumption about the neighbour type. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index cb797fb43b3d..36f2b6843f17 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1437,6 +1437,7 @@ struct mlxsw_sp_nexthop { */ struct rhash_head ht_node; struct mlxsw_sp_nexthop_key key; + unsigned char gw_addr[sizeof(struct in6_addr)]; struct mlxsw_sp_rif *rif; u8 should_offload:1, /* set indicates this neigh is connected and * should be put to KVD linear area of this group. @@ -1457,6 +1458,7 @@ struct mlxsw_sp_nexthop_group_key { struct mlxsw_sp_nexthop_group { struct rhash_head ht_node; struct list_head fib_list; /* list of fib entries that use this group */ + struct neigh_table *neigh_tbl; struct mlxsw_sp_nexthop_group_key key; u8 adj_index_valid:1, gateway:1; /* routes using the group use a gateway */ @@ -1774,7 +1776,6 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh) { struct mlxsw_sp_neigh_entry *neigh_entry; - struct fib_nh *fib_nh = nh->key.fib_nh; struct neighbour *n; u8 nud_state, dead; int err; @@ -1787,9 +1788,10 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, * The reference is taken either in neigh_lookup() or * in neigh_create() in case n is not found. */ - n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev); + n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev); if (!n) { - n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev); + n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr, + nh->rif->dev); if (IS_ERR(n)) return PTR_ERR(n); neigh_event_send(n, NULL); @@ -1863,6 +1865,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, nh->nh_grp = nh_grp; nh->key.fib_nh = fib_nh; + memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw)); err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh); if (err) return err; @@ -1961,6 +1964,8 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) if (!nh_grp) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&nh_grp->fib_list); + nh_grp->neigh_tbl = &arp_tbl; + nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK; nh_grp->count = fi->fib_nhs; nh_grp->key.fi = fi; -- cgit v1.2.3-55-g7522 From 160e22aa2629875c23092e38eded442002d1ebda Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:20 +0200 Subject: mlxsw: spectrum_router: Don't create FIB node during lookup When looking up a FIB entry we shouldn't create the FIB node where it's supposed to be linked in case the node doesn't already exist. Instead, lookup the node and fail if it doesn't exist. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 36f2b6843f17..da8590ba2558 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2314,8 +2314,8 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_fib_node * -mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info); +mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr, + size_t addr_len, unsigned char prefix_len); static struct mlxsw_sp_fib_entry * mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, @@ -2323,9 +2323,18 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_fib_entry *fib_entry; struct mlxsw_sp_fib_node *fib_node; + struct mlxsw_sp_fib *fib; + struct mlxsw_sp_vr *vr; - fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info); - if (IS_ERR(fib_node)) + vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id); + if (!vr) + return NULL; + fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4); + + fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst, + sizeof(fen_info->dst), + fen_info->dst_len); + if (!fib_node) return NULL; list_for_each_entry(fib_entry, &fib_node->entry_list, list) { -- cgit v1.2.3-55-g7522 From 731ea1ca429fe7c959522129c526bdda442d7c54 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:21 +0200 Subject: mlxsw: spectrum_router: Make FIB node retrieval family agnostic A FIB node is an entity which stores routes sharing the same prefix and length. The data structure itself is already family agnostic, but we make some of its operations agnostic as well and thus re-use them for IPv6 offload. Instead of passing an IPv4-specific structure to fib4_node_get(), pass general routing parameters and rename the function accordingly. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 34 +++++++++++----------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index da8590ba2558..936d96a56ede 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2499,28 +2499,25 @@ static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_fib_node * -mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp, - const struct fib_entry_notifier_info *fen_info) +mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr, + size_t addr_len, unsigned char prefix_len, + enum mlxsw_sp_l3proto proto) { struct mlxsw_sp_fib_node *fib_node; struct mlxsw_sp_fib *fib; struct mlxsw_sp_vr *vr; int err; - vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id); + vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id); if (IS_ERR(vr)) return ERR_CAST(vr); - fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4); + fib = mlxsw_sp_vr_fib(vr, proto); - fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len); + fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len); if (fib_node) return fib_node; - fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst, - sizeof(fen_info->dst), - fen_info->dst_len); + fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len); if (!fib_node) { err = -ENOMEM; goto err_fib_node_create; @@ -2539,8 +2536,8 @@ err_fib_node_create: return ERR_PTR(err); } -static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_node *fib_node) +static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) { struct mlxsw_sp_vr *vr = fib_node->fib->vr; @@ -2725,7 +2722,7 @@ static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced); mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced); - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } static int @@ -2740,7 +2737,10 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp->router->aborted) return 0; - fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info); + fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id, + &fen_info->dst, sizeof(fen_info->dst), + fen_info->dst_len, + MLXSW_SP_L3_PROTO_IPV4); if (IS_ERR(fib_node)) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n"); return PTR_ERR(fib_node); @@ -2767,7 +2767,7 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, err_fib4_node_entry_link: mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); err_fib4_entry_create: - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); return err; } @@ -2787,7 +2787,7 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) @@ -2846,7 +2846,7 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); /* Break when entry list is empty and 
node was freed. * Otherwise, we'll access freed memory in the next * iteration. -- cgit v1.2.3-55-g7522 From a3d9bc506d6434c5557e8d069a611d1e60a2a62e Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:22 +0200 Subject: mlxsw: spectrum_router: Extend virtual routers with IPv6 support A Virtual Router (VR) is an entity which corresponds to a VRF and performs FIB lookup in an LPM tree according to the {VR, IP Proto} -> Tree binding. Extend the virtual router data structure towards IPv6 FIB offload. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 28 +++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 936d96a56ede..00d5449e1417 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -431,6 +431,7 @@ struct mlxsw_sp_vr { u32 tb_id; /* kernel fib table id */ unsigned int rif_count; struct mlxsw_sp_fib *fib4; + struct mlxsw_sp_fib *fib6; }; static const struct rhashtable_params mlxsw_sp_fib_ht_params; @@ -628,7 +629,7 @@ static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp) static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr) { - return !!vr->fib4; + return !!vr->fib4 || !!vr->fib6; } static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) @@ -697,7 +698,7 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr, case MLXSW_SP_L3_PROTO_IPV4: return vr->fib4; case MLXSW_SP_L3_PROTO_IPV6: - BUG_ON(1); + return vr->fib6; } return NULL; } @@ -706,6 +707,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, u32 tb_id) { struct mlxsw_sp_vr *vr; + int err; vr = mlxsw_sp_vr_find_unused(mlxsw_sp); if (!vr) @@ -713,12 +715,24 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp, vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4); if (IS_ERR(vr->fib4)) return ERR_CAST(vr->fib4); + vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(vr->fib6)) { + err = PTR_ERR(vr->fib6); + goto err_fib6_create; + } vr->tb_id = tb_id; return vr; + +err_fib6_create: + mlxsw_sp_fib_destroy(vr->fib4); + vr->fib4 = NULL; + return ERR_PTR(err); } static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr) { + mlxsw_sp_fib_destroy(vr->fib6); + vr->fib6 = NULL; mlxsw_sp_fib_destroy(vr->fib4); vr->fib4 = NULL; } @@ -776,7 +790,8 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id) static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr) { - if (!vr->rif_count && list_empty(&vr->fib4->node_list)) + if (!vr->rif_count && list_empty(&vr->fib4->node_list) && + list_empty(&vr->fib6->node_list)) mlxsw_sp_vr_destroy(vr); } @@ -2895,6 +2910,13 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp_vr_is_used(vr)) continue; mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4); + + /* If virtual router was only used for IPv4, then it's no + * longer used. 
+ */ + if (!mlxsw_sp_vr_is_used(vr)) + continue; + mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6); } } -- cgit v1.2.3-55-g7522 From 62547f407fa13e02c4d8ddb1ed481a7c60068b2f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:23 +0200 Subject: mlxsw: reg: Update RALUE register with IPv6 support Update the register so that IPv6 LPM entries could be programmed to the device's table. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 0fc2263dee3a..c6c508941d23 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4724,6 +4724,7 @@ MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8); * Access: Index */ MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32); +MLXSW_ITEM_BUF(reg, ralue, dip6, 0x0C, 16); enum mlxsw_reg_ralue_entry_type { MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1, @@ -4857,6 +4858,16 @@ static inline void mlxsw_reg_ralue_pack4(char *payload, mlxsw_reg_ralue_dip4_set(payload, dip); } +static inline void mlxsw_reg_ralue_pack6(char *payload, + enum mlxsw_reg_ralxx_protocol protocol, + enum mlxsw_reg_ralue_op op, + u16 virtual_router, u8 prefix_len, + const void *dip) +{ + mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len); + mlxsw_reg_ralue_dip6_memcpy_to(payload, dip); +} + static inline void mlxsw_reg_ralue_act_remote_pack(char *payload, enum mlxsw_reg_ralue_trap_action trap_action, -- cgit v1.2.3-55-g7522 From 9dbf4d76d07d3b581238149d6e87796c39c94716 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:24 +0200 Subject: mlxsw: spectrum_router: Allow IPv6 routes to be programmed Take advantage of previous patch and allow the RALUE register to be called with IPv6 routes. In order to re-use as much code as possible between IPv4 and IPv6, only the lowest-level function that actually does the register packing is demuxed based on the passed protocol. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 87 ++++++++++++---------- 1 file changed, 46 insertions(+), 41 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 00d5449e1417..1141d742468b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2126,13 +2126,37 @@ mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, } } -static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static void +mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl, + const struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { - char ralue_pl[MLXSW_REG_RALUE_LEN]; struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; - u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; + enum mlxsw_reg_ralxx_protocol proto; + u32 *p_dip; + + proto = (enum mlxsw_reg_ralxx_protocol) fib->proto; + + switch (fib->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + p_dip = (u32 *) fib_entry->fib_node->key.addr; + mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id, + fib_entry->fib_node->key.prefix_len, + *p_dip); + break; + case MLXSW_SP_L3_PROTO_IPV6: + mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id, + fib_entry->fib_node->key.prefix_len, + fib_entry->fib_node->key.addr); + break; + } +} + +static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) +{ + char ralue_pl[MLXSW_REG_RALUE_LEN]; enum mlxsw_reg_ralue_trap_action trap_action; u16 trap_id = 0; u32 adjacency_index = 0; @@ -2151,24 +2175,19 @@ static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp, trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; } - mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) fib->proto, op, - fib->vr->id, fib_entry->fib_node->key.prefix_len, - *p_dip); + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id, adjacency_index, ecmp_size); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } -static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif; - struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib; enum mlxsw_reg_ralue_trap_action trap_action; char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; u16 trap_id = 0; u16 rif_index = 0; @@ -2180,42 +2199,34 @@ static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp, trap_id = MLXSW_TRAP_ID_RTR_INGRESS0; } - mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) fib->proto, op, - fib->vr->id, fib_entry->fib_node->key.prefix_len, - *p_dip); + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, rif_index); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } -static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { - struct 
mlxsw_sp_fib *fib = fib_entry->fib_node->fib; char ralue_pl[MLXSW_REG_RALUE_LEN]; - u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr; - mlxsw_reg_ralue_pack4(ralue_pl, - (enum mlxsw_reg_ralxx_protocol) fib->proto, op, - fib->vr->id, fib_entry->fib_node->key.prefix_len, - *p_dip); + mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op); mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); } -static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - enum mlxsw_reg_ralue_op op) +static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op) { switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: - return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL: - return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op); case MLXSW_SP_FIB_ENTRY_TYPE_TRAP: - return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op); + return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op); } return -EINVAL; } @@ -2224,16 +2235,10 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, enum mlxsw_reg_ralue_op op) { - int err = -EINVAL; + int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op); - switch (fib_entry->fib_node->fib->proto) { - case MLXSW_SP_L3_PROTO_IPV4: - err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op); - break; - case MLXSW_SP_L3_PROTO_IPV6: - return err; - } mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err); + return err; } -- cgit v1.2.3-55-g7522 From bc65a8a4f4d214c30c21cba90607c93ac676f6cf Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:25 +0200 Subject: mlxsw: spectrum_router: Set abort trap for IPv6 When we fail to insert a route we invoke the abort mechanism which flushes all the tables and inserts a default route in each, so that all packets incoming to the router will be trapped to the CPU. Upon abort, add an IPv6 default route to the IPv6 tables. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 39 ++++++++++++++-------- 1 file changed, 26 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 1141d742468b..0017a611ed16 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2810,19 +2810,20 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } -static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) +static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_reg_ralxx_protocol proto, + u8 tree_id) { char ralta_pl[MLXSW_REG_RALTA_LEN]; char ralst_pl[MLXSW_REG_RALST_LEN]; int i, err; - mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4, - MLXSW_SP_LPM_TREE_MIN); + mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); if (err) return err; - mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN); + mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl); if (err) return err; @@ -2835,17 +2836,14 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) if (!mlxsw_sp_vr_is_used(vr)) continue; - mlxsw_reg_raltb_pack(raltb_pl, vr->id, - MLXSW_REG_RALXX_PROTOCOL_IPV4, - MLXSW_SP_LPM_TREE_MIN); + mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); if (err) return err; - mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4, - MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0, - 0); + mlxsw_reg_ralue_pack(ralue_pl, proto, + MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0); mlxsw_reg_ralue_act_ip2me_pack(ralue_pl); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); @@ -2856,6 +2854,21 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) return 0; } +static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) +{ + enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4; + int err; + + err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, + MLXSW_SP_LPM_TREE_MIN); + if (err) + return err; + + proto = MLXSW_REG_RALXX_PROTOCOL_IPV6; + return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto, + MLXSW_SP_LPM_TREE_MIN + 1); +} + static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { @@ -2925,7 +2938,7 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) } } -static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) +static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp) { int err; @@ -2970,7 +2983,7 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info, replace, append); if (err) - mlxsw_sp_router_fib4_abort(mlxsw_sp); + mlxsw_sp_router_fib_abort(mlxsw_sp); fib_info_put(fib_work->fen_info.fi); break; case FIB_EVENT_ENTRY_DEL: @@ -2981,7 +2994,7 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) case FIB_EVENT_RULE_DEL: rule = fib_work->fr_info.rule; if (!fib4_rule_default(rule) && !rule->l3mdev) - mlxsw_sp_router_fib4_abort(mlxsw_sp); + mlxsw_sp_router_fib_abort(mlxsw_sp); fib_rule_put(rule); break; case FIB_EVENT_NH_ADD: /* fall through */ -- cgit v1.2.3-55-g7522 From 4f1c7f1f2e954cca8230fad4b33b192f84307f50 Mon Sep 17 
00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:26 +0200 Subject: mlxsw: spectrum_router: Create IPv4 specific entry struct Some of the parameters stored in the FIB entry structure are specific to IPv4 and therefore better placed in an IPv4 specific structure. Create an IPv4 specific structure that encapsulates the common FIB entry structure and contains IPv4 specific parameters. In a follow-up patchset an IPv6 specific structure will be introduced. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 223 ++++++++++++--------- 1 file changed, 123 insertions(+), 100 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 0017a611ed16..52487a4db09e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -387,23 +387,23 @@ struct mlxsw_sp_fib_node { struct mlxsw_sp_fib_key key; }; -struct mlxsw_sp_fib_entry_params { - u32 tb_id; - u32 prio; - u8 tos; - u8 type; -}; - struct mlxsw_sp_fib_entry { struct list_head list; struct mlxsw_sp_fib_node *fib_node; enum mlxsw_sp_fib_entry_type type; struct list_head nexthop_group_node; struct mlxsw_sp_nexthop_group *nh_group; - struct mlxsw_sp_fib_entry_params params; bool offloaded; }; +struct mlxsw_sp_fib4_entry { + struct mlxsw_sp_fib_entry common; + u32 tb_id; + u32 prio; + u8 tos; + u8 type; +}; + enum mlxsw_sp_l3proto { MLXSW_SP_L3_PROTO_IPV4, MLXSW_SP_L3_PROTO_IPV6, @@ -2057,13 +2057,29 @@ static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp); } +static bool +mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib4_entry *fib4_entry; + + fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry, + common); + return !fib4_entry->tos; +} + static bool mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group; - if (fib_entry->params.tos) - return false; + switch (fib_entry->fib_node->fib->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + if (!mlxsw_sp_fib4_entry_should_offload(fib_entry)) + return false; + break; + case MLXSW_SP_L3_PROTO_IPV6: + break; + } switch (fib_entry->type) { case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE: @@ -2288,19 +2304,19 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp, } } -static struct mlxsw_sp_fib_entry * +static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node, const struct fib_entry_notifier_info *fen_info) { + struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_entry *fib_entry; int err; - fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL); - if (!fib_entry) { - err = -ENOMEM; - goto err_fib_entry_alloc; - } + fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL); + if (!fib4_entry) + return ERR_PTR(-ENOMEM); + fib_entry = &fib4_entry->common; err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry); if (err) @@ -2310,38 +2326,37 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_nexthop_group_get; - fib_entry->params.prio = fen_info->fi->fib_priority; - fib_entry->params.tb_id = fen_info->tb_id; - fib_entry->params.type = fen_info->type; - fib_entry->params.tos = fen_info->tos; + fib4_entry->prio = fen_info->fi->fib_priority; + fib4_entry->tb_id = 
fen_info->tb_id; + fib4_entry->type = fen_info->type; + fib4_entry->tos = fen_info->tos; fib_entry->fib_node = fib_node; - return fib_entry; + return fib4_entry; err_nexthop_group_get: err_fib4_entry_type_set: - kfree(fib_entry); -err_fib_entry_alloc: + kfree(fib4_entry); return ERR_PTR(err); } static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) + struct mlxsw_sp_fib4_entry *fib4_entry) { - mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry); - kfree(fib_entry); + mlxsw_sp_nexthop_group_put(mlxsw_sp, &fib4_entry->common); + kfree(fib4_entry); } static struct mlxsw_sp_fib_node * mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr, size_t addr_len, unsigned char prefix_len); -static struct mlxsw_sp_fib_entry * +static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_node *fib_node; struct mlxsw_sp_fib *fib; struct mlxsw_sp_vr *vr; @@ -2357,12 +2372,12 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, if (!fib_node) return NULL; - list_for_each_entry(fib_entry, &fib_node->entry_list, list) { - if (fib_entry->params.tb_id == fen_info->tb_id && - fib_entry->params.tos == fen_info->tos && - fib_entry->params.type == fen_info->type && - fib_entry->nh_group->key.fi == fen_info->fi) { - return fib_entry; + list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) { + if (fib4_entry->tb_id == fen_info->tb_id && + fib4_entry->tos == fen_info->tos && + fib4_entry->type == fen_info->type && + fib4_entry->common.nh_group->key.fi == fen_info->fi) { + return fib4_entry; } } @@ -2568,88 +2583,93 @@ static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_vr_put(vr); } -static struct mlxsw_sp_fib_entry * +static struct mlxsw_sp_fib4_entry * mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct mlxsw_sp_fib_entry_params *params) + const struct mlxsw_sp_fib4_entry *new4_entry) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; - list_for_each_entry(fib_entry, &fib_node->entry_list, list) { - if (fib_entry->params.tb_id > params->tb_id) + list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) { + if (fib4_entry->tb_id > new4_entry->tb_id) continue; - if (fib_entry->params.tb_id != params->tb_id) + if (fib4_entry->tb_id != new4_entry->tb_id) break; - if (fib_entry->params.tos > params->tos) + if (fib4_entry->tos > new4_entry->tos) continue; - if (fib_entry->params.prio >= params->prio || - fib_entry->params.tos < params->tos) - return fib_entry; + if (fib4_entry->prio >= new4_entry->prio || + fib4_entry->tos < new4_entry->tos) + return fib4_entry; } return NULL; } -static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry, - struct mlxsw_sp_fib_entry *new_entry) +static int +mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry, + struct mlxsw_sp_fib4_entry *new4_entry) { struct mlxsw_sp_fib_node *fib_node; - if (WARN_ON(!fib_entry)) + if (WARN_ON(!fib4_entry)) return -EINVAL; - fib_node = fib_entry->fib_node; - list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) { - if (fib_entry->params.tb_id != new_entry->params.tb_id || - fib_entry->params.tos != new_entry->params.tos || - fib_entry->params.prio != new_entry->params.prio) + fib_node = fib4_entry->common.fib_node; + list_for_each_entry_from(fib4_entry, 
&fib_node->entry_list, + common.list) { + if (fib4_entry->tb_id != new4_entry->tb_id || + fib4_entry->tos != new4_entry->tos || + fib4_entry->prio != new4_entry->prio) break; } - list_add_tail(&new_entry->list, &fib_entry->list); + list_add_tail(&new4_entry->common.list, &fib4_entry->common.list); return 0; } static int mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib_entry *new_entry, + struct mlxsw_sp_fib4_entry *new4_entry, bool replace, bool append) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; - fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params); + fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry); if (append) - return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry); - if (replace && WARN_ON(!fib_entry)) + return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry); + if (replace && WARN_ON(!fib4_entry)) return -EINVAL; /* Insert new entry before replaced one, so that we can later * remove the second. */ - if (fib_entry) { - list_add_tail(&new_entry->list, &fib_entry->list); + if (fib4_entry) { + list_add_tail(&new4_entry->common.list, + &fib4_entry->common.list); } else { - struct mlxsw_sp_fib_entry *last; + struct mlxsw_sp_fib4_entry *last; - list_for_each_entry(last, &fib_node->entry_list, list) { - if (new_entry->params.tb_id > last->params.tb_id) + list_for_each_entry(last, &fib_node->entry_list, common.list) { + if (new4_entry->tb_id > last->tb_id) break; - fib_entry = last; + fib4_entry = last; } - if (fib_entry) - list_add(&new_entry->list, &fib_entry->list); + if (fib4_entry) + list_add(&new4_entry->common.list, + &fib4_entry->common.list); else - list_add(&new_entry->list, &fib_node->entry_list); + list_add(&new4_entry->common.list, + &fib_node->entry_list); } return 0; } static void -mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry) +mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry) { - list_del(&fib_entry->list); + list_del(&fib4_entry->common.list); } static int @@ -2695,50 +2715,52 @@ mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, + struct mlxsw_sp_fib4_entry *fib4_entry, bool replace, bool append) { - struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + struct mlxsw_sp_fib_node *fib_node; int err; - err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace, + fib_node = fib4_entry->common.fib_node; + err = mlxsw_sp_fib4_node_list_insert(fib_node, fib4_entry, replace, append); if (err) return err; - err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry); + err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, + &fib4_entry->common); if (err) goto err_fib4_node_entry_add; return 0; err_fib4_node_entry_add: - mlxsw_sp_fib4_node_list_remove(fib_entry); + mlxsw_sp_fib4_node_list_remove(fib4_entry); return err; } static void mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) + struct mlxsw_sp_fib4_entry *fib4_entry) { - struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; - mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry); - mlxsw_sp_fib4_node_list_remove(fib_entry); + mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, &fib4_entry->common); + mlxsw_sp_fib4_node_list_remove(fib4_entry); } static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp 
*mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, + struct mlxsw_sp_fib4_entry *fib4_entry, bool replace) { - struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; - struct mlxsw_sp_fib_entry *replaced; + struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; + struct mlxsw_sp_fib4_entry *replaced; if (!replace) return; /* We inserted the new entry before replaced one */ - replaced = list_next_entry(fib_entry, list); + replaced = list_next_entry(fib4_entry, common.list); mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced); mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced); @@ -2750,7 +2772,7 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, const struct fib_entry_notifier_info *fen_info, bool replace, bool append) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_node *fib_node; int err; @@ -2766,26 +2788,26 @@ mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp, return PTR_ERR(fib_node); } - fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info); - if (IS_ERR(fib_entry)) { + fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info); + if (IS_ERR(fib4_entry)) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n"); - err = PTR_ERR(fib_entry); + err = PTR_ERR(fib4_entry); goto err_fib4_entry_create; } - err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace, + err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace, append); if (err) { dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n"); goto err_fib4_node_entry_link; } - mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace); + mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace); return 0; err_fib4_node_entry_link: - mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); err_fib4_entry_create: mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); return err; @@ -2794,19 +2816,19 @@ err_fib4_entry_create: static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, struct fib_entry_notifier_info *fen_info) { - struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_fib4_entry *fib4_entry; struct mlxsw_sp_fib_node *fib_node; if (mlxsw_sp->router->aborted) return; - fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); - if (WARN_ON(!fib_entry)) + fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info); + if (WARN_ON(!fib4_entry)) return; - fib_node = fib_entry->fib_node; + fib_node = fib4_entry->common.fib_node; - mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry); mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } @@ -2872,13 +2894,14 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { - struct mlxsw_sp_fib_entry *fib_entry, *tmp; + struct mlxsw_sp_fib4_entry *fib4_entry, *tmp; - list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) { - bool do_break = &tmp->list == &fib_node->entry_list; + list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list, + common.list) { + bool do_break = &tmp->common.list == &fib_node->entry_list; - mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry); - mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry); + mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry); + mlxsw_sp_fib4_entry_destroy(mlxsw_sp, 
fib4_entry); mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); /* Break when entry list is empty and node was freed. * Otherwise, we'll access freed memory in the next -- cgit v1.2.3-55-g7522 From 0e6ea2a4eaef5dcf30abb5c4e02988b5d0b9342f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:27 +0200 Subject: mlxsw: spectrum_router: Mark IPv4 specific function accordingly The functions to create and destroy a nexthop group are IPv4 specific and should be renamed accordingly, so that they won't be confused with the IPv6 specific functions in follow-up patches. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 58 +++++++++++----------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 52487a4db09e..5d787c17f306 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1868,10 +1868,10 @@ static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp, neigh_release(n); } -static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp, - struct mlxsw_sp_nexthop *nh, - struct fib_nh *fib_nh) +static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + struct fib_nh *fib_nh) { struct net_device *dev = fib_nh->nh_dev; struct in_device *in_dev; @@ -1910,16 +1910,16 @@ err_nexthop_neigh_init: return err; } -static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop *nh) +static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) { mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); mlxsw_sp_nexthop_rif_fini(nh); mlxsw_sp_nexthop_remove(mlxsw_sp, nh); } -static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp, - unsigned long event, struct fib_nh *fib_nh) +static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp, + unsigned long event, struct fib_nh *fib_nh) { struct mlxsw_sp_nexthop_key key; struct mlxsw_sp_nexthop *nh; @@ -1964,7 +1964,7 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_nexthop_group * -mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) +mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) { struct mlxsw_sp_nexthop_group *nh_grp; struct mlxsw_sp_nexthop *nh; @@ -1988,9 +1988,9 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; fib_nh = &fi->fib_nh[i]; - err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh); + err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh); if (err) - goto err_nexthop_init; + goto err_nexthop4_init; } err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); if (err) @@ -1999,10 +1999,10 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) return nh_grp; err_nexthop_group_insert: -err_nexthop_init: +err_nexthop4_init: for (i--; i >= 0; i--) { nh = &nh_grp->nexthops[i]; - mlxsw_sp_nexthop_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); } fib_info_put(nh_grp->key.fi); kfree(nh_grp); @@ -2010,8 +2010,8 @@ err_nexthop_init: } static void -mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group *nh_grp) 
+mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) { struct mlxsw_sp_nexthop *nh; int i; @@ -2019,7 +2019,7 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; - mlxsw_sp_nexthop_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); } mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); WARN_ON_ONCE(nh_grp->adj_index_valid); @@ -2027,9 +2027,9 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, kfree(nh_grp); } -static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry, - struct fib_info *fi) +static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry, + struct fib_info *fi) { struct mlxsw_sp_nexthop_group_key key; struct mlxsw_sp_nexthop_group *nh_grp; @@ -2037,7 +2037,7 @@ static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, key.fi = fi; nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key); if (!nh_grp) { - nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi); + nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi); if (IS_ERR(nh_grp)) return PTR_ERR(nh_grp); } @@ -2046,15 +2046,15 @@ static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp, return 0; } -static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; list_del(&fib_entry->nexthop_group_node); if (!list_empty(&nh_grp->fib_list)) return; - mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp); + mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp); } static bool @@ -2322,9 +2322,9 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, if (err) goto err_fib4_entry_type_set; - err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi); + err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi); if (err) - goto err_nexthop_group_get; + goto err_nexthop4_group_get; fib4_entry->prio = fen_info->fi->fib_priority; fib4_entry->tb_id = fen_info->tb_id; @@ -2335,7 +2335,7 @@ mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp, return fib4_entry; -err_nexthop_group_get: +err_nexthop4_group_get: err_fib4_entry_type_set: kfree(fib4_entry); return ERR_PTR(err); @@ -2344,7 +2344,7 @@ err_fib4_entry_type_set: static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib4_entry *fib4_entry) { - mlxsw_sp_nexthop_group_put(mlxsw_sp, &fib4_entry->common); + mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common); kfree(fib4_entry); } @@ -3022,8 +3022,8 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) break; case FIB_EVENT_NH_ADD: /* fall through */ case FIB_EVENT_NH_DEL: - mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event, - fib_work->fnh_info.fib_nh); + mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event, + fib_work->fnh_info.fib_nh); fib_info_put(fib_work->fnh_info.fib_nh->nh_parent); break; } -- cgit v1.2.3-55-g7522 From 9efbee6fead645db2fe8d9e779c36875aa2b98d4 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:28 +0200 Subject: mlxsw: spectrum_router: Drop unnecessary parameter Functions that take as argument a FIB entry don't need to take FIB node as well, as it can be extracted from the entry. Remove unnecessary FIB node parameter. 
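A rough sketch of the refactoring pattern described above, using simplified stand-in types and function names rather than the actual mlxsw structures: the callee derives the containing node from the entry's back-pointer, so callers can no longer pass a mismatched node/entry pair.

/* Illustration only; these types and helpers are made up for the sketch. */
struct fib_node {
	unsigned int entry_count;
};

struct fib_entry {
	struct fib_node *fib_node;	/* back-pointer set when the entry is created */
	int offloaded;
};

/* Before: callers pass both objects, even though they are always paired. */
static void fib_entry_del_old(struct fib_node *fib_node, struct fib_entry *fib_entry)
{
	fib_node->entry_count--;
	fib_entry->offloaded = 0;
}

/* After: the containing node is recovered from the entry itself. */
static void fib_entry_del_new(struct fib_entry *fib_entry)
{
	struct fib_node *fib_node = fib_entry->fib_node;

	fib_node->entry_count--;
	fib_entry->offloaded = 0;
}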
Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 32 +++++++++------------- 1 file changed, 13 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 5d787c17f306..c8c8187a8f0a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2627,10 +2627,10 @@ mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry, } static int -mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib4_entry *new4_entry, +mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry, bool replace, bool append) { + struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node; struct mlxsw_sp_fib4_entry *fib4_entry; fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry); @@ -2672,11 +2672,11 @@ mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry) list_del(&fib4_entry->common.list); } -static int -mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib_entry *fib_entry) +static int mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) return 0; @@ -2693,11 +2693,11 @@ mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); } -static void -mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_fib_node *fib_node, - struct mlxsw_sp_fib_entry *fib_entry) +static void mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { + struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; + if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry)) return; @@ -2718,17 +2718,13 @@ static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib4_entry *fib4_entry, bool replace, bool append) { - struct mlxsw_sp_fib_node *fib_node; int err; - fib_node = fib4_entry->common.fib_node; - err = mlxsw_sp_fib4_node_list_insert(fib_node, fib4_entry, replace, - append); + err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append); if (err) return err; - err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, - &fib4_entry->common); + err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, &fib4_entry->common); if (err) goto err_fib4_node_entry_add; @@ -2743,9 +2739,7 @@ static void mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib4_entry *fib4_entry) { - struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node; - - mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, &fib4_entry->common); + mlxsw_sp_fib4_node_entry_del(mlxsw_sp, &fib4_entry->common); mlxsw_sp_fib4_node_list_remove(fib4_entry); } -- cgit v1.2.3-55-g7522 From 80c238f91b1f607b32fae54f0462a073c3674140 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:29 +0200 Subject: mlxsw: spectrum_router: Rename functions to add / delete a FIB entry These functions aren't specific to IPv4 and can be re-used for IPv6. Drop the '4' designation from their name. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index c8c8187a8f0a..32ed2b65b395 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2672,8 +2672,8 @@ mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry) list_del(&fib4_entry->common.list); } -static int mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; @@ -2693,8 +2693,8 @@ static int mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry); } -static void mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib_entry *fib_entry) +static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) { struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node; @@ -2724,13 +2724,13 @@ static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp, if (err) return err; - err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, &fib4_entry->common); + err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common); if (err) - goto err_fib4_node_entry_add; + goto err_fib_node_entry_add; return 0; -err_fib4_node_entry_add: +err_fib_node_entry_add: mlxsw_sp_fib4_node_list_remove(fib4_entry); return err; } @@ -2739,7 +2739,7 @@ static void mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib4_entry *fib4_entry) { - mlxsw_sp_fib4_node_entry_del(mlxsw_sp, &fib4_entry->common); + mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common); mlxsw_sp_fib4_node_list_remove(fib4_entry); } -- cgit v1.2.3-55-g7522 From 7dcc18adad31d15d528414bdff12bf98d33d9a20 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 18 Jul 2017 10:10:30 +0200 Subject: mlxsw: spectrum_router: Update prefix count for IPv6 The number of possible prefix lengths for IPv6 is 129 and not 128. Fixes following warning from UBSAN when /128 routes are offloaded: UBSAN: Undefined behaviour in drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c:2510:27 index 128 is out of range for type 'long unsigned int [128]' Fixes: 5e9c16cc83a7 ("mlxsw: spectrum_router: Implement private fib") Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 32ed2b65b395..e6d629f40f93 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -307,7 +307,7 @@ static struct mlxsw_sp_rif * mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, const struct net_device *dev); -#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE) +#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1) struct mlxsw_sp_prefix_usage { DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT); -- cgit v1.2.3-55-g7522 From 43f51ef11957a5ba2575dc0f20c84ba7d2ae38b9 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 18 Jul 2017 15:13:45 +0530 Subject: net: cdc_ncm: constify attribute_group structures. attribute_group are not supposed to change at runtime. All functions working with attribute_group provided by work with const attribute_group. So mark the non-const structs as const. File size before: text data bss dec hex filename 13275 928 1 14204 377c drivers/net/usb/cdc_ncm.o File size After adding 'const': text data bss dec hex filename 13339 864 1 14204 377c drivers/net/usb/cdc_ncm.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/usb/cdc_ncm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index d103a1d4fb36..b401ba9e6ddc 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -367,7 +367,7 @@ static struct attribute *cdc_ncm_sysfs_attrs[] = { NULL, }; -static struct attribute_group cdc_ncm_sysfs_attr_group = { +static const struct attribute_group cdc_ncm_sysfs_attr_group = { .name = "cdc_ncm", .attrs = cdc_ncm_sysfs_attrs, }; -- cgit v1.2.3-55-g7522 From 7ec2796e6375607104ee516b2330dd51dbb7abcd Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 18 Jul 2017 15:14:18 +0530 Subject: net: can: at91_can: constify attribute_group structures. attribute_group are not supposed to change at runtime. All functions working with attribute_group provided by work with const attribute_group. So mark the non-const structs as const. File size before: text data bss dec hex filename 6164 304 0 6468 1944 drivers/net/can/at91_can.o File size After adding 'const': text data bss dec hex filename 6228 240 0 6468 1944 drivers/net/can/at91_can.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/can/at91_can.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 0e0df0ba288c..f37ce0e1b603 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1232,7 +1232,7 @@ static struct attribute *at91_sysfs_attrs[] = { NULL, }; -static struct attribute_group at91_sysfs_attr_group = { +static const struct attribute_group at91_sysfs_attr_group = { .attrs = at91_sysfs_attrs, }; -- cgit v1.2.3-55-g7522 From 7eaf0d93f9d85527086a483d5d200b421aa325c6 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 18 Jul 2017 15:14:19 +0530 Subject: net: can: janz-ican3: constify attribute_group structures. attribute_group are not supposed to change at runtime. All functions working with attribute_group provided by work with const attribute_group. So mark the non-const structs as const. 
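A minimal, self-contained sketch of the sysfs pattern these constify patches touch; the device and attribute names here are hypothetical and not taken from any of the patched drivers. The group object is only read after it is defined, and sysfs_create_group()/sysfs_remove_group() already take a const struct attribute_group *, so marking it const needs no call-site changes.

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical read-only attribute for illustration. */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};

/* Never written after registration, so it can live in read-only data. */
static const struct attribute_group example_attr_group = {
	.name = "example",
	.attrs = example_attrs,
};

/* Typical registration/removal from a driver's probe()/remove() paths
 * (error handling omitted):
 */
static int example_register(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &example_attr_group);
}

static void example_unregister(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &example_attr_group);
}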
File size before: text data bss dec hex filename 11800 368 0 12168 2f88 drivers/net/can/janz-ican3.o File size After adding 'const': text data bss dec hex filename 11864 304 0 12168 2f88 drivers/net/can/janz-ican3.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/can/janz-ican3.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 2ba1a81500c1..12a53c8e8e1d 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1875,7 +1875,7 @@ static struct attribute *ican3_sysfs_attrs[] = { NULL, }; -static struct attribute_group ican3_sysfs_attr_group = { +static const struct attribute_group ican3_sysfs_attr_group = { .attrs = ican3_sysfs_attrs, }; -- cgit v1.2.3-55-g7522 From d7979553ef3e8927fe3f035e5d2a7e1749c27780 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 18 Jul 2017 15:14:59 +0530 Subject: wireless: ipw2200: constify attribute_group structures. attribute_group are not supposed to change at runtime. All functions working with attribute_group provided by work with const attribute_group. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 9368abdf18e2..c311b1a994c1 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -11500,7 +11500,7 @@ static struct attribute *ipw_sysfs_entries[] = { NULL }; -static struct attribute_group ipw_attribute_group = { +static const struct attribute_group ipw_attribute_group = { .name = NULL, /* put in device directory */ .attrs = ipw_sysfs_entries, }; -- cgit v1.2.3-55-g7522 From e00b6c6d98adf7277c6c897783c57f467a8b62b0 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 18 Jul 2017 15:15:00 +0530 Subject: wireless: ipw2100: constify attribute_group structures. attribute_group are not supposed to change at runtime. All functions working with attribute_group provided by work with const attribute_group. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index aaaca4d08e2b..ccbe74589eec 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -4324,7 +4324,7 @@ static struct attribute *ipw2100_sysfs_entries[] = { NULL, }; -static struct attribute_group ipw2100_attribute_group = { +static const struct attribute_group ipw2100_attribute_group = { .attrs = ipw2100_sysfs_entries, }; -- cgit v1.2.3-55-g7522 From 64571ca71bbfa890321ff14cf18c74089a23e4d1 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 18 Jul 2017 15:15:01 +0530 Subject: wireless: iwlegacy: constify attribute_group structures. attribute_group are not supposed to change at runtime. All functions working with attribute_group provided by work with const attribute_group. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. 
Miller --- drivers/net/wireless/intel/iwlegacy/3945-mac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 38bf403bb1e1..329f3a63dadd 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3464,7 +3464,7 @@ static struct attribute *il3945_sysfs_entries[] = { NULL }; -static struct attribute_group il3945_attribute_group = { +static const struct attribute_group il3945_attribute_group = { .name = NULL, /* put in device directory */ .attrs = il3945_sysfs_entries, }; -- cgit v1.2.3-55-g7522 From 6cbbd7ec06fa605b219cb2a91b653960a5d21aa6 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 18 Jul 2017 15:15:02 +0530 Subject: wireless: iwlegacy: Constify attribute_group structures. attribute_group are not supposed to change at runtime. All functions working with attribute_group provided by work with const attribute_group. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 5b51fba75595..de9b6522c43f 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -4654,7 +4654,7 @@ static struct attribute *il_sysfs_entries[] = { NULL }; -static struct attribute_group il_attribute_group = { +static const struct attribute_group il_attribute_group = { .name = NULL, /* put in device directory */ .attrs = il_sysfs_entries, }; -- cgit v1.2.3-55-g7522 From c5567669fc638849fbd1057385846ac75fa42a0e Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 18 Jul 2017 15:15:36 +0530 Subject: arcnet: com20020-pci: constify attribute_group structures. attribute_group are not supposed to change at runtime. All functions working with attribute_group provided by work with const attribute_group. So mark the non-const structs as const. File size before: text data bss dec hex filename 3409 948 28 4385 1121 drivers/net/arcnet/com20020-pci.o File size After adding 'const': text data bss dec hex filename 3473 884 28 4385 1121 drivers/net/arcnet/com20020-pci.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/arcnet/com20020-pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 01cab9548785..eb7f76753c9c 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -109,7 +109,7 @@ static struct attribute *com20020_state_attrs[] = { NULL, }; -static struct attribute_group com20020_state_group = { +static const struct attribute_group com20020_state_group = { .name = NULL, .attrs = com20020_state_attrs, }; -- cgit v1.2.3-55-g7522 From 02dbbef0548678b9066e68bb1b6c9eda5c077a00 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 18 Jul 2017 15:15:56 +0530 Subject: net: bonding: constify attribute_group structures. attribute_group are not supposed to change at runtime. All functions working with attribute_group provided by work with const attribute_group. So mark the non-const structs as const. 
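As with the other constify patches above (see the sketch after the janz-ican3 changelog), the size(1) figures quoted for these conversions show the text column growing and the data column shrinking by the same amount while the total stays constant: once the attribute_group is const, it can be placed in .rodata, which size(1) accounts under text rather than data.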
File size before: text data bss dec hex filename 4512 1472 0 5984 1760 drivers/net/bonding/bond_sysfs.o File size After adding 'const': text data bss dec hex filename 4576 1408 0 5984 1760 drivers/net/bonding/bond_sysfs.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/bonding/bond_sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 770623a0cc01..040b493f60ae 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -759,7 +759,7 @@ static struct attribute *per_bond_attrs[] = { NULL, }; -static struct attribute_group bonding_group = { +static const struct attribute_group bonding_group = { .name = "bonding", .attrs = per_bond_attrs, }; -- cgit v1.2.3-55-g7522 From 98dc8373db89b242a7888cdc7b0788e21e0a4cbe Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 18 Jul 2017 15:16:19 +0530 Subject: net: chelsio: cxgb3: constify attribute_group structures. attribute_group are not supposed to change at runtime. All functions working with attribute_group provided by work with const attribute_group. So mark the non-const structs as const. File size before: text data bss dec hex filename 28720 985 12 29717 7415 net/.../cxgb3/cxgb3_main.o File size After adding 'const': text data bss dec hex filename 28848 857 12 29717 7415 net/.../cxgb3/cxgb3_main.o Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 0bc6a4ffce30..6a015362c340 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -793,7 +793,9 @@ static struct attribute *cxgb3_attrs[] = { NULL }; -static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs }; +static const struct attribute_group cxgb3_attr_group = { + .attrs = cxgb3_attrs, +}; static ssize_t tm_attr_show(struct device *d, char *buf, int sched) @@ -880,7 +882,9 @@ static struct attribute *offload_attrs[] = { NULL }; -static struct attribute_group offload_attr_group = {.attrs = offload_attrs }; +static const struct attribute_group offload_attr_group = { + .attrs = offload_attrs, +}; /* * Sends an sk_buff to an offload queue driver -- cgit v1.2.3-55-g7522 From 741912c55365f1a48611e20d3291433b686f6846 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Mon, 17 Jul 2017 13:33:14 -0700 Subject: liquidio: support new firmware statistic fw_err_pki Added support for new firmware statistic 'tx_err_pki'. Signed-off-by: Rick Farrington Signed-off-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 4 ++++ drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 1 + 2 files changed, 5 insertions(+) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 28ecda3d3404..976a50f67551 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -105,6 +105,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = { "tx_total_sent", "tx_total_fwd", "tx_err_pko", + "tx_err_pki", "tx_err_link", "tx_err_drop", @@ -826,6 +827,8 @@ lio_get_ethtool_stats(struct net_device *netdev, data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki); /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. @@ -1568,6 +1571,7 @@ octnet_nic_stats_callback(struct octeon_device *oct_dev, tstats->fw_total_sent = rsp_tstats->fw_total_sent; tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; tstats->fw_err_pko = rsp_tstats->fw_err_pko; + tstats->fw_err_pki = rsp_tstats->fw_err_pki; tstats->fw_err_link = rsp_tstats->fw_err_link; tstats->fw_err_drop = rsp_tstats->fw_err_drop; tstats->fw_tso = rsp_tstats->fw_tso; diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 231dd7fbfb80..53aaf417e722 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -814,6 +814,7 @@ struct nic_tx_stats { u64 fw_tso; /* number of tso requests */ u64 fw_tso_fwd; /* number of packets segmented in tso */ u64 fw_tx_vxlan; + u64 fw_err_pki; }; struct oct_link_stats { -- cgit v1.2.3-55-g7522 From 00587f2fa70866a2c3425ee799c91657b4717b7c Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Mon, 17 Jul 2017 17:50:47 -0700 Subject: liquidio: lowmem: init allocated memory to 0 Fix GPF in octeon_init_droq(); zero the allocated block 'recv_buf_list'. This prevents a GPF trying to access an invalid 'recv_buf_list[i]' entry in octeon_droq_destroy_ring_buffers() if init didn't alloc all entries. Signed-off-by: Rick Farrington Signed-off-by: Satanand Burla Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 2e190deb2233..645668339620 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -275,12 +275,12 @@ int octeon_init_droq(struct octeon_device *oct, droq->max_count); droq->recv_buf_list = (struct octeon_recv_buffer *) - vmalloc_node(droq->max_count * + vzalloc_node(droq->max_count * OCT_DROQ_RECVBUF_SIZE, numa_node); if (!droq->recv_buf_list) droq->recv_buf_list = (struct octeon_recv_buffer *) - vmalloc(droq->max_count * + vzalloc(droq->max_count * OCT_DROQ_RECVBUF_SIZE); if (!droq->recv_buf_list) { dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n"); -- cgit v1.2.3-55-g7522 From 689062a18c00f49f9c32ac8d5366c075ab691c30 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Mon, 17 Jul 2017 17:51:10 -0700 Subject: liquidio: lowmem: do not dereference null ptr Don't dereference a NULL ptr in octeon_droq_destroy_ring_buffers(). Signed-off-by: Rick Farrington Signed-off-by: Satanand Burla Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 645668339620..f7b5d68eb4cf 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -145,6 +145,8 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct, for (i = 0; i < droq->max_count; i++) { pg_info = &droq->recv_buf_list[i].pg_info; + if (!pg_info) + continue; if (pg_info->dma) lio_unmap_ring(oct->pci_dev, -- cgit v1.2.3-55-g7522 From 2c4aac74a9f28e0431f5335401d5fc8f744fa1e1 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Mon, 17 Jul 2017 17:51:37 -0700 Subject: liquidio: lowmem: init allocated memory to 0 For defensive programming, zero the allocated block 'oct->droq[0]' in octeon_setup_output_queues() and 'oct->instr_queue[0]' in octeon_setup_instr_queues(). Signed-off-by: Rick Farrington Signed-off-by: Satanand Burla Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 623e28ca736e..f10014f7ae88 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -876,11 +876,11 @@ int octeon_setup_instr_queues(struct octeon_device *oct) oct->num_iqs = 0; - oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]), + oct->instr_queue[0] = vzalloc_node(sizeof(*oct->instr_queue[0]), numa_node); if (!oct->instr_queue[0]) oct->instr_queue[0] = - vmalloc(sizeof(struct octeon_instr_queue)); + vzalloc(sizeof(struct octeon_instr_queue)); if (!oct->instr_queue[0]) return 1; memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue)); @@ -923,9 +923,9 @@ int octeon_setup_output_queues(struct octeon_device *oct) desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf)); } oct->num_oqs = 0; - oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node); + oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node); if (!oct->droq[0]) - oct->droq[0] = vmalloc(sizeof(*oct->droq[0])); + oct->droq[0] = vzalloc(sizeof(*oct->droq[0])); if (!oct->droq[0]) return 1; -- cgit v1.2.3-55-g7522 From c57c054eb5b1ccf230c49f736f7a018fcbc3e952 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Mon, 17 Jul 2017 23:28:05 -0700 Subject: openvswitch: Optimize updating for OvS flow_stats. In the ovs_flow_stats_update(), we only use the node var to alloc flow_stats struct. But this is not a common case, it is unnecessary to call the numa_node_id() everytime. This patch is not a bugfix, but there maybe a small increase. Signed-off-by: Tonghao Zhang Signed-off-by: David S. Miller --- net/openvswitch/flow.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 597d96faca45..6ef51e764367 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -72,7 +72,6 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, const struct sk_buff *skb) { struct flow_stats *stats; - int node = numa_node_id(); int cpu = smp_processor_id(); int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); @@ -108,7 +107,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, __GFP_THISNODE | __GFP_NOWARN | __GFP_NOMEMALLOC, - node); + numa_node_id()); if (likely(new_stats)) { new_stats->used = jiffies; new_stats->packet_count = 1; -- cgit v1.2.3-55-g7522 From c4b2bf6b4a35348fe6d1eb06928eb68d7b9d99a9 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Mon, 17 Jul 2017 23:28:06 -0700 Subject: openvswitch: Optimize operations for OvS flow_stats. When calling the flow_free() to free the flow, we call many times (cpu_possible_mask, eg. 128 as default) cpumask_next(). That will take up our CPU usage if we call the flow_free() frequently. When we put all packets to userspace via upcall, and OvS will send them back via netlink to ovs_packet_cmd_execute(will call flow_free). The test topo is shown as below. VM01 sends TCP packets to VM02, and OvS forward packtets. When testing, we use perf to report the system performance. VM01 --- OvS-VM --- VM02 Without this patch, perf-top show as below: The flow_free() is 3.02% CPU usage. 4.23% [kernel] [k] _raw_spin_unlock_irqrestore 3.62% [kernel] [k] __do_softirq 3.16% [kernel] [k] __memcpy 3.02% [kernel] [k] flow_free 2.42% libc-2.17.so [.] 
__memcpy_ssse3_back 2.18% [kernel] [k] copy_user_generic_unrolled 2.17% [kernel] [k] find_next_bit When applied this patch, perf-top show as below: Not shown on the list anymore. 4.11% [kernel] [k] _raw_spin_unlock_irqrestore 3.79% [kernel] [k] __do_softirq 3.46% [kernel] [k] __memcpy 2.73% libc-2.17.so [.] __memcpy_ssse3_back 2.25% [kernel] [k] copy_user_generic_unrolled 1.89% libc-2.17.so [.] _int_malloc 1.53% ovs-vswitchd [.] xlate_actions With this patch, the TCP throughput(we dont use Megaflow Cache + Microflow Cache) between VMs is 1.18Gbs/sec up to 1.30Gbs/sec (maybe ~10% performance imporve). This patch adds cpumask struct, the cpu_used_mask stores the cpu_id that the flow used. And we only check the flow_stats on the cpu we used, and it is unncessary to check all possible cpu when getting, cleaning, and updating the flow_stats. Adding the cpu_used_mask to sw_flow struct does’t increase the cacheline number. Signed-off-by: Tonghao Zhang Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- net/openvswitch/flow.c | 7 ++++--- net/openvswitch/flow.h | 2 ++ net/openvswitch/flow_table.c | 4 +++- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 6ef51e764367..8c94cef25a72 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -72,7 +72,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, const struct sk_buff *skb) { struct flow_stats *stats; - int cpu = smp_processor_id(); + unsigned int cpu = smp_processor_id(); int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); stats = rcu_dereference(flow->stats[cpu]); @@ -117,6 +117,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, rcu_assign_pointer(flow->stats[cpu], new_stats); + cpumask_set_cpu(cpu, &flow->cpu_used_mask); goto unlock; } } @@ -144,7 +145,7 @@ void ovs_flow_stats_get(const struct sw_flow *flow, memset(ovs_stats, 0, sizeof(*ovs_stats)); /* We open code this to make sure cpu 0 is always considered */ - for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) { + for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { struct flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]); if (stats) { @@ -168,7 +169,7 @@ void ovs_flow_stats_clear(struct sw_flow *flow) int cpu; /* We open code this to make sure cpu 0 is always considered */ - for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) { + for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { struct flow_stats *stats = ovsl_dereference(flow->stats[cpu]); if (stats) { diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index a9bc1c875965..1875bba4f865 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -219,6 +220,7 @@ struct sw_flow { */ struct sw_flow_key key; struct sw_flow_id id; + struct cpumask cpu_used_mask; struct sw_flow_mask *mask; struct sw_flow_actions __rcu *sf_acts; struct flow_stats __rcu *stats[]; /* One for each CPU. 
First one diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index ea7a8073fa02..80ea2a71852e 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -98,6 +98,8 @@ struct sw_flow *ovs_flow_alloc(void) RCU_INIT_POINTER(flow->stats[0], stats); + cpumask_set_cpu(0, &flow->cpu_used_mask); + return flow; err: kmem_cache_free(flow_cache, flow); @@ -141,7 +143,7 @@ static void flow_free(struct sw_flow *flow) if (flow->sf_acts) ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts); /* We open code this to make sure cpu 0 is always considered */ - for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpu_possible_mask)) + for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) if (flow->stats[cpu]) kmem_cache_free(flow_stats_cache, (struct flow_stats __force *)flow->stats[cpu]); -- cgit v1.2.3-55-g7522 From bb4d991a28cc86a2dfbeefeff32911ca9f779c18 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Wed, 19 Jul 2017 15:41:26 -0700 Subject: tcp: adjust tail loss probe timeout This patch adjusts the timeout formula to schedule the TCP loss probe (TLP). The previous formula uses 2*SRTT or 1.5*RTT + DelayACKMax if only one packet is in flight. It keeps a lower bound of 10 msec which is too large for short RTT connections (e.g. within a data-center). The new formula = 2*RTT + (inflight == 1 ? 200ms : 2ticks) which performs better for short and fast connections. Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Signed-off-by: David S. Miller --- include/net/tcp.h | 3 +-- net/ipv4/tcp_output.c | 17 ++++++++++------- net/ipv4/tcp_recovery.c | 2 +- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 70483296157f..4f056ea79df2 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -139,6 +139,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #endif #define TCP_RTO_MAX ((unsigned)(120*HZ)) #define TCP_RTO_MIN ((unsigned)(HZ/5)) +#define TCP_TIMEOUT_MIN (2U) /* Min timeout for TCP timers in jiffies */ #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */ #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now * used as a fallback RTO for the @@ -150,8 +151,6 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes * for local resources. */ -#define TCP_REO_TIMEOUT_MIN (2000) /* Min RACK reordering timeout in usec */ - #define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */ #define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */ #define TCP_KEEPALIVE_INTVL (75*HZ) diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 4e985dea1dd2..886d874775df 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2377,7 +2377,6 @@ bool tcp_schedule_loss_probe(struct sock *sk) struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); u32 timeout, tlp_time_stamp, rto_time_stamp; - u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3); /* No consecutive loss probes. */ if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) { @@ -2406,15 +2405,19 @@ bool tcp_schedule_loss_probe(struct sock *sk) tcp_send_head(sk)) return false; - /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account + /* Probe timeout is 2*rtt. Add minimum RTO to account * for delayed ack when there's one outstanding packet. If no RTT * sample is available then probe after TCP_TIMEOUT_INIT. 
*/ - timeout = rtt << 1 ? : TCP_TIMEOUT_INIT; - if (tp->packets_out == 1) - timeout = max_t(u32, timeout, - (rtt + (rtt >> 1) + TCP_DELACK_MAX)); - timeout = max_t(u32, timeout, msecs_to_jiffies(10)); + if (tp->srtt_us) { + timeout = usecs_to_jiffies(tp->srtt_us >> 2); + if (tp->packets_out == 1) + timeout += TCP_RTO_MIN; + else + timeout += TCP_TIMEOUT_MIN; + } else { + timeout = TCP_TIMEOUT_INIT; + } /* If RTO is shorter, just schedule TLP in its place. */ tlp_time_stamp = tcp_jiffies32 + timeout; diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c index fe9a493d0208..449cd914d58e 100644 --- a/net/ipv4/tcp_recovery.c +++ b/net/ipv4/tcp_recovery.c @@ -113,7 +113,7 @@ void tcp_rack_mark_lost(struct sock *sk) tp->rack.advanced = 0; tcp_rack_detect_loss(sk, &timeout); if (timeout) { - timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN); + timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN; inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT, timeout, inet_csk(sk)->icsk_rto); } -- cgit v1.2.3-55-g7522 From 9b17010da57ae1a5d0a28c62f5e15abbce35edb0 Mon Sep 17 00:00:00 2001 From: Martin Habets Date: Tue, 18 Jul 2017 16:43:19 +0100 Subject: sfc: Add ethtool -m support for QSFP modules This also adds support for non-QSFP modules attached to QSFP. Signed-off-by: Martin Habets Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/mcdi_port.c | 224 ++++++++++++++++++++++++++++------- 1 file changed, 181 insertions(+), 43 deletions(-) diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index c905971c5f3a..d3f96a8f743b 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c @@ -746,59 +746,171 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx, return NULL; } -#define SFP_PAGE_SIZE 128 -#define SFP_NUM_PAGES 2 -static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, - struct ethtool_eeprom *ee, u8 *data) +#define SFP_PAGE_SIZE 128 +#define SFF_DIAG_TYPE_OFFSET 92 +#define SFF_DIAG_ADDR_CHANGE BIT(2) +#define SFF_8079_NUM_PAGES 2 +#define SFF_8472_NUM_PAGES 4 +#define SFF_8436_NUM_PAGES 5 +#define SFF_DMT_LEVEL_OFFSET 94 + +/** efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom + * @efx: NIC context + * @page: EEPROM page number + * @data: Destination data pointer + * @offset: Offset in page to copy from in to data + * @space: Space available in data + * + * Return: + * >=0 - amount of data copied + * <0 - error + */ +static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx, + unsigned int page, + u8 *data, ssize_t offset, + ssize_t space) { MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX); MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN); size_t outlen; - int rc; unsigned int payload_len; - unsigned int space_remaining = ee->len; - unsigned int page; - unsigned int page_off; unsigned int to_copy; - u8 *user_data = data; + int rc; - BUILD_BUG_ON(SFP_PAGE_SIZE * SFP_NUM_PAGES != ETH_MODULE_SFF_8079_LEN); + if (offset > SFP_PAGE_SIZE) + return -EINVAL; - page_off = ee->offset % SFP_PAGE_SIZE; - page = ee->offset / SFP_PAGE_SIZE; + to_copy = min(space, SFP_PAGE_SIZE - offset); - while (space_remaining && (page < SFP_NUM_PAGES)) { - MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page); + rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO, + inbuf, sizeof(inbuf), + outbuf, sizeof(outbuf), + &outlen); - rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, - inbuf, 
sizeof(inbuf), - outbuf, sizeof(outbuf), - &outlen); - if (rc) - return rc; + if (rc) + return rc; + + if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + + SFP_PAGE_SIZE)) + return -EIO; + + payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN); + if (payload_len != SFP_PAGE_SIZE) + return -EIO; - if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST + - SFP_PAGE_SIZE)) - return -EIO; + memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, + to_copy); - payload_len = MCDI_DWORD(outbuf, - GET_PHY_MEDIA_INFO_OUT_DATALEN); - if (payload_len != SFP_PAGE_SIZE) - return -EIO; + return to_copy; +} - /* Copy as much as we can into data */ - payload_len -= page_off; - to_copy = (space_remaining < payload_len) ? - space_remaining : payload_len; +static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx, + unsigned int page, + u8 byte) +{ + int rc; + u8 data; - memcpy(user_data, - MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off, - to_copy); + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1); + if (rc == 1) + return data; + + return rc; +} + +static int efx_mcdi_phy_diag_type(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the diagnostic type at byte 92. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DIAG_TYPE_OFFSET); +} - space_remaining -= to_copy; - user_data += to_copy; - page_off = 0; - page++; +static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx) +{ + /* Page zero of the EEPROM includes the DMT level at byte 94. */ + return efx_mcdi_phy_get_module_eeprom_byte(efx, 0, + SFF_DMT_LEVEL_OFFSET); +} + +static u32 efx_mcdi_phy_module_type(struct efx_nic *efx) +{ + struct efx_mcdi_phy_data *phy_data = efx->phy_data; + + if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS) + return phy_data->media; + + /* A QSFP+ NIC may actually have an SFP+ module attached. + * The ID is page 0, byte 0. + */ + switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) { + case 0x3: + return MC_CMD_MEDIA_SFP_PLUS; + case 0xc: + case 0xd: + return MC_CMD_MEDIA_QSFP_PLUS; + default: + return 0; + } +} + +static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, + struct ethtool_eeprom *ee, u8 *data) +{ + int rc; + ssize_t space_remaining = ee->len; + unsigned int page_off; + bool ignore_missing; + int num_pages; + int page; + + switch (efx_mcdi_phy_module_type(efx)) { + case MC_CMD_MEDIA_SFP_PLUS: + num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ? + SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES; + page = 0; + ignore_missing = false; + break; + case MC_CMD_MEDIA_QSFP_PLUS: + num_pages = SFF_8436_NUM_PAGES; + page = -1; /* We obtain the lower page by asking for -1. */ + ignore_missing = true; /* Ignore missing pages after page 0. 
*/ + break; + default: + return -EOPNOTSUPP; + } + + page_off = ee->offset % SFP_PAGE_SIZE; + page += ee->offset / SFP_PAGE_SIZE; + + while (space_remaining && (page < num_pages)) { + rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, + data, page_off, + space_remaining); + + if (rc > 0) { + space_remaining -= rc; + data += rc; + page_off = 0; + page++; + } else if (rc == 0) { + space_remaining = 0; + } else if (ignore_missing && (page > 0)) { + int intended_size = SFP_PAGE_SIZE - page_off; + + space_remaining -= intended_size; + if (space_remaining < 0) { + space_remaining = 0; + } else { + memset(data, 0, intended_size); + data += intended_size; + page_off = 0; + page++; + rc = 0; + } + } else { + return rc; + } } return 0; @@ -807,16 +919,42 @@ static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, static int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo) { - struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; + int sff_8472_level; + int diag_type; - switch (phy_cfg->media) { + switch (efx_mcdi_phy_module_type(efx)) { case MC_CMD_MEDIA_SFP_PLUS: - modinfo->type = ETH_MODULE_SFF_8079; - modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - return 0; + sff_8472_level = efx_mcdi_phy_sff_8472_level(efx); + + /* If we can't read the diagnostics level we have none. */ + if (sff_8472_level < 0) + return -EOPNOTSUPP; + + /* Check if this module requires the (unsupported) address + * change operation. + */ + diag_type = efx_mcdi_phy_diag_type(efx); + + if ((sff_8472_level == 0) || + (diag_type & SFF_DIAG_ADDR_CHANGE)) { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } + break; + + case MC_CMD_MEDIA_QSFP_PLUS: + modinfo->type = ETH_MODULE_SFF_8436; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + break; + default: return -EOPNOTSUPP; } + + return 0; } static const struct efx_phy_operations efx_mcdi_phy_ops = { -- cgit v1.2.3-55-g7522 From eeef1713cacdb3cf657c9830e5a4c61e7f35c60a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 18 Jul 2017 18:49:26 +0300 Subject: ISDN: eicon: switch to use native bitmaps Two arrays are clearly bit maps, so, make that explicit by converting to bitmap API and remove custom helpers. Note sig_ind() uses out of boundary bit to (looks like) protect against potential bitmap_empty() checks for the same bitmap. This patch removes that since: 1) that didn't guarantee atomicity anyway; 2) the first operation inside the for-loop is set bit in the bitmap (which effectively makes it non-empty); 3) group_optimization() doesn't utilize possible emptiness of the bitmap in question. Thus, if there is a protection needed it should be implemented properly. Signed-off-by: Andy Shevchenko Signed-off-by: David S. 
Miller --- drivers/isdn/hardware/eicon/divacapi.h | 16 +-- drivers/isdn/hardware/eicon/message.c | 247 ++++++++------------------------- 2 files changed, 58 insertions(+), 205 deletions(-) diff --git a/drivers/isdn/hardware/eicon/divacapi.h b/drivers/isdn/hardware/eicon/divacapi.h index a315a2914d70..c4868a0d82f4 100644 --- a/drivers/isdn/hardware/eicon/divacapi.h +++ b/drivers/isdn/hardware/eicon/divacapi.h @@ -26,15 +26,7 @@ /*#define DEBUG */ - - - - - - - - - +#include #define IMPLEMENT_DTMF 1 #define IMPLEMENT_LINE_INTERCONNECT2 1 @@ -82,8 +74,6 @@ #define CODEC_PERMANENT 0x02 #define ADV_VOICE 0x03 #define MAX_CIP_TYPES 5 /* kind of CIP types for group optimization */ -#define C_IND_MASK_DWORDS ((MAX_APPL + 32) >> 5) - #define FAX_CONNECT_INFO_BUFFER_SIZE 256 #define NCPI_BUFFER_SIZE 256 @@ -265,8 +255,8 @@ struct _PLCI { word ncci_ring_list; byte inc_dis_ncci_table[MAX_CHANNELS_PER_PLCI]; t_std_internal_command internal_command_queue[MAX_INTERNAL_COMMAND_LEVELS]; - dword c_ind_mask_table[C_IND_MASK_DWORDS]; - dword group_optimization_mask_table[C_IND_MASK_DWORDS]; + DECLARE_BITMAP(c_ind_mask_table, MAX_APPL); + DECLARE_BITMAP(group_optimization_mask_table, MAX_APPL); byte RBuffer[200]; dword msg_in_queue[MSG_IN_QUEUE_SIZE/sizeof(dword)]; API_SAVE saved_msg; diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index 3b11422b1cce..eadd1ed1e014 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -23,9 +23,7 @@ * */ - - - +#include #include "platform.h" #include "di_defs.h" @@ -35,19 +33,9 @@ #include "mdm_msg.h" #include "divasync.h" - - #define FILE_ "MESSAGE.C" #define dprintf - - - - - - - - /*------------------------------------------------------------------*/ /* This is options supported for all adapters that are server by */ /* XDI driver. 
Allo it is not necessary to ask it from every adapter*/ @@ -72,9 +60,6 @@ static dword diva_xdi_extended_features = 0; /*------------------------------------------------------------------*/ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci); -static void set_group_ind_mask(PLCI *plci); -static void clear_group_ind_mask_bit(PLCI *plci, word b); -static byte test_group_ind_mask_bit(PLCI *plci, word b); void AutomaticLaw(DIVA_CAPI_ADAPTER *); word CapiRelease(word); word CapiRegister(word); @@ -1086,106 +1071,6 @@ static void plci_remove(PLCI *plci) plci->State = OUTG_DIS_PENDING; } -/*------------------------------------------------------------------*/ -/* Application Group function helpers */ -/*------------------------------------------------------------------*/ - -static void set_group_ind_mask(PLCI *plci) -{ - word i; - - for (i = 0; i < C_IND_MASK_DWORDS; i++) - plci->group_optimization_mask_table[i] = 0xffffffffL; -} - -static void clear_group_ind_mask_bit(PLCI *plci, word b) -{ - plci->group_optimization_mask_table[b >> 5] &= ~(1L << (b & 0x1f)); -} - -static byte test_group_ind_mask_bit(PLCI *plci, word b) -{ - return ((plci->group_optimization_mask_table[b >> 5] & (1L << (b & 0x1f))) != 0); -} - -/*------------------------------------------------------------------*/ -/* c_ind_mask operations for arbitrary MAX_APPL */ -/*------------------------------------------------------------------*/ - -static void clear_c_ind_mask(PLCI *plci) -{ - word i; - - for (i = 0; i < C_IND_MASK_DWORDS; i++) - plci->c_ind_mask_table[i] = 0; -} - -static byte c_ind_mask_empty(PLCI *plci) -{ - word i; - - i = 0; - while ((i < C_IND_MASK_DWORDS) && (plci->c_ind_mask_table[i] == 0)) - i++; - return (i == C_IND_MASK_DWORDS); -} - -static void set_c_ind_mask_bit(PLCI *plci, word b) -{ - plci->c_ind_mask_table[b >> 5] |= (1L << (b & 0x1f)); -} - -static void clear_c_ind_mask_bit(PLCI *plci, word b) -{ - plci->c_ind_mask_table[b >> 5] &= ~(1L << (b & 0x1f)); -} - -static byte test_c_ind_mask_bit(PLCI *plci, word b) -{ - return ((plci->c_ind_mask_table[b >> 5] & (1L << (b & 0x1f))) != 0); -} - -static void dump_c_ind_mask(PLCI *plci) -{ - word i, j, k; - dword d; - char *p; - char buf[40]; - - for (i = 0; i < C_IND_MASK_DWORDS; i += 4) - { - p = buf + 36; - *p = '\0'; - for (j = 0; j < 4; j++) - { - if (i + j < C_IND_MASK_DWORDS) - { - d = plci->c_ind_mask_table[i + j]; - for (k = 0; k < 8; k++) - { - *(--p) = hex_asc_lo(d); - d >>= 4; - } - } - else if (i != 0) - { - for (k = 0; k < 8; k++) - *(--p) = ' '; - } - *(--p) = ' '; - } - dbug(1, dprintf("c_ind_mask =%s", (char *) p)); - } -} - - - - - -#define dump_plcis(a) - - - /*------------------------------------------------------------------*/ /* translation function for each message */ /*------------------------------------------------------------------*/ @@ -1457,13 +1342,13 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, return 1; } else if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); - dump_c_ind_mask(plci); + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); Reject = GET_WORD(parms[0].info); dbug(1, dprintf("Reject=0x%x", Reject)); if (Reject) { - if (c_ind_mask_empty(plci)) + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if ((Reject & 0xff00) == 0x3400) { @@ -1553,11 +1438,8 @@ static byte connect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, sig_req(plci, 
CALL_RES, 0); } - for (i = 0; i < max_appl; i++) { - if (test_c_ind_mask_bit(plci, i)) { - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); - } - } + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); } } return 1; @@ -1584,13 +1466,10 @@ static byte disconnect_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, { if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); plci->appl = appl; - for (i = 0; i < max_appl; i++) - { - if (test_c_ind_mask_bit(plci, i)) - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); - } + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); plci->State = OUTG_DIS_PENDING; } if (plci->Sig.Id && plci->appl) @@ -1634,7 +1513,7 @@ static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, { /* clear ind mask bit, just in case of collsion of */ /* DISCONNECT_IND and CONNECT_RES */ - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); ncci_free_receive_buffers(plci, 0); if (plci_remove_check(plci)) { @@ -1642,7 +1521,7 @@ static byte disconnect_res(dword Id, word Number, DIVA_CAPI_ADAPTER *a, } if (plci->State == INC_DIS_PENDING || plci->State == SUSPENDING) { - if (c_ind_mask_empty(plci)) { + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if (plci->State != SUSPENDING) plci->State = IDLE; dbug(1, dprintf("chs=%d", plci->channels)); if (!plci->channels) { @@ -3351,13 +3230,11 @@ static byte select_b_req(dword Id, word Number, DIVA_CAPI_ADAPTER *a, } plci->State = INC_CON_CONNECTED_ALERT; plci->appl = appl; - clear_c_ind_mask_bit(plci, (word)(appl->Id - 1)); - dump_c_ind_mask(plci); - for (i = 0; i < max_appl; i++) /* disconnect the other appls */ - { /* its quasi a connect */ - if (test_c_ind_mask_bit(plci, i)) - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); - } + __clear_bit(appl->Id - 1, plci->c_ind_mask_table); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); + /* disconnect the other appls its quasi a connect */ + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", _OTHER_APPL_CONNECTED); } api_save_msg(msg, "s", &plci->saved_msg); @@ -5692,19 +5569,17 @@ static void sig_ind(PLCI *plci) cip = find_cip(a, parms[4], parms[6]); cip_mask = 1L << cip; dbug(1, dprintf("cip=%d,cip_mask=%lx", cip, cip_mask)); - clear_c_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); if (!remove_started && !a->adapter_disabled) { - set_c_ind_mask_bit(plci, MAX_APPL); group_optimization(a, plci); - for (i = 0; i < max_appl; i++) { + for_each_set_bit(i, plci->group_optimization_mask_table, max_appl) { if (application[i].Id && (a->CIP_Mask[i] & 1 || a->CIP_Mask[i] & cip_mask) - && CPN_filter_ok(parms[0], a, i) - && test_group_ind_mask_bit(plci, i)) { + && CPN_filter_ok(parms[0], a, i)) { dbug(1, dprintf("storedcip_mask[%d]=0x%lx", i, a->CIP_Mask[i])); - set_c_ind_mask_bit(plci, i); - dump_c_ind_mask(plci); + __set_bit(i, plci->c_ind_mask_table); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); plci->State = INC_CON_PENDING; plci->call_dir = (plci->call_dir & ~(CALL_DIR_OUT | CALL_DIR_ORIGINATE)) | CALL_DIR_IN | CALL_DIR_ANSWER; @@ -5750,10 +5625,9 @@ static void sig_ind(PLCI *plci) SendMultiIE(plci, 
Id, multi_pi_parms, PI, 0x210, true)); } } - clear_c_ind_mask_bit(plci, MAX_APPL); - dump_c_ind_mask(plci); + dbug(1, dprintf("c_ind_mask =%*pb", MAX_APPL, plci->c_ind_mask_table)); } - if (c_ind_mask_empty(plci)) { + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { sig_req(plci, HANGUP, 0); send_req(plci); plci->State = IDLE; @@ -5994,13 +5868,13 @@ static void sig_ind(PLCI *plci) break; case RESUME: - clear_c_ind_mask_bit(plci, (word)(plci->appl->Id - 1)); + __clear_bit(plci->appl->Id - 1, plci->c_ind_mask_table); PUT_WORD(&resume_cau[4], GOOD); sendf(plci->appl, _FACILITY_I, Id, 0, "ws", (word)3, resume_cau); break; case SUSPEND: - clear_c_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); if (plci->NL.Id && !plci->nl_remove_id) { mixer_remove(plci); @@ -6037,15 +5911,12 @@ static void sig_ind(PLCI *plci) if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - for (i = 0; i < max_appl; i++) - { - if (test_c_ind_mask_bit(plci, i)) - sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); - } + for_each_set_bit(i, plci->c_ind_mask_table, max_appl) + sendf(&application[i], _DISCONNECT_I, Id, 0, "w", 0); } else { - clear_c_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); } if (!plci->appl) { @@ -6055,7 +5926,7 @@ static void sig_ind(PLCI *plci) a->listen_active--; } plci->State = INC_DIS_PENDING; - if (c_ind_mask_empty(plci)) + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { plci->State = IDLE; if (plci->NL.Id && !plci->nl_remove_id) @@ -6341,14 +6212,10 @@ static void SendInfo(PLCI *plci, dword Id, byte **parms, byte iesent) || Info_Number == DSP || Info_Number == UUI) { - for (j = 0; j < max_appl; j++) - { - if (test_c_ind_mask_bit(plci, j)) - { - dbug(1, dprintf("Ovl_Ind")); - iesent = true; - sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element); - } + for_each_set_bit(j, plci->c_ind_mask_table, max_appl) { + dbug(1, dprintf("Ovl_Ind")); + iesent = true; + sendf(&application[j], _INFO_I, Id, 0, "wS", Info_Number, Info_Element); } } } /* all other signalling states */ @@ -6416,14 +6283,10 @@ static byte SendMultiIE(PLCI *plci, dword Id, byte **parms, byte ie_type, } else if (!plci->appl && Info_Number) { /* overlap receiving broadcast */ - for (j = 0; j < max_appl; j++) - { - if (test_c_ind_mask_bit(plci, j)) - { - iesent = true; - dbug(1, dprintf("Mlt_Ovl_Ind")); - sendf(&application[j] , _INFO_I, Id, 0, "wS", Info_Number, Info_Element); - } + for_each_set_bit(j, plci->c_ind_mask_table, max_appl) { + iesent = true; + dbug(1, dprintf("Mlt_Ovl_Ind")); + sendf(&application[j] , _INFO_I, Id, 0, "wS", Info_Number, Info_Element); } } /* all other signalling states */ else if (Info_Number @@ -7270,7 +7133,6 @@ static word get_plci(DIVA_CAPI_ADAPTER *a) word i, j; PLCI *plci; - dump_plcis(a); for (i = 0; i < a->max_plci && a->plci[i].Id; i++); if (i == a->max_plci) { dbug(1, dprintf("get_plci: out of PLCIs")); @@ -7321,8 +7183,8 @@ static word get_plci(DIVA_CAPI_ADAPTER *a) plci->ncci_ring_list = 0; for (j = 0; j < MAX_CHANNELS_PER_PLCI; j++) plci->inc_dis_ncci_table[j] = 0; - clear_c_ind_mask(plci); - set_group_ind_mask(plci); + bitmap_zero(plci->c_ind_mask_table, MAX_APPL); + bitmap_fill(plci->group_optimization_mask_table, MAX_APPL); plci->fax_connect_info_length = 0; plci->nsf_control_bits = 0; plci->ncpi_state = 0x00; @@ -9373,10 +9235,10 @@ word CapiRelease(word Id) if (plci->State == INC_CON_PENDING || plci->State == INC_CON_ALERT) { - if (test_c_ind_mask_bit(plci, (word)(Id - 1))) + if (test_bit(Id - 1, 
plci->c_ind_mask_table)) { - clear_c_ind_mask_bit(plci, (word)(Id - 1)); - if (c_ind_mask_empty(plci)) + __clear_bit(Id - 1, plci->c_ind_mask_table); + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { sig_req(plci, HANGUP, 0); send_req(plci); @@ -9384,10 +9246,10 @@ word CapiRelease(word Id) } } } - if (test_c_ind_mask_bit(plci, (word)(Id - 1))) + if (test_bit(Id - 1, plci->c_ind_mask_table)) { - clear_c_ind_mask_bit(plci, (word)(Id - 1)); - if (c_ind_mask_empty(plci)) + __clear_bit(Id - 1, plci->c_ind_mask_table); + if (bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if (!plci->appl) { @@ -9452,7 +9314,7 @@ word CapiRelease(word Id) static word plci_remove_check(PLCI *plci) { if (!plci) return true; - if (!plci->NL.Id && c_ind_mask_empty(plci)) + if (!plci->NL.Id && bitmap_empty(plci->c_ind_mask_table, MAX_APPL)) { if (plci->Sig.Id == 0xff) plci->Sig.Id = 0; @@ -14735,7 +14597,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) word appl_number_group_type[MAX_APPL]; PLCI *auxplci; - set_group_ind_mask(plci); /* all APPLs within this inc. call are allowed to dial in */ + /* all APPLs within this inc. call are allowed to dial in */ + bitmap_fill(plci->group_optimization_mask_table, MAX_APPL); if (!a->group_optimization_enabled) { @@ -14771,13 +14634,12 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) if (a->plci[k].Id) { auxplci = &a->plci[k]; - if (auxplci->appl == &application[i]) /* application has a busy PLCI */ - { + if (auxplci->appl == &application[i]) { + /* application has a busy PLCI */ busy = true; dbug(1, dprintf("Appl 0x%x is busy", i + 1)); - } - else if (test_c_ind_mask_bit(auxplci, i)) /* application has an incoming call pending */ - { + } else if (test_bit(i, plci->c_ind_mask_table)) { + /* application has an incoming call pending */ busy = true; dbug(1, dprintf("Appl 0x%x has inc. call pending", i + 1)); } @@ -14826,7 +14688,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) if (appl_number_group_type[i] == appl_number_group_type[j]) { dbug(1, dprintf("Appl 0x%x is member of group 0x%x, no call", j + 1, appl_number_group_type[j])); - clear_group_ind_mask_bit(plci, j); /* disable call on other group members */ + /* disable call on other group members */ + __clear_bit(j, plci->group_optimization_mask_table); appl_number_group_type[j] = 0; /* remove disabled group member from group list */ } } @@ -14834,7 +14697,7 @@ static void group_optimization(DIVA_CAPI_ADAPTER *a, PLCI *plci) } else /* application should not get a call */ { - clear_group_ind_mask_bit(plci, i); + __clear_bit(i, plci->group_optimization_mask_table); } } -- cgit v1.2.3-55-g7522 From 8410095730c0874eb265bbcc87e9ec0d0ff8c183 Mon Sep 17 00:00:00 2001 From: Rosen, Rami Date: Tue, 18 Jul 2017 22:23:30 +0300 Subject: net/packet: remove unused PGV_FROM_VMALLOC definition. This patch removes the definition of PGV_FROM_VMALLOC from af_packet.c. The PGV_FROM_VMALLOC definition was already removed by commit 441c793a5650 ("net: cleanup unused macros in net directory"), and its usage was removed even before by commit c56b4d90123b ("af_packet: remove pgv.flags"); but it was added back by mistake later on, in commit f6fb8f100b80 ("af-packet: TPACKET_V3 flexible buffer implementation"). Signed-off-by: Rami Rosen Signed-off-by: David S. 
Miller --- net/packet/af_packet.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index e3beb28203eb..ee035cbe5621 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -177,8 +177,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, #define BLK_PLUS_PRIV(sz_of_priv) \ (BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT)) -#define PGV_FROM_VMALLOC 1 - #define BLOCK_STATUS(x) ((x)->hdr.bh1.block_status) #define BLOCK_NUM_PKTS(x) ((x)->hdr.bh1.num_pkts) #define BLOCK_O2FP(x) ((x)->hdr.bh1.offset_to_first_pkt) -- cgit v1.2.3-55-g7522 From e7d53ad3239de636ea478fc003d3652b49b8e593 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 18 Jul 2017 16:23:56 -0400 Subject: net: dsa: unexport dsa_is_port_initialized The dsa_is_port_initialized helper is only used by dsa_switch_resume and dsa_switch_suspend, if CONFIG_PM_SLEEP is enabled. Make it static to dsa.c. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- include/net/dsa.h | 5 ----- net/dsa/dsa.c | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index 58969b9a090c..88da272d20d0 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -256,11 +256,6 @@ static inline bool dsa_is_normal_port(struct dsa_switch *ds, int p) return !dsa_is_cpu_port(ds, p) && !dsa_is_dsa_port(ds, p); } -static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p) -{ - return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev; -} - static inline u8 dsa_upstream_port(struct dsa_switch *ds) { struct dsa_switch_tree *dst = ds->dst; diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 416ac4ef9ba9..a55e2e4087a4 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -220,6 +220,11 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, } #ifdef CONFIG_PM_SLEEP +static bool dsa_is_port_initialized(struct dsa_switch *ds, int p) +{ + return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev; +} + int dsa_switch_suspend(struct dsa_switch *ds) { int i, ret = 0; -- cgit v1.2.3-55-g7522 From 06548fbb609d54fd8c03dd4cdad1059ddb44378c Mon Sep 17 00:00:00 2001 From: Gustavo A. R. Silva Date: Tue, 18 Jul 2017 15:37:11 -0500 Subject: wireless: airo: remove unnecessary static in writerids() Remove unnecessary static on local function pointer _writer_. Such pointer is initialized before being used, on every execution path throughout the function. The static has no benefit and, removing it reduces the object file size. This issue was detected using Coccinelle and the following semantic patch: @bad exists@ position p; identifier x; type T; @@ static T x@p; ... x = <+...x...+> @@ identifier x; expression e; type T; position p != bad.p; @@ -static T x@p; ... when != x when strict ?x = e; In the following log you can see a significant difference in the object file size. This log is the output of the size command, before and after the code change: before: text data bss dec hex filename 113797 19152 1216 134165 20c15 drivers/net/wireless/cisco/airo.o after: text data bss dec hex filename 113881 19096 1152 134129 20bf1 drivers/net/wireless/cisco/airo.o Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/wireless/cisco/airo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 84143a02adce..54201c02fdb8 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -7837,7 +7837,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) { struct airo_info *ai = dev->ml_priv; int ridcode; int enabled; - static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); + int (*writer)(struct airo_info *, u16 rid, const void *, int, int); unsigned char *iobuf; /* Only super-user can write RIDs */ -- cgit v1.2.3-55-g7522 From f55dda4bded9d2042d296b9215bf6453f7411a0d Mon Sep 17 00:00:00 2001 From: Gustavo A. R. Silva Date: Tue, 18 Jul 2017 15:41:06 -0500 Subject: rtlwifi: remove useless code Remove useless local variables last_read_point and last_txw_point and the code related. Signed-off-by: Gustavo A. R. Silva Acked-by: Larry Finger Signed-off-by: David S. Miller --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index 55f238a2a310..c58393eab6a1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -478,7 +478,6 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index) struct rtl_priv *rtlpriv = rtl_priv(hw); u16 read_point = 0, write_point = 0, remind_cnt = 0; u32 tmp_4byte = 0; - static u16 last_read_point; static bool start_rx; tmp_4byte = rtl_read_dword(rtlpriv, REG_RXQ_TXBD_IDX); @@ -506,7 +505,6 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index) rtlpci->rx_ring[queue_index].next_rx_rp = write_point; - last_read_point = read_point; return remind_cnt; } @@ -917,7 +915,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, struct rtl_priv *rtlpriv = rtl_priv(hw); u16 cur_tx_rp = 0; u16 cur_tx_wp = 0; - static u16 last_txw_point; static bool over_run; u32 tmp = 0; u8 q_idx = *val; @@ -951,9 +948,6 @@ void rtl92ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, rtl_write_word(rtlpriv, get_desc_addr_fr_q_idx(q_idx), ring->cur_tx_wp); - - if (q_idx == 1) - last_txw_point = cur_tx_wp; } if (ring->avl_desc < (max_tx_desc - 15)) { -- cgit v1.2.3-55-g7522 From 648e8b86d0ae01bd5a61df2d776b70ed8b96e2b8 Mon Sep 17 00:00:00 2001 From: Gustavo A. R. Silva Date: Tue, 18 Jul 2017 15:43:33 -0500 Subject: net: tulip: remove useless code in tulip_init_one() Remove useless local variable multiport_cnt and the code related. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/dec/tulip/tulip_core.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index 17e566a8b345..84394b43c0a1 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1303,7 +1303,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 0x00, 'L', 'i', 'n', 'u', 'x' }; static int last_irq; - static int multiport_cnt; /* For four-port boards w/one EEPROM */ int i, irq; unsigned short sum; unsigned char *ee_data; @@ -1557,7 +1556,6 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } else if (ee_data[0] == 0xff && ee_data[1] == 0xff && ee_data[2] == 0) { sa_offset = 2; /* Grrr, damn Matrox boards. */ - multiport_cnt = 4; } #ifdef CONFIG_MIPS_COBALT if ((pdev->bus->number == 0) && -- cgit v1.2.3-55-g7522 From 048578a1954f6c473ba12c9b4519e8dce4a4549b Mon Sep 17 00:00:00 2001 From: Gustavo A. R. Silva Date: Tue, 18 Jul 2017 15:45:29 -0500 Subject: qlcnic: remove unnecessary static in qlcnic_dump_fw() Remove unnecessary static on local variable fw_dump_ops. Such variable is initialized before being used, on every execution path throughout the function. The static has no benefit and, removing it reduces the object file size. This issue was detected using Coccinelle and the following semantic patch: @bad exists@ position p; identifier x; type T; @@ static T x@p; ... x = <+...x...+> @@ identifier x; expression e; type T; position p != bad.p; @@ -static T x@p; ... when != x when strict ?x = e; In the following log you can see a difference in the object file size. This log is the output of the size command, before and after the code change: before: text data bss dec hex filename 19032 2136 64 21232 52f0 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.o after: text data bss dec hex filename 19020 2048 0 21068 524c drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.o Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index 0844b7c75767..afa10a163da1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -1285,7 +1285,7 @@ flash_temp: int qlcnic_dump_fw(struct qlcnic_adapter *adapter) { struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; - static const struct qlcnic_dump_operations *fw_dump_ops; + const struct qlcnic_dump_operations *fw_dump_ops; struct qlcnic_83xx_dump_template_hdr *hdr_83xx; u32 entry_offset, dump, no_entries, buf_offset = 0; int i, k, ops_cnt, ops_index, dump_size = 0; -- cgit v1.2.3-55-g7522 From f03b06f3bae893d0f4fbe7c41adc1d45016bb974 Mon Sep 17 00:00:00 2001 From: Gustavo A. R. Silva Date: Tue, 18 Jul 2017 15:48:06 -0500 Subject: net: ethernet: mediatek: remove useless code in mtk_poll_tx() Remove useless local variable _condition_ and the code related. Signed-off-by: Gustavo A. R. Silva Acked-by: Sean Wang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b3d0c2e6347a..7e95cf547ff1 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1027,7 +1027,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) unsigned int done[MTK_MAX_DEVS]; unsigned int bytes[MTK_MAX_DEVS]; u32 cpu, dma; - static int condition; int total = 0, i; memset(done, 0, sizeof(done)); @@ -1051,10 +1050,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget) mac = 1; skb = tx_buf->skb; - if (!skb) { - condition = 1; + if (!skb) break; - } if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { bytes[mac] += skb->len; -- cgit v1.2.3-55-g7522 From 93fe07e52ea1218a78f3886d23479958e14a0eea Mon Sep 17 00:00:00 2001 From: Gustavo A. R. Silva Date: Tue, 18 Jul 2017 15:50:15 -0500 Subject: liquidio: lio_vf_main: remove unnecessary static in setup_io_queues() Remove unnecessary static on local variables cpu_id_modulus and cpu_id. Such variables are initialized before being used, on every execution path throughout the function. The static has no benefit and, removing it reduces the object file size. This issue was detected using Coccinelle and the following semantic patch: @bad exists@ position p; identifier x; type T; @@ static T x@p; ... x = <+...x...+> @@ identifier x; expression e; type T; position p != bad.p; @@ -static T x@p; ... when != x when strict ?x = e; In the following log you can see a significant difference in the object file size. Also, there is a significant difference in the bss segment. This log is the output of the size command, before and after the code change: before: text data bss dec hex filename 55656 10680 576 66912 10560 drivers/net/ethernet/cavium/liquidio/lio_vf_main.o after: text data bss dec hex filename 55796 10536 448 66780 104dc drivers/net/ethernet/cavium/liquidio/lio_vf_main.o Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 9b247102eb92..935ff299cdd9 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1663,10 +1663,10 @@ static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx) { struct octeon_droq_ops droq_ops; struct net_device *netdev; - static int cpu_id_modulus; + int cpu_id_modulus; struct octeon_droq *droq; struct napi_struct *napi; - static int cpu_id; + int cpu_id; int num_tx_descs; struct lio *lio; int retval = 0; -- cgit v1.2.3-55-g7522 From eb2b98742641ae544fd16311055789ab7cac62aa Mon Sep 17 00:00:00 2001 From: Gustavo A. R. Silva Date: Tue, 18 Jul 2017 15:53:48 -0500 Subject: liquidio: lio_main: remove unnecessary static in setup_io_queues() Remove unnecessary static on local variables cpu_id_modulus and cpu_id. Such variables are initialized before being used, on every execution path throughout the function. The static has no benefit and, removing it reduces the object file size. This issue was detected using Coccinelle and the following semantic patch: @bad exists@ position p; identifier x; type T; @@ static T x@p; ... x = <+...x...+> @@ identifier x; expression e; type T; position p != bad.p; @@ -static T x@p; ... 
when != x when strict ?x = e; In the following log you can see a significant difference in the object file size. Also, there is a significant difference in the bss segment. This log is the output of the size command, before and after the code change: before: text data bss dec hex filename 78689 15272 27808 121769 1dba9 drivers/net/ethernet/cavium/liquidio/lio_main.o after: text data bss dec hex filename 78667 15128 27680 121475 1da83 drivers/net/ethernet/cavium/liquidio/lio_main.o Signed-off-by: Gustavo A. R. Silva Acked-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 51583ae4b1eb..1d8fefa9ce64 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2544,8 +2544,8 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev, { struct octeon_droq_ops droq_ops; struct net_device *netdev; - static int cpu_id; - static int cpu_id_modulus; + int cpu_id; + int cpu_id_modulus; struct octeon_droq *droq; struct napi_struct *napi; int q, q_no, retval = 0; -- cgit v1.2.3-55-g7522 From ddee3103eeafd1d9dbe831274c3813d5cac30bc3 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 18 Jul 2017 15:59:25 -0700 Subject: bluetooth: 6lowpan dev_close never returns error The function dev_close in current kernel will never return an error. Later changes will make it void. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/bluetooth/6lowpan.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index ab3b654b05cc..2af4f1cc0ab4 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -618,12 +618,8 @@ static void ifup(struct net_device *netdev) static void ifdown(struct net_device *netdev) { - int err; - rtnl_lock(); - err = dev_close(netdev); - if (err < 0) - BT_INFO("iface %s cannot be closed (%d)", netdev->name, err); + dev_close(netdev); rtnl_unlock(); } -- cgit v1.2.3-55-g7522 From 4a614dd3e550b66a2e57119e2d5e0fbc7f17f634 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 18 Jul 2017 15:59:26 -0700 Subject: hns: remove useless void cast There is no need to cast away return value of dev_close. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a8db27e86a11..78cb20c67aa6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -595,7 +595,7 @@ static void hns_nic_self_test(struct net_device *ndev, set_bit(NIC_STATE_TESTING, &priv->state); if (if_running) - (void)dev_close(ndev); + dev_close(ndev); for (i = 0; i < SELF_TEST_TPYE_NUM; i++) { if (!st_param[i][1]) -- cgit v1.2.3-55-g7522 From 7051b88a35c7dde5705923833117e14f9cc17d92 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 18 Jul 2017 15:59:27 -0700 Subject: net: make dev_close and related functions void There is no useful return value from dev_close. All paths return 0. Change dev_close and helper functions to void. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 4 ++-- net/core/dev.c | 26 +++++++++++--------------- 2 files changed, 13 insertions(+), 17 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c60351b84323..614642eb7eb7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2432,8 +2432,8 @@ struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); struct net_device *__dev_get_by_name(struct net *net, const char *name); int dev_alloc_name(struct net_device *dev, const char *name); int dev_open(struct net_device *dev); -int dev_close(struct net_device *dev); -int dev_close_many(struct list_head *head, bool unlink); +void dev_close(struct net_device *dev); +void dev_close_many(struct list_head *head, bool unlink); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); int dev_queue_xmit(struct sk_buff *skb); diff --git a/net/core/dev.c b/net/core/dev.c index 467420eda02e..d1b9c9b6c970 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1413,7 +1413,7 @@ int dev_open(struct net_device *dev) } EXPORT_SYMBOL(dev_open); -static int __dev_close_many(struct list_head *head) +static void __dev_close_many(struct list_head *head) { struct net_device *dev; @@ -1455,23 +1455,18 @@ static int __dev_close_many(struct list_head *head) dev->flags &= ~IFF_UP; netpoll_poll_enable(dev); } - - return 0; } -static int __dev_close(struct net_device *dev) +static void __dev_close(struct net_device *dev) { - int retval; LIST_HEAD(single); list_add(&dev->close_list, &single); - retval = __dev_close_many(&single); + __dev_close_many(&single); list_del(&single); - - return retval; } -int dev_close_many(struct list_head *head, bool unlink) +void dev_close_many(struct list_head *head, bool unlink) { struct net_device *dev, *tmp; @@ -1488,8 +1483,6 @@ int dev_close_many(struct list_head *head, bool unlink) if (unlink) list_del_init(&dev->close_list); } - - return 0; } EXPORT_SYMBOL(dev_close_many); @@ -1502,7 +1495,7 @@ EXPORT_SYMBOL(dev_close_many); * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier * chain. */ -int dev_close(struct net_device *dev) +void dev_close(struct net_device *dev) { if (dev->flags & IFF_UP) { LIST_HEAD(single); @@ -1511,7 +1504,6 @@ int dev_close(struct net_device *dev) dev_close_many(&single, true); list_del(&single); } - return 0; } EXPORT_SYMBOL(dev_close); @@ -6725,8 +6717,12 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags) */ ret = 0; - if ((old_flags ^ flags) & IFF_UP) - ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev); + if ((old_flags ^ flags) & IFF_UP) { + if (old_flags & IFF_UP) + __dev_close(dev); + else + ret = __dev_open(dev); + } if ((flags ^ dev->gflags) & IFF_PROMISC) { int inc = (flags & IFF_PROMISC) ? 1 : -1; -- cgit v1.2.3-55-g7522 From 1b01994aa6f9bad60d2b162c17dca530c21e2687 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 19 Jul 2017 11:53:12 -0700 Subject: netvsc: force link update after MTU change If two MTU changes are in less than update interval (2 seconds), then the netvsc network device may get stuck with no carrier. The netvsc driver debounces link status events which is fine for unsolicited updates, but blocks getting the update after down/up from MTU reinitialization. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 63c98bbbc596..09b07ca9e69a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -783,6 +783,7 @@ static int netvsc_set_channels(struct net_device *net, ret = netvsc_open(net); /* We may have missed link change notifications */ + net_device_ctx->last_reconfig = 0; schedule_delayed_work(&net_device_ctx->dwork, 0); return ret; -- cgit v1.2.3-55-g7522 From 79e8cbe7a789a0863cc6cd874872b4dd63ec5947 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 19 Jul 2017 11:53:13 -0700 Subject: netvsc: add some rtnl_dereference annotations In a couple places RTNL is held, and the netvsc_device pointer is acquired without annotation. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 5 +++-- drivers/net/hyperv/netvsc_drv.c | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 0a9167dd72fb..e202ec5d6f63 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -41,7 +41,7 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct hv_device *dev = net_device_ctx->device_ctx; - struct netvsc_device *nv_dev = net_device_ctx->nvdev; + struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev); struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt; memset(init_pkt, 0, sizeof(struct nvsp_message)); @@ -549,7 +549,8 @@ void netvsc_device_remove(struct hv_device *device) { struct net_device *ndev = hv_get_drvdata(device); struct net_device_context *net_device_ctx = netdev_priv(ndev); - struct netvsc_device *net_device = net_device_ctx->nvdev; + struct netvsc_device *net_device + = rtnl_dereference(net_device_ctx->nvdev); int i; netvsc_disconnect_vsp(device); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 09b07ca9e69a..e8e82a6a4b1a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -69,7 +69,7 @@ static void netvsc_set_multicast_list(struct net_device *net) static int netvsc_open(struct net_device *net) { struct net_device_context *ndev_ctx = netdev_priv(net); - struct netvsc_device *nvdev = ndev_ctx->nvdev; + struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); struct rndis_device *rdev; int ret = 0; @@ -1364,7 +1364,7 @@ static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) continue; /* not a netvsc device */ net_device_ctx = netdev_priv(dev); - if (net_device_ctx->nvdev == NULL) + if (!rtnl_dereference(net_device_ctx->nvdev)) continue; /* device is removed */ if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev) @@ -1589,7 +1589,8 @@ static int netvsc_remove(struct hv_device *dev) * removed. Also blocks mtu and channel changes. */ rtnl_lock(); - rndis_filter_device_remove(dev, ndev_ctx->nvdev); + rndis_filter_device_remove(dev, + rtnl_dereference(ndev_ctx->nvdev)); rtnl_unlock(); unregister_netdev(net); -- cgit v1.2.3-55-g7522 From a5e1ec3833211bc0136649a89ee3a29355c7b43b Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 19 Jul 2017 11:53:14 -0700 Subject: netvsc: change order of steps in setting queues This fixes the error unwind logic for incorrect number of queues. 
If netif_set_real_num_XX_queues failed then rndis_filter_device_add would have been called twice. Since input arguments are already ranged checked this is a hypothetical only problem, not possible in actual code. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index e8e82a6a4b1a..91637336d1fb 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -724,17 +724,15 @@ static int netvsc_set_queues(struct net_device *net, struct hv_device *dev, device_info.ring_size = ring_size; device_info.max_num_vrss_chns = num_chn; - ret = rndis_filter_device_add(dev, &device_info); - if (ret) - return ret; - ret = netif_set_real_num_tx_queues(net, num_chn); if (ret) return ret; ret = netif_set_real_num_rx_queues(net, num_chn); + if (ret) + return ret; - return ret; + return rndis_filter_device_add(dev, &device_info); } static int netvsc_set_channels(struct net_device *net, -- cgit v1.2.3-55-g7522 From ea383bf146be1e190f1d696e7db060afa8c93c31 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 19 Jul 2017 11:53:15 -0700 Subject: netvsc: change logic for change mtu and set_queues Use device detach/attach to ensure that no packets are handed to device during state changes. Call rndis_filter_open/close directly as part of later VF related changes. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 1 + drivers/net/hyperv/netvsc_drv.c | 38 ++++++++++++++++++-------------------- drivers/net/hyperv/rndis_filter.c | 5 +++++ 3 files changed, 24 insertions(+), 20 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index d6c25580f8dd..5d541a1462c2 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -200,6 +200,7 @@ int netvsc_recv_callback(struct net_device *net, const struct ndis_pkt_8021q_info *vlan); void netvsc_channel_cb(void *context); int netvsc_poll(struct napi_struct *napi, int budget); +bool rndis_filter_opened(const struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); int rndis_filter_device_add(struct hv_device *dev, diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 91637336d1fb..82e41c056e53 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -742,7 +742,7 @@ static int netvsc_set_channels(struct net_device *net, struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); unsigned int count = channels->combined_count; - bool was_running; + bool was_opened; int ret; /* We do not support separate count for rx, tx, or other */ @@ -762,12 +762,9 @@ static int netvsc_set_channels(struct net_device *net, if (count > nvdev->max_chn) return -EINVAL; - was_running = netif_running(net); - if (was_running) { - ret = netvsc_close(net); - if (ret) - return ret; - } + was_opened = rndis_filter_opened(nvdev); + if (was_opened) + rndis_filter_close(nvdev); rndis_filter_device_remove(dev, nvdev); @@ -777,8 +774,9 @@ static int netvsc_set_channels(struct net_device *net, else netvsc_set_queues(net, dev, nvdev->num_chn); - if (was_running) - ret = netvsc_open(net); + nvdev = rtnl_dereference(net_device_ctx->nvdev); + if (was_opened) + rndis_filter_open(nvdev); /* 
We may have missed link change notifications */ net_device_ctx->last_reconfig = 0; @@ -848,18 +846,15 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); struct hv_device *hdev = ndevctx->device_ctx; struct netvsc_device_info device_info; - bool was_running; - int ret = 0; + bool was_opened; if (!nvdev || nvdev->destroy) return -ENODEV; - was_running = netif_running(ndev); - if (was_running) { - ret = netvsc_close(ndev); - if (ret) - return ret; - } + netif_device_detach(ndev); + was_opened = rndis_filter_opened(nvdev); + if (was_opened) + rndis_filter_close(nvdev); memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; @@ -877,14 +872,17 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) ndev->mtu = mtu; rndis_filter_device_add(hdev, &device_info); + nvdev = rtnl_dereference(ndevctx->nvdev); - if (was_running) - ret = netvsc_open(ndev); + if (was_opened) + rndis_filter_open(nvdev); + + netif_device_attach(ndev); /* We may have missed link change notifications */ schedule_delayed_work(&ndevctx->dwork, 0); - return ret; + return 0; } static void netvsc_get_stats64(struct net_device *net, diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 85c00e1c52b6..313c6d00d7d9 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1302,3 +1302,8 @@ int rndis_filter_close(struct netvsc_device *nvdev) return rndis_filter_close_device(nvdev->extension); } + +bool rndis_filter_opened(const struct netvsc_device *nvdev) +{ + return atomic_read(&nvdev->open_cnt) > 0; +} -- cgit v1.2.3-55-g7522 From 9749fed5d43d84b86f1c98b70167c31c296bb6a6 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 19 Jul 2017 11:53:16 -0700 Subject: netvsc: use ERR_PTR to avoid dereference issues The rndis_filter_device_add function is called both in probe context and RTNL context,and creates the netvsc_device inner structure. It is easier to get the RTNL lock annotation correct if it returns the object directly, rather than implicitly by updating network device private data. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 8 ++++---- drivers/net/hyperv/netvsc.c | 13 ++++++------ drivers/net/hyperv/netvsc_drv.c | 34 ++++++++++++++++++------------- drivers/net/hyperv/rndis_filter.c | 43 ++++++++++++++++----------------------- 4 files changed, 49 insertions(+), 49 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 5d541a1462c2..e620374727c8 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -183,8 +183,8 @@ struct rndis_device { /* Interface */ struct rndis_message; struct netvsc_device; -int netvsc_device_add(struct hv_device *device, - const struct netvsc_device_info *info); +struct netvsc_device *netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *info); void netvsc_device_remove(struct hv_device *device); int netvsc_send(struct hv_device *device, struct hv_netvsc_packet *packet, @@ -203,8 +203,8 @@ int netvsc_poll(struct napi_struct *napi, int budget); bool rndis_filter_opened(const struct netvsc_device *nvdev); int rndis_filter_open(struct netvsc_device *nvdev); int rndis_filter_close(struct netvsc_device *nvdev); -int rndis_filter_device_add(struct hv_device *dev, - struct netvsc_device_info *info); +struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, + struct netvsc_device_info *info); void rndis_filter_update(struct netvsc_device *nvdev); void rndis_filter_device_remove(struct hv_device *dev, struct netvsc_device *nvdev); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index e202ec5d6f63..4a2550559442 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -29,6 +29,8 @@ #include #include #include +#include + #include #include "hyperv_net.h" @@ -1272,8 +1274,8 @@ void netvsc_channel_cb(void *context) * netvsc_device_add - Callback when the device belonging to this * driver is added */ -int netvsc_device_add(struct hv_device *device, - const struct netvsc_device_info *device_info) +struct netvsc_device *netvsc_device_add(struct hv_device *device, + const struct netvsc_device_info *device_info) { int i, ret = 0; int ring_size = device_info->ring_size; @@ -1283,7 +1285,7 @@ int netvsc_device_add(struct hv_device *device, net_device = alloc_net_device(); if (!net_device) - return -ENOMEM; + return ERR_PTR(-ENOMEM); net_device->ring_size = ring_size; @@ -1339,7 +1341,7 @@ int netvsc_device_add(struct hv_device *device, goto close; } - return ret; + return net_device; close: netif_napi_del(&net_device->chan_table[0].napi); @@ -1350,6 +1352,5 @@ close: cleanup: free_netvsc_device(&net_device->rcu); - return ret; - + return ERR_PTR(ret); } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 82e41c056e53..0ca8c74143b4 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -717,6 +717,7 @@ static int netvsc_set_queues(struct net_device *net, struct hv_device *dev, u32 num_chn) { struct netvsc_device_info device_info; + struct netvsc_device *net_device; int ret; memset(&device_info, 0, sizeof(device_info)); @@ -732,7 +733,8 @@ static int netvsc_set_queues(struct net_device *net, struct hv_device *dev, if (ret) return ret; - return rndis_filter_device_add(dev, &device_info); + net_device = rndis_filter_device_add(dev, &device_info); + return IS_ERR(net_device) ? 
PTR_ERR(net_device) : 0; } static int netvsc_set_channels(struct net_device *net, @@ -845,8 +847,10 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) struct net_device_context *ndevctx = netdev_priv(ndev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); struct hv_device *hdev = ndevctx->device_ctx; + int orig_mtu = ndev->mtu; struct netvsc_device_info device_info; bool was_opened; + int ret = 0; if (!nvdev || nvdev->destroy) return -ENODEV; @@ -863,16 +867,16 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) rndis_filter_device_remove(hdev, nvdev); - /* 'nvdev' has been freed in rndis_filter_device_remove() -> - * netvsc_device_remove () -> free_netvsc_device(). - * We mustn't access it before it's re-created in - * rndis_filter_device_add() -> netvsc_device_add(). - */ - ndev->mtu = mtu; - rndis_filter_device_add(hdev, &device_info); - nvdev = rtnl_dereference(ndevctx->nvdev); + nvdev = rndis_filter_device_add(hdev, &device_info); + if (IS_ERR(nvdev)) { + ret = PTR_ERR(nvdev); + + /* Attempt rollback to original MTU */ + ndev->mtu = orig_mtu; + rndis_filter_device_add(hdev, &device_info); + } if (was_opened) rndis_filter_open(nvdev); @@ -882,7 +886,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) /* We may have missed link change notifications */ schedule_delayed_work(&ndevctx->dwork, 0); - return 0; + return ret; } static void netvsc_get_stats64(struct net_device *net, @@ -1525,8 +1529,10 @@ static int netvsc_probe(struct hv_device *dev, memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = VRSS_CHANNEL_DEFAULT; - ret = rndis_filter_device_add(dev, &device_info); - if (ret != 0) { + + nvdev = rndis_filter_device_add(dev, &device_info); + if (IS_ERR(nvdev)) { + ret = PTR_ERR(nvdev); netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); free_netdev(net); hv_set_drvdata(dev, NULL); @@ -1540,11 +1546,11 @@ static int netvsc_probe(struct hv_device *dev, NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; net->vlan_features = net->features; - /* RCU not necessary here, device not registered */ - nvdev = net_device_ctx->nvdev; netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); + netdev_lockdep_set_classes(net); + /* MTU range: 68 - 1500 or 65521 */ net->min_mtu = NETVSC_MTU_MIN; if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2) diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 313c6d00d7d9..cacf1e5536f7 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -658,9 +658,9 @@ cleanup: static int rndis_filter_set_offload_params(struct net_device *ndev, + struct netvsc_device *nvdev, struct ndis_offload_params *req_offloads) { - struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev); struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; @@ -1052,8 +1052,8 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) complete(&nvscdev->channel_init_wait); } -int rndis_filter_device_add(struct hv_device *dev, - struct netvsc_device_info *device_info) +struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, + struct netvsc_device_info *device_info) { struct net_device *net = hv_get_drvdata(dev); struct net_device_context *net_device_ctx = netdev_priv(net); @@ -1072,21 +1072,20 @@ int rndis_filter_device_add(struct hv_device *dev, rndis_device = get_rndis_device(); if 
(!rndis_device) - return -ENODEV; + return ERR_PTR(-ENODEV); /* * Let the inner driver handle this first to create the netvsc channel * NOTE! Once the channel is created, we may get a receive callback * (RndisFilterOnReceive()) before this call is completed */ - ret = netvsc_device_add(dev, device_info); - if (ret != 0) { + net_device = netvsc_device_add(dev, device_info); + if (IS_ERR(net_device)) { kfree(rndis_device); - return ret; + return net_device; } /* Initialize the rndis device */ - net_device = net_device_ctx->nvdev; net_device->max_chn = 1; net_device->num_chn = 1; @@ -1097,10 +1096,8 @@ int rndis_filter_device_add(struct hv_device *dev, /* Send the rndis initialization message */ ret = rndis_filter_init_device(rndis_device); - if (ret != 0) { - rndis_filter_device_remove(dev, net_device); - return ret; - } + if (ret != 0) + goto err_dev_remv; /* Get the MTU from the host */ size = sizeof(u32); @@ -1112,19 +1109,15 @@ int rndis_filter_device_add(struct hv_device *dev, /* Get the mac address */ ret = rndis_filter_query_device_mac(rndis_device); - if (ret != 0) { - rndis_filter_device_remove(dev, net_device); - return ret; - } + if (ret != 0) + goto err_dev_remv; memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); /* Find HW offload capabilities */ ret = rndis_query_hwcaps(rndis_device, &hwcaps); - if (ret != 0) { - rndis_filter_device_remove(dev, net_device); - return ret; - } + if (ret != 0) + goto err_dev_remv; /* A value of zero means "no change"; now turn on what we want. */ memset(&offloads, 0, sizeof(struct ndis_offload_params)); @@ -1179,7 +1172,7 @@ int rndis_filter_device_add(struct hv_device *dev, netif_set_gso_max_size(net, gso_max_size); - ret = rndis_filter_set_offload_params(net, &offloads); + ret = rndis_filter_set_offload_params(net, net_device, &offloads); if (ret) goto err_dev_remv; @@ -1190,7 +1183,7 @@ int rndis_filter_device_add(struct hv_device *dev, rndis_device->link_state ? "down" : "up"); if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) - return 0; + return net_device; rndis_filter_query_link_speed(rndis_device); @@ -1223,7 +1216,7 @@ int rndis_filter_device_add(struct hv_device *dev, num_rss_qs = net_device->num_chn - 1; if (num_rss_qs == 0) - return 0; + return net_device; refcount_set(&net_device->sc_offered, num_rss_qs); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); @@ -1260,11 +1253,11 @@ out: net_device->num_chn = 1; } - return 0; /* return 0 because primary channel can be used alone */ + return net_device; err_dev_remv: rndis_filter_device_remove(dev, net_device); - return ret; + return ERR_PTR(ret); } void rndis_filter_device_remove(struct hv_device *dev, -- cgit v1.2.3-55-g7522 From 2a926f791211b40ba114f45e0e7bfefd2fac5d30 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 19 Jul 2017 11:53:17 -0700 Subject: netvsc: need rcu_derefence when accessing internal device info The netvsc_device structure should be accessed by rcu_dereference in the send path. Change arguments to netvsc_send() to make this easier to do correctly. Remove no longer needed hv_device_to_netvsc_device. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 10 +++------- drivers/net/hyperv/netvsc.c | 8 +++++--- drivers/net/hyperv/netvsc_drv.c | 4 ++-- drivers/net/hyperv/rndis_filter.c | 2 +- 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index e620374727c8..0054b6929f6e 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -183,10 +183,12 @@ struct rndis_device { /* Interface */ struct rndis_message; struct netvsc_device; +struct net_device_context; + struct netvsc_device *netvsc_device_add(struct hv_device *device, const struct netvsc_device_info *info); void netvsc_device_remove(struct hv_device *device); -int netvsc_send(struct hv_device *device, +int netvsc_send(struct net_device_context *ndc, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, struct hv_page_buffer **page_buffer, @@ -790,12 +792,6 @@ net_device_to_netvsc_device(struct net_device *ndev) return ((struct net_device_context *)netdev_priv(ndev))->nvdev; } -static inline struct netvsc_device * -hv_device_to_netvsc_device(struct hv_device *device) -{ - return net_device_to_netvsc_device(hv_get_drvdata(device)); -} - /* NdisInitialize message */ struct rndis_initialize_request { u32 req_id; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 4a2550559442..3c6f3ae520d9 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -822,13 +822,15 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, msdp->count = 0; } -int netvsc_send(struct hv_device *device, +/* RCU already held by caller */ +int netvsc_send(struct net_device_context *ndev_ctx, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, struct hv_page_buffer **pb, struct sk_buff *skb) { - struct netvsc_device *net_device = hv_device_to_netvsc_device(device); + struct netvsc_device *net_device = rcu_dereference(ndev_ctx->nvdev); + struct hv_device *device = ndev_ctx->device_ctx; int ret = 0; struct netvsc_channel *nvchan; u32 pktlen = packet->total_data_buflen, msd_len = 0; @@ -840,7 +842,7 @@ int netvsc_send(struct hv_device *device, bool xmit_more = (skb != NULL) ? skb->xmit_more : false; /* If device is rescinded, return error and packet will get dropped. 
*/ - if (unlikely(net_device->destroy)) + if (unlikely(!net_device || net_device->destroy)) return -ENODEV; /* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 0ca8c74143b4..1238600d717e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -505,8 +505,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) /* timestamp packet in software */ skb_tx_timestamp(skb); - ret = netvsc_send(net_device_ctx->device_ctx, packet, - rndis_msg, &pb, skb); + + ret = netvsc_send(net_device_ctx, packet, rndis_msg, &pb, skb); if (likely(ret == 0)) return NETDEV_TX_OK; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index cacf1e5536f7..9ab67c8309ff 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -243,7 +243,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, pb[0].len; } - ret = netvsc_send(net_device_ctx->device_ctx, packet, NULL, &pb, NULL); + ret = netvsc_send(net_device_ctx, packet, NULL, &pb, NULL); return ret; } -- cgit v1.2.3-55-g7522 From 35fbbccfb417385c1c8cc6f799154ea1ebdc22ef Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 19 Jul 2017 11:53:18 -0700 Subject: netvsc: save pointer to parent netvsc_device in channel table Keep back pointer in the per-channel data structure to avoid any possible RCU related issues when napi poll is called but netvsc_device is in RCU limbo. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 1 + drivers/net/hyperv/netvsc.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 0054b6929f6e..d13572879e7e 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -727,6 +727,7 @@ struct net_device_context { /* Per channel data */ struct netvsc_channel { struct vmbus_channel *channel; + struct netvsc_device *net_device; const struct vmpacket_descriptor *desc; struct napi_struct napi; struct multi_send_data msd; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 3c6f3ae520d9..c15640c6fd83 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1224,11 +1224,11 @@ int netvsc_poll(struct napi_struct *napi, int budget) { struct netvsc_channel *nvchan = container_of(napi, struct netvsc_channel, napi); + struct netvsc_device *net_device = nvchan->net_device; struct vmbus_channel *channel = nvchan->channel; struct hv_device *device = netvsc_channel_to_device(channel); u16 q_idx = channel->offermsg.offer.sub_channel_index; struct net_device *ndev = hv_get_drvdata(device); - struct netvsc_device *net_device = net_device_to_netvsc_device(ndev); int work_done = 0; /* If starting a new interval */ @@ -1307,6 +1307,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, struct netvsc_channel *nvchan = &net_device->chan_table[i]; nvchan->channel = device->channel; + nvchan->net_device = net_device; } /* Enable NAPI handler before init callbacks */ -- cgit v1.2.3-55-g7522 From 3962981f4822aaf284234efd0500041417faea86 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 19 Jul 2017 11:53:19 -0700 Subject: netvsc: add rtnl annotations in rndis The rndis functions are used when changing device state. Therefore the references from network device to internal state are protected by RTNL mutex. 
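The locking idiom behind these annotations, in outline: the pointer from the network device's private context to the inner device state is only updated with RTNL held, so control-path readers that already hold RTNL can use rtnl_dereference(), while the data path keeps using RCU. A minimal sketch of that pattern follows, with invented names (dev_ctx, inner_state) rather than code from the patch:

#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>

struct inner_state {
	int num_chn;
};

struct dev_ctx {
	struct inner_state __rcu *inner;	/* writes happen only under RTNL */
};

/* Control path: caller already holds RTNL, so no rcu_read_lock() is needed. */
static int ctx_num_chn(struct dev_ctx *ctx)
{
	struct inner_state *st = rtnl_dereference(ctx->inner);

	return st ? st->num_chn : 0;
}

/* Writer: must hold RTNL; ASSERT_RTNL() documents and checks that. */
static void ctx_set_inner(struct dev_ctx *ctx, struct inner_state *st)
{
	ASSERT_RTNL();
	rcu_assign_pointer(ctx->inner, st);
}

Paths that may run without RTNL would instead read with rcu_dereference() inside rcu_read_lock(); rcu_dereference_rtnl(), which the netvsc_send() hunk below switches to, accepts either form of protection.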
Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 6 ------ drivers/net/hyperv/netvsc.c | 6 ++++-- drivers/net/hyperv/netvsc_drv.c | 1 + drivers/net/hyperv/rndis_filter.c | 12 ++++++++++-- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index d13572879e7e..afb65f753574 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -787,12 +787,6 @@ struct netvsc_device { struct rcu_head rcu; }; -static inline struct netvsc_device * -net_device_to_netvsc_device(struct net_device *ndev) -{ - return ((struct net_device_context *)netdev_priv(ndev))->nvdev; -} - /* NdisInitialize message */ struct rndis_initialize_request { u32 req_id; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index c15640c6fd83..0a9d9feedc3f 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -105,7 +105,8 @@ static void netvsc_destroy_buf(struct hv_device *device) { struct nvsp_message *revoke_packet; struct net_device *ndev = hv_get_drvdata(device); - struct netvsc_device *net_device = net_device_to_netvsc_device(ndev); + struct net_device_context *ndc = netdev_priv(ndev); + struct netvsc_device *net_device = rtnl_dereference(ndc->nvdev); int ret; /* @@ -829,7 +830,8 @@ int netvsc_send(struct net_device_context *ndev_ctx, struct hv_page_buffer **pb, struct sk_buff *skb) { - struct netvsc_device *net_device = rcu_dereference(ndev_ctx->nvdev); + struct netvsc_device *net_device + = rcu_dereference_rtnl(ndev_ctx->nvdev); struct hv_device *device = ndev_ctx->device_ctx; int ret = 0; struct netvsc_channel *nvchan; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1238600d717e..a164981c15f7 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1548,6 +1548,7 @@ static int netvsc_probe(struct hv_device *dev, netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); + rtnl_unlock(); netdev_lockdep_set_classes(net); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 9ab67c8309ff..e439886f72c1 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -84,6 +84,14 @@ static struct rndis_device *get_rndis_device(void) return device; } +static struct netvsc_device * +net_device_to_netvsc_device(struct net_device *ndev) +{ + struct net_device_context *net_device_ctx = netdev_priv(ndev); + + return rtnl_dereference(net_device_ctx->nvdev); +} + static struct rndis_request *get_rndis_request(struct rndis_device *dev, u32 msg_type, u32 msg_len) @@ -472,7 +480,7 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) { struct net_device_context *ndevctx = netdev_priv(dev->ndev); - struct netvsc_device *nvdev = ndevctx->nvdev; + struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); struct ndis_offload *hwcaps; u32 nvsp_version = nvdev->nvsp_version; u8 ndis_rev; @@ -944,7 +952,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev) struct rndis_request *request; struct rndis_halt_request *halt; struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); - struct netvsc_device *nvdev = net_device_ctx->nvdev; + struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, 
RNDIS_MSG_HALT, -- cgit v1.2.3-55-g7522 From 04d8980b4a9ca178be1c703467f2ed4ac0800e90 Mon Sep 17 00:00:00 2001 From: Arjun Vynipadath Date: Wed, 19 Jul 2017 13:09:18 +0530 Subject: cxgb4: Update register ranges of T4/T5/T6 adapters Signed-off-by: Arjun Vynipadath Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 51 ++++++++---------------------- 1 file changed, 14 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 82bf7aac6cdb..570c0958a0e1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -913,7 +913,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0xd010, 0xd03c, 0xdfc0, 0xdfe0, 0xe000, 0xea7c, - 0xf000, 0x11190, + 0xf000, 0x11110, + 0x11118, 0x11190, 0x19040, 0x1906c, 0x19078, 0x19080, 0x1908c, 0x190e4, @@ -1439,8 +1440,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x30000, 0x30030, - 0x30038, 0x30038, - 0x30040, 0x30040, 0x30100, 0x30144, 0x30190, 0x301a0, 0x301a8, 0x301b8, @@ -1551,8 +1550,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x33c3c, 0x33c50, 0x33cf0, 0x33cfc, 0x34000, 0x34030, - 0x34038, 0x34038, - 0x34040, 0x34040, 0x34100, 0x34144, 0x34190, 0x341a0, 0x341a8, 0x341b8, @@ -1663,8 +1660,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x37c3c, 0x37c50, 0x37cf0, 0x37cfc, 0x38000, 0x38030, - 0x38038, 0x38038, - 0x38040, 0x38040, 0x38100, 0x38144, 0x38190, 0x381a0, 0x381a8, 0x381b8, @@ -1775,8 +1770,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x3bc3c, 0x3bc50, 0x3bcf0, 0x3bcfc, 0x3c000, 0x3c030, - 0x3c038, 0x3c038, - 0x3c040, 0x3c040, 0x3c100, 0x3c144, 0x3c190, 0x3c1a0, 0x3c1a8, 0x3c1b8, @@ -2040,12 +2033,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1190, 0x1194, 0x11a0, 0x11a4, 0x11b0, 0x11b4, - 0x11fc, 0x1258, - 0x1280, 0x12d4, - 0x12d9, 0x12d9, - 0x12de, 0x12de, - 0x12e3, 0x12e3, - 0x12e8, 0x133c, + 0x11fc, 0x1274, + 0x1280, 0x133c, 0x1800, 0x18fc, 0x3000, 0x302c, 0x3060, 0x30b0, @@ -2076,6 +2065,9 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x5ea0, 0x5eb0, 0x5ec0, 0x5ec0, 0x5ec8, 0x5ed0, + 0x5ee0, 0x5ee0, + 0x5ef0, 0x5ef0, + 0x5f00, 0x5f00, 0x6000, 0x6020, 0x6028, 0x6040, 0x6058, 0x609c, @@ -2133,6 +2125,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0xd300, 0xd31c, 0xdfc0, 0xdfe0, 0xe000, 0xf008, + 0xf010, 0xf018, + 0xf020, 0xf028, 0x11000, 0x11014, 0x11048, 0x1106c, 0x11074, 0x11088, @@ -2256,13 +2250,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x1ff00, 0x1ff84, 0x1ffc0, 0x1ffc8, 0x30000, 0x30030, - 0x30038, 0x30038, - 0x30040, 0x30040, - 0x30048, 0x30048, - 0x30050, 0x30050, - 0x3005c, 0x30060, - 0x30068, 0x30068, - 0x30070, 0x30070, 0x30100, 0x30168, 0x30190, 0x301a0, 0x301a8, 0x301b8, @@ -2325,13 +2312,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x326a8, 0x326a8, 0x326ec, 0x326ec, 0x32a00, 0x32abc, - 0x32b00, 0x32b38, + 0x32b00, 0x32b18, + 0x32b20, 0x32b38, 0x32b40, 0x32b58, 0x32b60, 0x32b78, 0x32c00, 0x32c00, 0x32c08, 0x32c3c, - 0x32e00, 0x32e2c, - 0x32f00, 0x32f2c, 0x33000, 0x3302c, 0x33034, 0x33050, 0x33058, 0x33058, @@ -2396,13 +2382,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x33c38, 0x33c50, 0x33cf0, 0x33cfc, 
0x34000, 0x34030, - 0x34038, 0x34038, - 0x34040, 0x34040, - 0x34048, 0x34048, - 0x34050, 0x34050, - 0x3405c, 0x34060, - 0x34068, 0x34068, - 0x34070, 0x34070, 0x34100, 0x34168, 0x34190, 0x341a0, 0x341a8, 0x341b8, @@ -2465,13 +2444,12 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x366a8, 0x366a8, 0x366ec, 0x366ec, 0x36a00, 0x36abc, - 0x36b00, 0x36b38, + 0x36b00, 0x36b18, + 0x36b20, 0x36b38, 0x36b40, 0x36b58, 0x36b60, 0x36b78, 0x36c00, 0x36c00, 0x36c08, 0x36c3c, - 0x36e00, 0x36e2c, - 0x36f00, 0x36f2c, 0x37000, 0x3702c, 0x37034, 0x37050, 0x37058, 0x37058, @@ -2545,8 +2523,7 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) 0x40280, 0x40280, 0x40304, 0x40304, 0x40330, 0x4033c, - 0x41304, 0x413b8, - 0x413c0, 0x413c8, + 0x41304, 0x413c8, 0x413d0, 0x413dc, 0x413f0, 0x413f0, 0x41400, 0x4140c, -- cgit v1.2.3-55-g7522 From 98dc77d57169f51d100f8b0cb3e4d1e0911ef7a4 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Tue, 4 Jul 2017 12:57:56 +0200 Subject: Bluetooth: hci_bcm: Make bcm_request_irq fail if no IRQ resource In case of no IRQ resource associated to the bcm_device, requesting IRQ should return an error in order to not enable low power mgmt. Signed-off-by: Loic Poulain Reported-by: Ian Molton Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_bcm.c | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 6a662d0161b4..6b42372c53ef 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -176,7 +176,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data) static int bcm_request_irq(struct bcm_data *bcm) { struct bcm_device *bdev = bcm->dev; - int err = 0; + int err; /* If this is not a platform device, do not enable PM functionalities */ mutex_lock(&bcm_device_lock); @@ -185,21 +185,23 @@ static int bcm_request_irq(struct bcm_data *bcm) goto unlock; } - if (bdev->irq > 0) { - err = devm_request_irq(&bdev->pdev->dev, bdev->irq, - bcm_host_wake, IRQF_TRIGGER_RISING, - "host_wake", bdev); - if (err) - goto unlock; + if (bdev->irq <= 0) { + err = -EOPNOTSUPP; + goto unlock; + } - device_init_wakeup(&bdev->pdev->dev, true); + err = devm_request_irq(&bdev->pdev->dev, bdev->irq, bcm_host_wake, + IRQF_TRIGGER_RISING, "host_wake", bdev); + if (err) + goto unlock; - pm_runtime_set_autosuspend_delay(&bdev->pdev->dev, - BCM_AUTOSUSPEND_DELAY); - pm_runtime_use_autosuspend(&bdev->pdev->dev); - pm_runtime_set_active(&bdev->pdev->dev); - pm_runtime_enable(&bdev->pdev->dev); - } + device_init_wakeup(&bdev->pdev->dev, true); + + pm_runtime_set_autosuspend_delay(&bdev->pdev->dev, + BCM_AUTOSUSPEND_DELAY); + pm_runtime_use_autosuspend(&bdev->pdev->dev); + pm_runtime_set_active(&bdev->pdev->dev); + pm_runtime_enable(&bdev->pdev->dev); unlock: mutex_unlock(&bcm_device_lock); -- cgit v1.2.3-55-g7522 From 628c26b4c41ab64b26c6cfd832ee42d4b6d2666e Mon Sep 17 00:00:00 2001 From: Dmitry Tunin Date: Thu, 6 Jul 2017 14:41:13 +0300 Subject: Bluetooth: btusb: Add support of all Foxconn (105b) Broadcom devices There is another device T: Bus=01 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=12 MxCh= 0 D: Ver= 2.00 Cls=ff(vend.) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=105b ProdID=e066 Rev=01.12 S: Manufacturer=Broadcom Corp S: Product=BCM20702A0 S: SerialNumber=342387DAE35E C: #Ifs= 4 Cfg#= 1 Atr=e0 MxPwr=0mA I: If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=01 Prot=01 Driver=(none) I: If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) 
Sub=01 Prot=01 Driver=(none) I: If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none) I: If#= 3 Alt= 0 #EPs= 0 Cls=fe(app. ) Sub=01 Prot=01 Driver=(none) Since we have Cls=ff, we can add all of them. Signed-off-by: Dmitry Tunin Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index fa24d693af24..b1e01b057a7c 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -131,7 +131,8 @@ static const struct usb_device_id btusb_table[] = { { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM }, /* Broadcom BCM43142A0 (Foxconn/Lenovo) */ - { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM }, + { USB_VENDOR_AND_INTERFACE_INFO(0x105b, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, /* Broadcom BCM920703 (HTC Vive) */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01), -- cgit v1.2.3-55-g7522 From 2193a9800b1d6ae771539e1cdae1e7340b9d90ea Mon Sep 17 00:00:00 2001 From: Joan Jani Date: Thu, 6 Jul 2017 20:35:32 +0000 Subject: Bluetooth: btqca: Fixed a coding style error Fixed this coding style erro ./drivers/bluetooth/btqca.c:84: ERROR: code indent should use tabs where possible Signed-off-by: Joan Jani Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btqca.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 28afd5d585f9..0bbdfcef2aa8 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -81,7 +81,7 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version) * and lower 2 bytes from patch will be used. */ *rome_version = (le32_to_cpu(ver->soc_id) << 16) | - (le16_to_cpu(ver->rome_ver) & 0x0000ffff); + (le16_to_cpu(ver->rome_ver) & 0x0000ffff); out: kfree_skb(skb); -- cgit v1.2.3-55-g7522 From ca2eae7d25a1d3c7c675808fa907188e6a099537 Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Sat, 8 Jul 2017 14:43:34 +0100 Subject: Bluetooth: hci_nokia: prevent crash on module removal Only cancel any ongoing work after making sure, that no new work can be scheduled. This fixes a race condition in the remove handler. Signed-off-by: Ian Molton Reviewed-by: Sebastian Reichel Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_nokia.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index 181a15b549e5..bc1f4496583e 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -770,10 +770,12 @@ static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev) struct hci_uart *hu = &btdev->hu; struct hci_dev *hdev = hu->hdev; - cancel_work_sync(&hu->write_work); hci_unregister_dev(hdev); hci_free_dev(hdev); + + cancel_work_sync(&hu->write_work); + hu->proto->close(hu); pm_runtime_disable(&btdev->serdev->dev); -- cgit v1.2.3-55-g7522 From a529df8207b4dbc1a6ffd89d228afdcde042c97e Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Sat, 8 Jul 2017 14:43:35 +0100 Subject: Bluetooth: hci_nokia: remove duplicate call to pm_runtime_disable() pm_runtime_disable() is called in the _close() handler. Since we call the _close() handler on remove, there is no need to call pm_runtime_disable() a second time. 
Signed-off-by: Ian Molton Reviewed-by: Sebastian Reichel Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_nokia.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index bc1f4496583e..6dbb1f6ff6bd 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -777,8 +777,6 @@ static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev) cancel_work_sync(&hu->write_work); hu->proto->close(hu); - - pm_runtime_disable(&btdev->serdev->dev); } static int nokia_bluetooth_runtime_suspend(struct device *dev) -- cgit v1.2.3-55-g7522 From fd865802c66bc451dc515ed89360f84376ce1a56 Mon Sep 17 00:00:00 2001 From: Leif Liddy Date: Sat, 8 Jul 2017 20:55:32 +0200 Subject: Bluetooth: btusb: fix QCA Rome suspend/resume There's been numerous reported instances where BTUSB_QCA_ROME bluetooth controllers stop functioning upon resume from suspend. These devices seem to be losing power during suspend. Patch will detect a status change on resume and perform a reset. Signed-off-by: Leif Liddy Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index b1e01b057a7c..0d533b258aa6 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3068,6 +3068,12 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + + /* QCA Rome devices lose their updated firmware over suspend, + * but the USB hub doesn't notice any status change. + * Explicitly request a device reset on resume. + */ + set_bit(BTUSB_RESET_RESUME, &data->flags); } #ifdef CONFIG_BT_HCIBTUSB_RTL -- cgit v1.2.3-55-g7522 From c34dc3bfa7642fda423208579015d615f7becfa0 Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Sat, 8 Jul 2017 17:37:41 +0100 Subject: Bluetooth: hci_serdev: Introduce hci_uart_unregister_device() Several drivers have the same (and incorrect) code in their _remove() handler. Coalesce this into a shared function. 
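(With the shared helper in place, a serdev-based driver's remove path collapses to a one-liner. The sketch below uses a made-up driver name whose private data is assumed to embed a struct hci_uart hu; the two follow-up patches convert hci_nokia and hci_ll in exactly this way.)

    static void example_bt_serdev_remove(struct serdev_device *serdev)
    {
            struct example_bt_dev *bdev = serdev_device_get_drvdata(serdev);

            hci_uart_unregister_device(&bdev->hu);
    }
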
Signed-off-by: Ian Molton Reviewed-by: Sebastian Reichel Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_serdev.c | 13 +++++++++++++ drivers/bluetooth/hci_uart.h | 1 + 2 files changed, 14 insertions(+) diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index aea930101dd2..b725ac4f7ff6 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -354,3 +354,16 @@ err_alloc: return err; } EXPORT_SYMBOL_GPL(hci_uart_register_device); + +void hci_uart_unregister_device(struct hci_uart *hu) +{ + struct hci_dev *hdev = hu->hdev; + + hci_unregister_dev(hdev); + hci_free_dev(hdev); + + cancel_work_sync(&hu->write_work); + + hu->proto->close(hu); +} +EXPORT_SYMBOL_GPL(hci_uart_unregister_device); diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index c6e9e1cf63f8..d9cd95d81149 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -112,6 +112,7 @@ struct hci_uart { int hci_uart_register_proto(const struct hci_uart_proto *p); int hci_uart_unregister_proto(const struct hci_uart_proto *p); int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p); +void hci_uart_unregister_device(struct hci_uart *hu); int hci_uart_tx_wakeup(struct hci_uart *hu); int hci_uart_init_ready(struct hci_uart *hu); -- cgit v1.2.3-55-g7522 From 05f2a0bcecefdac5f2d27dff3164dcebff4d9618 Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Sat, 8 Jul 2017 17:37:42 +0100 Subject: Bluetooth: hci_nokia: Use new hci_uart_unregister_device() function Simplify _remove() path for hci_nokia.c Signed-off-by: Ian Molton Reviewed-by: Sebastian Reichel Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_nokia.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index 6dbb1f6ff6bd..3539fd03f47e 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -767,16 +767,8 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev) static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev) { struct nokia_bt_dev *btdev = serdev_device_get_drvdata(serdev); - struct hci_uart *hu = &btdev->hu; - struct hci_dev *hdev = hu->hdev; - - hci_unregister_dev(hdev); - hci_free_dev(hdev); - - cancel_work_sync(&hu->write_work); - - hu->proto->close(hu); + hci_uart_unregister_device(&btdev->hu); } static int nokia_bluetooth_runtime_suspend(struct device *dev) -- cgit v1.2.3-55-g7522 From 37f5258d1cde34e73a5ce3dfe00a80274e299c72 Mon Sep 17 00:00:00 2001 From: Ian Molton Date: Sat, 8 Jul 2017 17:37:43 +0100 Subject: Bluetooth: hci_ll: Use new hci_uart_unregister_device() function Convert hci_ll to use hci_uart_unregister_device(). This simplifies the _remove() handler as well as fixes a potential race condition on unload. 
Signed-off-by: Ian Molton Reviewed-by: Sebastian Reichel Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_ll.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index c982943f0747..1b898445a0b8 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -742,14 +742,8 @@ static int hci_ti_probe(struct serdev_device *serdev) static void hci_ti_remove(struct serdev_device *serdev) { struct ll_device *lldev = serdev_device_get_drvdata(serdev); - struct hci_uart *hu = &lldev->hu; - struct hci_dev *hdev = hu->hdev; - cancel_work_sync(&hu->write_work); - - hci_unregister_dev(hdev); - hci_free_dev(hdev); - hu->proto->close(hu); + hci_uart_unregister_device(&lldev->hu); } static const struct of_device_id hci_ti_of_match[] = { -- cgit v1.2.3-55-g7522 From 1d609dd32cd209f4643c7fd450f19d114cec0de7 Mon Sep 17 00:00:00 2001 From: Gustavo A. R. Silva Date: Wed, 19 Jul 2017 17:49:31 -0500 Subject: Bluetooth: btwilink: remove unnecessary static in bt_ti_probe() Remove unnecessary static on local variable hst. Such variable is initialized before being used, on every execution path throughout the function. The static has no benefit and, removing it reduces the object file size. This issue was detected using Coccinelle and the following semantic patch: @bad exists@ position p; identifier x; type T; @@ static T x@p; ... x = <+...x...+> @@ identifier x; expression e; type T; position p != bad.p; @@ -static T x@p; ... when != x when strict ?x = e; In the following log you can see the difference in the object file size. This log is the output of the size command, before and after the code change: before: text data bss dec hex filename 4029 2528 128 6685 1a1d drivers/bluetooth/btwilink.o after: text data bss dec hex filename 4007 2472 64 6543 198f drivers/bluetooth/btwilink.o Signed-off-by: Gustavo A. R. Silva Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btwilink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index 85a3978b064f..0cdb8961e9a1 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -276,7 +276,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb) static int bt_ti_probe(struct platform_device *pdev) { - static struct ti_st *hst; + struct ti_st *hst; struct hci_dev *hdev; int err; -- cgit v1.2.3-55-g7522 From 760446f967678e14ee1b6464ee1bb8562f299fa6 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Thu, 20 Jul 2017 18:28:48 +0530 Subject: cxgb4: display serial config and vpd versions print the versions of vpd and serial configuration file, flashed to adapter, and cleanup the relevant code. Signed-off-by: Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 14 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 57 +------- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 173 ++++++++++++++++++++++++ drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 2 + 4 files changed, 188 insertions(+), 58 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ef4be781fd05..1978abbc6ceb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -338,10 +338,12 @@ struct adapter_params { unsigned int sf_nsec; /* # of flash sectors */ unsigned int sf_fw_start; /* start of FW image in flash */ - unsigned int fw_vers; - unsigned int bs_vers; /* bootstrap version */ - unsigned int tp_vers; - unsigned int er_vers; /* expansion ROM version */ + unsigned int fw_vers; /* firmware version */ + unsigned int bs_vers; /* bootstrap version */ + unsigned int tp_vers; /* TP microcode version */ + unsigned int er_vers; /* expansion ROM version */ + unsigned int scfg_vers; /* Serial Configuration version */ + unsigned int vpd_vers; /* VPD Version */ u8 api_vers[7]; unsigned short mtus[NMTUS]; @@ -1407,6 +1409,10 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers); int t4_get_bs_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); int t4_get_exprom_version(struct adapter *adapter, u32 *vers); +int t4_get_scfg_version(struct adapter *adapter, u32 *vers); +int t4_get_vpd_version(struct adapter *adapter, u32 *vers); +int t4_get_version_info(struct adapter *adapter); +void t4_dump_version_info(struct adapter *adapter); int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, const u8 *fw_data, unsigned int fw_size, struct fw_hdr *card_fw, enum dev_state state, int *reset); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e403fa18f1b1..fdf220aa08d6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3610,11 +3610,8 @@ static int adap_init0(struct adapter *adap) * later reporting and B. to warn if the currently loaded firmware * is excessively mismatched relative to the driver.) */ - t4_get_fw_version(adap, &adap->params.fw_vers); - t4_get_bs_version(adap, &adap->params.bs_vers); - t4_get_tp_version(adap, &adap->params.tp_vers); - t4_get_exprom_version(adap, &adap->params.er_vers); + t4_get_version_info(adap); ret = t4_check_fw_version(adap); /* If firmware is too old (not supported by driver) force an update. */ if (ret) @@ -4560,56 +4557,8 @@ static void cxgb4_check_pcie_caps(struct adapter *adap) /* Dump basic information about the adapter */ static void print_adapter_info(struct adapter *adapter) { - /* Device information */ - dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n", - adapter->params.vpd.id, - CHELSIO_CHIP_RELEASE(adapter->params.chip)); - dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n", - adapter->params.vpd.sn, adapter->params.vpd.pn); - - /* Firmware Version */ - if (!adapter->params.fw_vers) - dev_warn(adapter->pdev_dev, "No firmware loaded\n"); - else - dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers)); - - /* Bootstrap Firmware Version. 
(Some adapters don't have Bootstrap - * Firmware, so dev_info() is more appropriate here.) - */ - if (!adapter->params.bs_vers) - dev_info(adapter->pdev_dev, "No bootstrap loaded\n"); - else - dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers)); - - /* TP Microcode Version */ - if (!adapter->params.tp_vers) - dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n"); - else - dev_info(adapter->pdev_dev, - "TP Microcode version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); - - /* Expansion ROM version */ - if (!adapter->params.er_vers) - dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n"); - else - dev_info(adapter->pdev_dev, - "Expansion ROM version: %u.%u.%u.%u\n", - FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers), - FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers), - FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers), - FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers)); + /* Hardware/Firmware/etc. Version/Revision IDs */ + t4_dump_version_info(adapter); /* Software/Hardware configuration */ dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n", diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 570c0958a0e1..db41b3e99b81 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3076,6 +3076,179 @@ int t4_get_exprom_version(struct adapter *adap, u32 *vers) return 0; } +/** + * t4_get_vpd_version - return the VPD version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the VPD via the Firmware interface (thus this can only be called + * once we're ready to issue Firmware commands). The format of the + * VPD version is adapter specific. Returns 0 on success, an error on + * failure. + * + * Note that early versions of the Firmware didn't include the ability + * to retrieve the VPD version, so we zero-out the return-value parameter + * in that case to avoid leaving it with garbage in it. + * + * Also note that the Firmware will return its cached copy of the VPD + * Revision ID, not the actual Revision ID as written in the Serial + * EEPROM. This is only an issue if a new VPD has been written and the + * Firmware/Chip haven't yet gone through a RESET sequence. So it's best + * to defer calling this routine till after a FW_RESET_CMD has been issued + * if the Host Driver will be performing a full adapter initialization. + */ +int t4_get_vpd_version(struct adapter *adapter, u32 *vers) +{ + u32 vpdrev_param; + int ret; + + vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV)); + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 1, &vpdrev_param, vers); + if (ret) + *vers = 0; + return ret; +} + +/** + * t4_get_scfg_version - return the Serial Configuration version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the Serial Configuration Version via the Firmware interface + * (thus this can only be called once we're ready to issue Firmware + * commands). The format of the Serial Configuration version is + * adapter specific. Returns 0 on success, an error on failure. 
+ * + * Note that early versions of the Firmware didn't include the ability + * to retrieve the Serial Configuration version, so we zero-out the + * return-value parameter in that case to avoid leaving it with + * garbage in it. + * + * Also note that the Firmware will return its cached copy of the Serial + * Initialization Revision ID, not the actual Revision ID as written in + * the Serial EEPROM. This is only an issue if a new VPD has been written + * and the Firmware/Chip haven't yet gone through a RESET sequence. So + * it's best to defer calling this routine till after a FW_RESET_CMD has + * been issued if the Host Driver will be performing a full adapter + * initialization. + */ +int t4_get_scfg_version(struct adapter *adapter, u32 *vers) +{ + u32 scfgrev_param; + int ret; + + scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV)); + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 1, &scfgrev_param, vers); + if (ret) + *vers = 0; + return ret; +} + +/** + * t4_get_version_info - extract various chip/firmware version information + * @adapter: the adapter + * + * Reads various chip/firmware version numbers and stores them into the + * adapter Adapter Parameters structure. If any of the efforts fails + * the first failure will be returned, but all of the version numbers + * will be read. + */ +int t4_get_version_info(struct adapter *adapter) +{ + int ret = 0; + + #define FIRST_RET(__getvinfo) \ + do { \ + int __ret = __getvinfo; \ + if (__ret && !ret) \ + ret = __ret; \ + } while (0) + + FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers)); + FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers)); + FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers)); + FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers)); + FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers)); + FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers)); + + #undef FIRST_RET + return ret; +} + +/** + * t4_dump_version_info - dump all of the adapter configuration IDs + * @adapter: the adapter + * + * Dumps all of the various bits of adapter configuration version/revision + * IDs information. This is typically called at some point after + * t4_get_version_info() has been called. + */ +void t4_dump_version_info(struct adapter *adapter) +{ + /* Device information */ + dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n", + adapter->params.vpd.id, + CHELSIO_CHIP_RELEASE(adapter->params.chip)); + dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n", + adapter->params.vpd.sn, adapter->params.vpd.pn); + + /* Firmware Version */ + if (!adapter->params.fw_vers) + dev_warn(adapter->pdev_dev, "No firmware loaded\n"); + else + dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers)); + + /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap + * Firmware, so dev_info() is more appropriate here.) 
+ */ + if (!adapter->params.bs_vers) + dev_info(adapter->pdev_dev, "No bootstrap loaded\n"); + else + dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers)); + + /* TP Microcode Version */ + if (!adapter->params.tp_vers) + dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n"); + else + dev_info(adapter->pdev_dev, + "TP Microcode version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers)); + + /* Expansion ROM version */ + if (!adapter->params.er_vers) + dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n"); + else + dev_info(adapter->pdev_dev, + "Expansion ROM version: %u.%u.%u.%u\n", + FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers), + FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers), + FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers), + FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers)); + + /* Serial Configuration version */ + dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n", + adapter->params.scfg_vers); + + /* VPD Version */ + dev_info(adapter->pdev_dev, "VPD version: %#x\n", + adapter->params.vpd_vers); +} + /** * t4_check_fw_version - check if the FW is supported with this driver * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 0ebed64d62d3..ad825fbc21a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1124,6 +1124,8 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, FW_PARAMS_PARAM_DEV_FWCACHE = 0x18, + FW_PARAMS_PARAM_DEV_SCFGREV = 0x1A, + FW_PARAMS_PARAM_DEV_VPDREV = 0x1B, FW_PARAMS_PARAM_DEV_RI_FR_NSMR_TPTE_WR = 0x1C, FW_PARAMS_PARAM_DEV_MPSBGMAP = 0x1E, }; -- cgit v1.2.3-55-g7522 From 727f8914477e4642c7d1ff381667cdc4178b40c6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 21 Jul 2017 10:39:26 +0100 Subject: rxrpc: Expose UAPI definitions to userspace Move UAPI definitions from the internal header and place them in a UAPI header file so that userspace can make use of them. Signed-off-by: David Howells --- include/linux/rxrpc.h | 79 --------------------------------------------- include/uapi/linux/rxrpc.h | 80 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 80 insertions(+), 79 deletions(-) delete mode 100644 include/linux/rxrpc.h create mode 100644 include/uapi/linux/rxrpc.h diff --git a/include/linux/rxrpc.h b/include/linux/rxrpc.h deleted file mode 100644 index 7343f71783dc..000000000000 --- a/include/linux/rxrpc.h +++ /dev/null @@ -1,79 +0,0 @@ -/* AF_RXRPC parameters - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef _LINUX_RXRPC_H -#define _LINUX_RXRPC_H - -#include -#include - -/* - * RxRPC socket address - */ -struct sockaddr_rxrpc { - sa_family_t srx_family; /* address family */ - u16 srx_service; /* service desired */ - u16 transport_type; /* type of transport socket (SOCK_DGRAM) */ - u16 transport_len; /* length of transport address */ - union { - sa_family_t family; /* transport address family */ - struct sockaddr_in sin; /* IPv4 transport address */ - struct sockaddr_in6 sin6; /* IPv6 transport address */ - } transport; -}; - -/* - * RxRPC socket options - */ -#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */ -#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ -#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */ -#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ -#define RXRPC_UPGRADEABLE_SERVICE 5 /* Upgrade service[0] -> service[1] */ -#define RXRPC_SUPPORTED_CMSG 6 /* Get highest supported control message type */ - -/* - * RxRPC control messages - * - If neither abort or accept are specified, the message is a data message. - * - terminal messages mean that a user call ID tag can be recycled - * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg() - */ -enum rxrpc_cmsg_type { - RXRPC_USER_CALL_ID = 1, /* sr: user call ID specifier */ - RXRPC_ABORT = 2, /* sr: abort request / notification [terminal] */ - RXRPC_ACK = 3, /* -r: [Service] RPC op final ACK received [terminal] */ - RXRPC_NET_ERROR = 5, /* -r: network error received [terminal] */ - RXRPC_BUSY = 6, /* -r: server busy received [terminal] */ - RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */ - RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */ - RXRPC_ACCEPT = 9, /* s-: [Service] accept request */ - RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */ - RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */ - RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */ - RXRPC__SUPPORTED -}; - -/* - * RxRPC security levels - */ -#define RXRPC_SECURITY_PLAIN 0 /* plain secure-checksummed packets only */ -#define RXRPC_SECURITY_AUTH 1 /* authenticated packets */ -#define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */ - -/* - * RxRPC security indices - */ -#define RXRPC_SECURITY_NONE 0 /* no security protocol */ -#define RXRPC_SECURITY_RXKAD 2 /* kaserver or kerberos 4 */ -#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */ -#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */ - -#endif /* _LINUX_RXRPC_H */ diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h new file mode 100644 index 000000000000..08e2fb9c70ae --- /dev/null +++ b/include/uapi/linux/rxrpc.h @@ -0,0 +1,80 @@ +/* Types and definitions for AF_RXRPC. + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#ifndef _UAPI_LINUX_RXRPC_H +#define _UAPI_LINUX_RXRPC_H + +#include +#include +#include + +/* + * RxRPC socket address + */ +struct sockaddr_rxrpc { + sa_family_t srx_family; /* address family */ + u16 srx_service; /* service desired */ + u16 transport_type; /* type of transport socket (SOCK_DGRAM) */ + u16 transport_len; /* length of transport address */ + union { + sa_family_t family; /* transport address family */ + struct sockaddr_in sin; /* IPv4 transport address */ + struct sockaddr_in6 sin6; /* IPv6 transport address */ + } transport; +}; + +/* + * RxRPC socket options + */ +#define RXRPC_SECURITY_KEY 1 /* [clnt] set client security key */ +#define RXRPC_SECURITY_KEYRING 2 /* [srvr] set ring of server security keys */ +#define RXRPC_EXCLUSIVE_CONNECTION 3 /* Deprecated; use RXRPC_EXCLUSIVE_CALL instead */ +#define RXRPC_MIN_SECURITY_LEVEL 4 /* minimum security level */ +#define RXRPC_UPGRADEABLE_SERVICE 5 /* Upgrade service[0] -> service[1] */ +#define RXRPC_SUPPORTED_CMSG 6 /* Get highest supported control message type */ + +/* + * RxRPC control messages + * - If neither abort or accept are specified, the message is a data message. + * - terminal messages mean that a user call ID tag can be recycled + * - s/r/- indicate whether these are applicable to sendmsg() and/or recvmsg() + */ +enum rxrpc_cmsg_type { + RXRPC_USER_CALL_ID = 1, /* sr: user call ID specifier */ + RXRPC_ABORT = 2, /* sr: abort request / notification [terminal] */ + RXRPC_ACK = 3, /* -r: [Service] RPC op final ACK received [terminal] */ + RXRPC_NET_ERROR = 5, /* -r: network error received [terminal] */ + RXRPC_BUSY = 6, /* -r: server busy received [terminal] */ + RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */ + RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */ + RXRPC_ACCEPT = 9, /* s-: [Service] accept request */ + RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */ + RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */ + RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */ + RXRPC__SUPPORTED +}; + +/* + * RxRPC security levels + */ +#define RXRPC_SECURITY_PLAIN 0 /* plain secure-checksummed packets only */ +#define RXRPC_SECURITY_AUTH 1 /* authenticated packets */ +#define RXRPC_SECURITY_ENCRYPT 2 /* encrypted packets */ + +/* + * RxRPC security indices + */ +#define RXRPC_SECURITY_NONE 0 /* no security protocol */ +#define RXRPC_SECURITY_RXKAD 2 /* kaserver or kerberos 4 */ +#define RXRPC_SECURITY_RXGK 4 /* gssapi-based */ +#define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */ + +#endif /* _UAPI_LINUX_RXRPC_H */ -- cgit v1.2.3-55-g7522 From ddc6c70f07bb1f6dd39a2c6c430f7b4fa95199c8 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 21 Jul 2017 10:07:10 +0100 Subject: rxrpc: Move the packet.h include file into net/rxrpc/ Move the protocol description header file into net/rxrpc/ and rename it to protocol.h. It's no longer necessary to expose it as packets are no longer exposed to kernel services (such as AFS) that use the facility. The abort codes are transferred to the UAPI header instead as we pass these back to userspace and also to kernel services. 
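(With the definitions under include/uapi, a userspace AF_RXRPC client can pick up sockaddr_rxrpc and the RX_*/RXKAD* abort codes directly from <linux/rxrpc.h>. The following is a minimal sketch, assuming installed sanitized kernel headers; the service and port values are arbitrary and error handling is kept to a bare minimum. Note the struct as shown still uses the kernel-internal u16 type, so some header sets may need a small shim.)

    #include <linux/rxrpc.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
            struct sockaddr_rxrpc srx = {
                    .srx_family     = AF_RXRPC,
                    .srx_service    = 0,          /* 0 = client; a server binds a real service ID */
                    .transport_type = SOCK_DGRAM,
                    .transport_len  = sizeof(srx.transport.sin),
                    .transport.sin.sin_family = AF_INET,
                    .transport.sin.sin_port   = htons(0),
            };
            int fd = socket(AF_RXRPC, SOCK_DGRAM, PF_INET);

            if (fd < 0 || bind(fd, (struct sockaddr *)&srx, sizeof(srx)) < 0) {
                    perror("rxrpc");
                    return 1;
            }
            printf("RX_CALL_TIMEOUT = %d\n", RX_CALL_TIMEOUT);  /* -3, now from the UAPI header */
            return 0;
    }
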
Signed-off-by: David Howells --- fs/afs/misc.c | 1 - fs/afs/rxrpc.c | 1 - include/rxrpc/packet.h | 235 --------------------------------------------- include/uapi/linux/rxrpc.h | 44 +++++++++ net/rxrpc/ar-internal.h | 2 +- net/rxrpc/protocol.h | 190 ++++++++++++++++++++++++++++++++++++ 6 files changed, 235 insertions(+), 238 deletions(-) delete mode 100644 include/rxrpc/packet.h create mode 100644 net/rxrpc/protocol.h diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 100b207efc9e..c05f1f1c0d41 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -12,7 +12,6 @@ #include #include #include -#include #include "internal.h" #include "afs_fs.h" diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 02781e78ffb6..10743043d431 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -14,7 +14,6 @@ #include #include -#include #include "internal.h" #include "afs_cm.h" diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h deleted file mode 100644 index a2dcfb850b9f..000000000000 --- a/include/rxrpc/packet.h +++ /dev/null @@ -1,235 +0,0 @@ -/* packet.h: Rx packet layout and definitions - * - * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef _LINUX_RXRPC_PACKET_H -#define _LINUX_RXRPC_PACKET_H - -typedef u32 rxrpc_seq_t; /* Rx message sequence number */ -typedef u32 rxrpc_serial_t; /* Rx message serial number */ -typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */ -typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */ - -/*****************************************************************************/ -/* - * on-the-wire Rx packet header - * - all multibyte fields should be in network byte order - */ -struct rxrpc_wire_header { - __be32 epoch; /* client boot timestamp */ -#define RXRPC_RANDOM_EPOCH 0x80000000 /* Random if set, date-based if not */ - - __be32 cid; /* connection and channel ID */ -#define RXRPC_MAXCALLS 4 /* max active calls per conn */ -#define RXRPC_CHANNELMASK (RXRPC_MAXCALLS-1) /* mask for channel ID */ -#define RXRPC_CIDMASK (~RXRPC_CHANNELMASK) /* mask for connection ID */ -#define RXRPC_CIDSHIFT ilog2(RXRPC_MAXCALLS) /* shift for connection ID */ -#define RXRPC_CID_INC (1 << RXRPC_CIDSHIFT) /* connection ID increment */ - - __be32 callNumber; /* call ID (0 for connection-level packets) */ - __be32 seq; /* sequence number of pkt in call stream */ - __be32 serial; /* serial number of pkt sent to network */ - - uint8_t type; /* packet type */ -#define RXRPC_PACKET_TYPE_DATA 1 /* data */ -#define RXRPC_PACKET_TYPE_ACK 2 /* ACK */ -#define RXRPC_PACKET_TYPE_BUSY 3 /* call reject */ -#define RXRPC_PACKET_TYPE_ABORT 4 /* call/connection abort */ -#define RXRPC_PACKET_TYPE_ACKALL 5 /* ACK all outstanding packets on call */ -#define RXRPC_PACKET_TYPE_CHALLENGE 6 /* connection security challenge (SRVR->CLNT) */ -#define RXRPC_PACKET_TYPE_RESPONSE 7 /* connection secutity response (CLNT->SRVR) */ -#define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */ -#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */ -#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */ - - uint8_t flags; /* packet flags */ -#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */ 
-#define RXRPC_REQUEST_ACK 0x02 /* request an unconditional ACK of this packet */ -#define RXRPC_LAST_PACKET 0x04 /* the last packet from this side for this call */ -#define RXRPC_MORE_PACKETS 0x08 /* more packets to come */ -#define RXRPC_JUMBO_PACKET 0x20 /* [DATA] this is a jumbo packet */ -#define RXRPC_SLOW_START_OK 0x20 /* [ACK] slow start supported */ - - uint8_t userStatus; /* app-layer defined status */ -#define RXRPC_USERSTATUS_SERVICE_UPGRADE 0x01 /* AuriStor service upgrade request */ - - uint8_t securityIndex; /* security protocol ID */ - union { - __be16 _rsvd; /* reserved */ - __be16 cksum; /* kerberos security checksum */ - }; - __be16 serviceId; /* service ID */ - -} __packed; - -#define RXRPC_SUPPORTED_PACKET_TYPES ( \ - (1 << RXRPC_PACKET_TYPE_DATA) | \ - (1 << RXRPC_PACKET_TYPE_ACK) | \ - (1 << RXRPC_PACKET_TYPE_BUSY) | \ - (1 << RXRPC_PACKET_TYPE_ABORT) | \ - (1 << RXRPC_PACKET_TYPE_ACKALL) | \ - (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \ - (1 << RXRPC_PACKET_TYPE_RESPONSE) | \ - /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \ - (1 << RXRPC_PACKET_TYPE_VERSION)) - -/*****************************************************************************/ -/* - * jumbo packet secondary header - * - can be mapped to read header by: - * - new_serial = serial + 1 - * - new_seq = seq + 1 - * - new_flags = j_flags - * - new__rsvd = j__rsvd - * - duplicating all other fields - */ -struct rxrpc_jumbo_header { - uint8_t flags; /* packet flags (as per rxrpc_header) */ - uint8_t pad; - union { - __be16 _rsvd; /* reserved */ - __be16 cksum; /* kerberos security checksum */ - }; -}; - -#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ -#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) - -/*****************************************************************************/ -/* - * on-the-wire Rx ACK packet data payload - * - all multibyte fields should be in network byte order - */ -struct rxrpc_ackpacket { - __be16 bufferSpace; /* number of packet buffers available */ - __be16 maxSkew; /* diff between serno being ACK'd and highest serial no - * received */ - __be32 firstPacket; /* sequence no of first ACK'd packet in attached list */ - __be32 previousPacket; /* sequence no of previous packet received */ - __be32 serial; /* serial no of packet that prompted this ACK */ - - uint8_t reason; /* reason for ACK */ -#define RXRPC_ACK_REQUESTED 1 /* ACK was requested on packet */ -#define RXRPC_ACK_DUPLICATE 2 /* duplicate packet received */ -#define RXRPC_ACK_OUT_OF_SEQUENCE 3 /* out of sequence packet received */ -#define RXRPC_ACK_EXCEEDS_WINDOW 4 /* packet received beyond end of ACK window */ -#define RXRPC_ACK_NOSPACE 5 /* packet discarded due to lack of buffer space */ -#define RXRPC_ACK_PING 6 /* keep alive ACK */ -#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */ -#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */ -#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */ -#define RXRPC_ACK__INVALID 10 /* Representation of invalid ACK reason */ - - uint8_t nAcks; /* number of ACKs */ -#define RXRPC_MAXACKS 255 - - uint8_t acks[0]; /* list of ACK/NAKs */ -#define RXRPC_ACK_TYPE_NACK 0 -#define RXRPC_ACK_TYPE_ACK 1 - -} __packed; - -/* Some ACKs refer to specific packets and some are general and can be updated. 
*/ -#define RXRPC_ACK_UPDATEABLE ((1 << RXRPC_ACK_REQUESTED) | \ - (1 << RXRPC_ACK_PING_RESPONSE) | \ - (1 << RXRPC_ACK_DELAY) | \ - (1 << RXRPC_ACK_IDLE)) - - -/* - * ACK packets can have a further piece of information tagged on the end - */ -struct rxrpc_ackinfo { - __be32 rxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */ - __be32 maxMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */ - __be32 rwind; /* Rx window size (packets) [AFS 3.4] */ - __be32 jumbo_max; /* max packets to stick into a jumbo packet [AFS 3.5] */ -}; - -/*****************************************************************************/ -/* - * Kerberos security type-2 challenge packet - */ -struct rxkad_challenge { - __be32 version; /* version of this challenge type */ - __be32 nonce; /* encrypted random number */ - __be32 min_level; /* minimum security level */ - __be32 __padding; /* padding to 8-byte boundary */ -} __packed; - -/*****************************************************************************/ -/* - * Kerberos security type-2 response packet - */ -struct rxkad_response { - __be32 version; /* version of this response type */ - __be32 __pad; - - /* encrypted bit of the response */ - struct { - __be32 epoch; /* current epoch */ - __be32 cid; /* parent connection ID */ - __be32 checksum; /* checksum */ - __be32 securityIndex; /* security type */ - __be32 call_id[4]; /* encrypted call IDs */ - __be32 inc_nonce; /* challenge nonce + 1 */ - __be32 level; /* desired level */ - } encrypted; - - __be32 kvno; /* Kerberos key version number */ - __be32 ticket_len; /* Kerberos ticket length */ -} __packed; - -/*****************************************************************************/ -/* - * RxRPC-level abort codes - */ -#define RX_CALL_DEAD -1 /* call/conn has been inactive and is shut down */ -#define RX_INVALID_OPERATION -2 /* invalid operation requested / attempted */ -#define RX_CALL_TIMEOUT -3 /* call timeout exceeded */ -#define RX_EOF -4 /* unexpected end of data on read op */ -#define RX_PROTOCOL_ERROR -5 /* low-level protocol error */ -#define RX_USER_ABORT -6 /* generic user abort */ -#define RX_ADDRINUSE -7 /* UDP port in use */ -#define RX_DEBUGI_BADTYPE -8 /* bad debugging packet type */ - -/* - * (un)marshalling abort codes (rxgen) - */ -#define RXGEN_CC_MARSHAL -450 -#define RXGEN_CC_UNMARSHAL -451 -#define RXGEN_SS_MARSHAL -452 -#define RXGEN_SS_UNMARSHAL -453 -#define RXGEN_DECODE -454 -#define RXGEN_OPCODE -455 -#define RXGEN_SS_XDRFREE -456 -#define RXGEN_CC_XDRFREE -457 - -/* - * Rx kerberos security abort codes - * - unfortunately we have no generalised security abort codes to say things - * like "unsupported security", so we have to use these instead and hope the - * other side understands - */ -#define RXKADINCONSISTENCY 19270400 /* security module structure inconsistent */ -#define RXKADPACKETSHORT 19270401 /* packet too short for security challenge */ -#define RXKADLEVELFAIL 19270402 /* security level negotiation failed */ -#define RXKADTICKETLEN 19270403 /* ticket length too short or too long */ -#define RXKADOUTOFSEQUENCE 19270404 /* packet had bad sequence number */ -#define RXKADNOAUTH 19270405 /* caller not authorised */ -#define RXKADBADKEY 19270406 /* illegal key: bad parity or weak */ -#define RXKADBADTICKET 19270407 /* security object was passed a bad ticket */ -#define RXKADUNKNOWNKEY 19270408 /* ticket contained unknown key version number */ -#define RXKADEXPIRED 19270409 /* authentication expired */ -#define RXKADSEALEDINCON 19270410 /* sealed data inconsistent */ -#define 
RXKADDATALEN 19270411 /* user data too long */ -#define RXKADILLEGALLEVEL 19270412 /* caller not authorised to use encrypted conns */ - -#endif /* _LINUX_RXRPC_PACKET_H */ diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h index 08e2fb9c70ae..9656aad8f8f7 100644 --- a/include/uapi/linux/rxrpc.h +++ b/include/uapi/linux/rxrpc.h @@ -77,4 +77,48 @@ enum rxrpc_cmsg_type { #define RXRPC_SECURITY_RXGK 4 /* gssapi-based */ #define RXRPC_SECURITY_RXK5 5 /* kerberos 5 */ +/* + * RxRPC-level abort codes + */ +#define RX_CALL_DEAD -1 /* call/conn has been inactive and is shut down */ +#define RX_INVALID_OPERATION -2 /* invalid operation requested / attempted */ +#define RX_CALL_TIMEOUT -3 /* call timeout exceeded */ +#define RX_EOF -4 /* unexpected end of data on read op */ +#define RX_PROTOCOL_ERROR -5 /* low-level protocol error */ +#define RX_USER_ABORT -6 /* generic user abort */ +#define RX_ADDRINUSE -7 /* UDP port in use */ +#define RX_DEBUGI_BADTYPE -8 /* bad debugging packet type */ + +/* + * (un)marshalling abort codes (rxgen) + */ +#define RXGEN_CC_MARSHAL -450 +#define RXGEN_CC_UNMARSHAL -451 +#define RXGEN_SS_MARSHAL -452 +#define RXGEN_SS_UNMARSHAL -453 +#define RXGEN_DECODE -454 +#define RXGEN_OPCODE -455 +#define RXGEN_SS_XDRFREE -456 +#define RXGEN_CC_XDRFREE -457 + +/* + * Rx kerberos security abort codes + * - unfortunately we have no generalised security abort codes to say things + * like "unsupported security", so we have to use these instead and hope the + * other side understands + */ +#define RXKADINCONSISTENCY 19270400 /* security module structure inconsistent */ +#define RXKADPACKETSHORT 19270401 /* packet too short for security challenge */ +#define RXKADLEVELFAIL 19270402 /* security level negotiation failed */ +#define RXKADTICKETLEN 19270403 /* ticket length too short or too long */ +#define RXKADOUTOFSEQUENCE 19270404 /* packet had bad sequence number */ +#define RXKADNOAUTH 19270405 /* caller not authorised */ +#define RXKADBADKEY 19270406 /* illegal key: bad parity or weak */ +#define RXKADBADTICKET 19270407 /* security object was passed a bad ticket */ +#define RXKADUNKNOWNKEY 19270408 /* ticket contained unknown key version number */ +#define RXKADEXPIRED 19270409 /* authentication expired */ +#define RXKADSEALEDINCON 19270410 /* sealed data inconsistent */ +#define RXKADDATALEN 19270411 /* user data too long */ +#define RXKADILLEGALLEVEL 19270412 /* caller not authorised to use encrypted conns */ + #endif /* _UAPI_LINUX_RXRPC_H */ diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index 69b97339ff9d..8c0db9b3e4ab 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -15,7 +15,7 @@ #include #include #include -#include +#include "protocol.h" #if 0 #define CHECK_SLAB_OKAY(X) \ diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h new file mode 100644 index 000000000000..4bddcf3face3 --- /dev/null +++ b/net/rxrpc/protocol.h @@ -0,0 +1,190 @@ +/* packet.h: Rx packet layout and definitions + * + * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _LINUX_RXRPC_PACKET_H +#define _LINUX_RXRPC_PACKET_H + +typedef u32 rxrpc_seq_t; /* Rx message sequence number */ +typedef u32 rxrpc_serial_t; /* Rx message serial number */ +typedef __be32 rxrpc_seq_net_t; /* on-the-wire Rx message sequence number */ +typedef __be32 rxrpc_serial_net_t; /* on-the-wire Rx message serial number */ + +/*****************************************************************************/ +/* + * on-the-wire Rx packet header + * - all multibyte fields should be in network byte order + */ +struct rxrpc_wire_header { + __be32 epoch; /* client boot timestamp */ +#define RXRPC_RANDOM_EPOCH 0x80000000 /* Random if set, date-based if not */ + + __be32 cid; /* connection and channel ID */ +#define RXRPC_MAXCALLS 4 /* max active calls per conn */ +#define RXRPC_CHANNELMASK (RXRPC_MAXCALLS-1) /* mask for channel ID */ +#define RXRPC_CIDMASK (~RXRPC_CHANNELMASK) /* mask for connection ID */ +#define RXRPC_CIDSHIFT ilog2(RXRPC_MAXCALLS) /* shift for connection ID */ +#define RXRPC_CID_INC (1 << RXRPC_CIDSHIFT) /* connection ID increment */ + + __be32 callNumber; /* call ID (0 for connection-level packets) */ + __be32 seq; /* sequence number of pkt in call stream */ + __be32 serial; /* serial number of pkt sent to network */ + + uint8_t type; /* packet type */ +#define RXRPC_PACKET_TYPE_DATA 1 /* data */ +#define RXRPC_PACKET_TYPE_ACK 2 /* ACK */ +#define RXRPC_PACKET_TYPE_BUSY 3 /* call reject */ +#define RXRPC_PACKET_TYPE_ABORT 4 /* call/connection abort */ +#define RXRPC_PACKET_TYPE_ACKALL 5 /* ACK all outstanding packets on call */ +#define RXRPC_PACKET_TYPE_CHALLENGE 6 /* connection security challenge (SRVR->CLNT) */ +#define RXRPC_PACKET_TYPE_RESPONSE 7 /* connection secutity response (CLNT->SRVR) */ +#define RXRPC_PACKET_TYPE_DEBUG 8 /* debug info request */ +#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */ +#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */ + + uint8_t flags; /* packet flags */ +#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */ +#define RXRPC_REQUEST_ACK 0x02 /* request an unconditional ACK of this packet */ +#define RXRPC_LAST_PACKET 0x04 /* the last packet from this side for this call */ +#define RXRPC_MORE_PACKETS 0x08 /* more packets to come */ +#define RXRPC_JUMBO_PACKET 0x20 /* [DATA] this is a jumbo packet */ +#define RXRPC_SLOW_START_OK 0x20 /* [ACK] slow start supported */ + + uint8_t userStatus; /* app-layer defined status */ +#define RXRPC_USERSTATUS_SERVICE_UPGRADE 0x01 /* AuriStor service upgrade request */ + + uint8_t securityIndex; /* security protocol ID */ + union { + __be16 _rsvd; /* reserved */ + __be16 cksum; /* kerberos security checksum */ + }; + __be16 serviceId; /* service ID */ + +} __packed; + +#define RXRPC_SUPPORTED_PACKET_TYPES ( \ + (1 << RXRPC_PACKET_TYPE_DATA) | \ + (1 << RXRPC_PACKET_TYPE_ACK) | \ + (1 << RXRPC_PACKET_TYPE_BUSY) | \ + (1 << RXRPC_PACKET_TYPE_ABORT) | \ + (1 << RXRPC_PACKET_TYPE_ACKALL) | \ + (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \ + (1 << RXRPC_PACKET_TYPE_RESPONSE) | \ + /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \ + (1 << RXRPC_PACKET_TYPE_VERSION)) + +/*****************************************************************************/ +/* + * jumbo packet secondary header + * - can be mapped to read header by: + * - new_serial = serial + 1 + * - new_seq = seq + 1 + * - new_flags = j_flags + * - new__rsvd = j__rsvd + * - duplicating all other fields + */ +struct rxrpc_jumbo_header { + uint8_t flags; /* packet flags (as per 
rxrpc_header) */ + uint8_t pad; + union { + __be16 _rsvd; /* reserved */ + __be16 cksum; /* kerberos security checksum */ + }; +}; + +#define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ +#define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) + +/*****************************************************************************/ +/* + * on-the-wire Rx ACK packet data payload + * - all multibyte fields should be in network byte order + */ +struct rxrpc_ackpacket { + __be16 bufferSpace; /* number of packet buffers available */ + __be16 maxSkew; /* diff between serno being ACK'd and highest serial no + * received */ + __be32 firstPacket; /* sequence no of first ACK'd packet in attached list */ + __be32 previousPacket; /* sequence no of previous packet received */ + __be32 serial; /* serial no of packet that prompted this ACK */ + + uint8_t reason; /* reason for ACK */ +#define RXRPC_ACK_REQUESTED 1 /* ACK was requested on packet */ +#define RXRPC_ACK_DUPLICATE 2 /* duplicate packet received */ +#define RXRPC_ACK_OUT_OF_SEQUENCE 3 /* out of sequence packet received */ +#define RXRPC_ACK_EXCEEDS_WINDOW 4 /* packet received beyond end of ACK window */ +#define RXRPC_ACK_NOSPACE 5 /* packet discarded due to lack of buffer space */ +#define RXRPC_ACK_PING 6 /* keep alive ACK */ +#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */ +#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */ +#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */ +#define RXRPC_ACK__INVALID 10 /* Representation of invalid ACK reason */ + + uint8_t nAcks; /* number of ACKs */ +#define RXRPC_MAXACKS 255 + + uint8_t acks[0]; /* list of ACK/NAKs */ +#define RXRPC_ACK_TYPE_NACK 0 +#define RXRPC_ACK_TYPE_ACK 1 + +} __packed; + +/* Some ACKs refer to specific packets and some are general and can be updated. 
*/ +#define RXRPC_ACK_UPDATEABLE ((1 << RXRPC_ACK_REQUESTED) | \ + (1 << RXRPC_ACK_PING_RESPONSE) | \ + (1 << RXRPC_ACK_DELAY) | \ + (1 << RXRPC_ACK_IDLE)) + + +/* + * ACK packets can have a further piece of information tagged on the end + */ +struct rxrpc_ackinfo { + __be32 rxMTU; /* maximum Rx MTU size (bytes) [AFS 3.3] */ + __be32 maxMTU; /* maximum interface MTU size (bytes) [AFS 3.3] */ + __be32 rwind; /* Rx window size (packets) [AFS 3.4] */ + __be32 jumbo_max; /* max packets to stick into a jumbo packet [AFS 3.5] */ +}; + +/*****************************************************************************/ +/* + * Kerberos security type-2 challenge packet + */ +struct rxkad_challenge { + __be32 version; /* version of this challenge type */ + __be32 nonce; /* encrypted random number */ + __be32 min_level; /* minimum security level */ + __be32 __padding; /* padding to 8-byte boundary */ +} __packed; + +/*****************************************************************************/ +/* + * Kerberos security type-2 response packet + */ +struct rxkad_response { + __be32 version; /* version of this response type */ + __be32 __pad; + + /* encrypted bit of the response */ + struct { + __be32 epoch; /* current epoch */ + __be32 cid; /* parent connection ID */ + __be32 checksum; /* checksum */ + __be32 securityIndex; /* security type */ + __be32 call_id[4]; /* encrypted call IDs */ + __be32 inc_nonce; /* challenge nonce + 1 */ + __be32 level; /* desired level */ + } encrypted; + + __be32 kvno; /* Kerberos key version number */ + __be32 ticket_len; /* Kerberos ticket length */ +} __packed; + +#endif /* _LINUX_RXRPC_PACKET_H */ -- cgit v1.2.3-55-g7522 From d98422cb6619408f1779457fefc5da983fc6bc3b Mon Sep 17 00:00:00 2001 From: Derek Robson Date: Sat, 22 Jul 2017 13:47:07 +1200 Subject: Bluetooth: Style fix - align block comments Fixed alignment of all block comments. Found using checkpatch Signed-off-by: Derek Robson Signed-off-by: Marcel Holtmann --- drivers/bluetooth/ath3k.c | 3 ++- drivers/bluetooth/bt3c_cs.c | 8 +++++--- drivers/bluetooth/btmrvl_sdio.c | 6 ++++-- drivers/bluetooth/btsdio.c | 3 ++- drivers/bluetooth/btuart_cs.c | 8 +++++--- drivers/bluetooth/btusb.c | 15 ++++++++++----- drivers/bluetooth/btwilink.c | 6 +++--- drivers/bluetooth/hci_ldisc.c | 3 ++- drivers/bluetooth/hci_ll.c | 3 ++- 9 files changed, 35 insertions(+), 20 deletions(-) diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index b793853ff05f..204afe66de92 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -140,7 +140,8 @@ MODULE_DEVICE_TABLE(usb, ath3k_table); #define BTUSB_ATH3012 0x80 /* This table is to load patch and sysconfig files - * for AR3012 */ + * for AR3012 + */ static const struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 32dcac017395..194788739a83 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -684,14 +684,16 @@ static int bt3c_config(struct pcmcia_device *link) unsigned long try; /* First pass: look for a config entry that looks normal. - Two tries: without IO aliases, then with aliases */ + * Two tries: without IO aliases, then with aliases + */ for (try = 0; try < 2; try++) if (!pcmcia_loop_config(link, bt3c_check_config, (void *) try)) goto found_port; /* Second pass: try to find an entry that isn't picky about - its base address, then try to grab any standard serial port - address, and finally try to get any free port. 
*/ + * its base address, then try to grab any standard serial port + * address, and finally try to get any free port. + */ if (!pcmcia_loop_config(link, bt3c_check_config_notpicky, NULL)) goto found_port; diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index eb794f08b238..03341ce98c32 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -1455,7 +1455,8 @@ done: fw_dump_ptr = fw_dump_data; /* Dump all the memory data into single file, a userspace script will - be used to split all the memory data to multiple files*/ + * be used to split all the memory data to multiple files + */ BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump start"); for (idx = 0; idx < dump_num; idx++) { struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; @@ -1482,7 +1483,8 @@ done: } /* fw_dump_data will be free in device coredump release function - after 5 min*/ + * after 5 min + */ dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL); BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end"); } diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 1cb958e199eb..c8e945d19ffe 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -144,7 +144,8 @@ static int btsdio_rx_packet(struct btsdio_data *data) if (!skb) { /* Out of memory. Prepare a read retry and just * return with the expectation that the next time - * we're called we'll have more memory. */ + * we're called we'll have more memory. + */ return -ENOMEM; } diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c index 7df79bb12350..310e9c2e09b6 100644 --- a/drivers/bluetooth/btuart_cs.c +++ b/drivers/bluetooth/btuart_cs.c @@ -614,14 +614,16 @@ static int btuart_config(struct pcmcia_device *link) int try; /* First pass: look for a config entry that looks normal. - Two tries: without IO aliases, then with aliases */ + * Two tries: without IO aliases, then with aliases + */ for (try = 0; try < 2; try++) if (!pcmcia_loop_config(link, btuart_check_config, &try)) goto found_port; /* Second pass: try to find an entry that isn't picky about - its base address, then try to grab any standard serial port - address, and finally try to get any free port. */ + * its base address, then try to grab any standard serial port + * address, and finally try to get any free port. 
+ */ if (!pcmcia_loop_config(link, btuart_check_config_notpicky, NULL)) goto found_port; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 0d533b258aa6..154237c78119 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -657,7 +657,8 @@ static void btusb_intr_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -746,7 +747,8 @@ static void btusb_bulk_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -841,7 +843,8 @@ static void btusb_isoc_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -953,7 +956,8 @@ static void btusb_diag_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected */ + * -ENODEV: device got disconnected + */ if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); @@ -2897,7 +2901,8 @@ static int btusb_probe(struct usb_interface *intf, struct usb_device *udev = interface_to_usbdev(intf); /* Old firmware would otherwise let ath3k driver load - * patch and sysconfig files */ + * patch and sysconfig files + */ if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001) return -ENODEV; } diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c index 0cdb8961e9a1..5ef8000f90a9 100644 --- a/drivers/bluetooth/btwilink.c +++ b/drivers/bluetooth/btwilink.c @@ -93,8 +93,7 @@ static void st_reg_completion_cb(void *priv_data, int data) complete(&lhst->wait_reg_completion); } -/* Called by Shared Transport layer when receive data is - * available */ +/* Called by Shared Transport layer when receive data is available */ static long st_receive(void *priv_data, struct sk_buff *skb) { struct ti_st *lhst = priv_data; @@ -198,7 +197,8 @@ static int ti_st_open(struct hci_dev *hdev) } /* Is ST registration callback - * called with ERROR status? */ + * called with ERROR status? 
+ */ if (hst->reg_status != 0) { BT_ERR("ST registration completed with invalid " "status %d", hst->reg_status); diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 8397b716fa65..a746627e784e 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -457,7 +457,8 @@ static int hci_uart_tty_open(struct tty_struct *tty) BT_DBG("tty %p", tty); /* Error if the tty has no write op instead of leaving an exploitable - hole */ + * hole + */ if (tty->ops->write == NULL) return -EOPNOTSUPP; diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 1b898445a0b8..424c15aa7bb7 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -622,7 +622,8 @@ static int download_firmware(struct ll_device *lldev) cmd = (struct hci_command *)action_ptr; if (cmd->opcode == 0xff36) { /* ignore remote change - * baud rate HCI VS command */ + * baud rate HCI VS command + */ bt_dev_warn(lldev->hu.hdev, "change remote baud rate command in firmware"); break; } -- cgit v1.2.3-55-g7522 From 56a97e701c49ff66dd1a5e5e02775209ab5147d3 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 10 Jul 2017 15:06:39 +0200 Subject: netfilter: expect: add to hash table after expect init assuming we have lockless readers we should make sure they can only see expectations that have already been initialized. hlist_add_head_rcu acts as memory barrier, move it after timer setup. Theoretically we could crash due to a del_timer() on other cpu seeing garbage data. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_expect.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 899c2c36da13..2c63808bea96 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -368,12 +368,6 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) /* two references : one for hash insert, one for the timer */ refcount_add(2, &exp->use); - hlist_add_head_rcu(&exp->lnode, &master_help->expectations); - master_help->expecting[exp->class]++; - - hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]); - net->ct.expect_count++; - setup_timer(&exp->timeout, nf_ct_expectation_timed_out, (unsigned long)exp); helper = rcu_dereference_protected(master_help->helper, @@ -384,6 +378,12 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp) } add_timer(&exp->timeout); + hlist_add_head_rcu(&exp->lnode, &master_help->expectations); + master_help->expecting[exp->class]++; + + hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]); + net->ct.expect_count++; + NF_CT_STAT_INC(net, expect_create); } -- cgit v1.2.3-55-g7522 From 9f08ea848117ab521efcfd3e004d8e1a0edc640c Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 18 Jul 2017 20:18:09 +0200 Subject: netfilter: nf_tables: keep chain counters away from hot path These chain counters are only used by the iptables-compat tool, that allow users to use the x_tables extensions from the existing nf_tables framework. This patch makes nf_tables by ~5% for the general usecase, ie. native nft users, where no chain counters are used at all. 
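For context, the speedup comes from the kernel's static-branch (jump label) facility: the counter update is compiled out of the packet hot path and is only patched in while at least one base chain actually has counters attached. A minimal, self-contained sketch of that pattern follows; the demo_* names are illustrative only and are not taken from this patch.

#include <linux/jump_label.h>

/* Key starts disabled: the hot path sees a straight-line NOP by default. */
DEFINE_STATIC_KEY_FALSE(demo_counters_enabled);

/* Slow path, kept out of line so the fast path stays small. */
static noinline void demo_update_counters(void)
{
	/* per-cpu stats update would go here */
}

static void demo_hot_path(void)
{
	/* Patched into a real branch only while the key is enabled. */
	if (static_branch_unlikely(&demo_counters_enabled))
		demo_update_counters();
}

/* Control path: flip the key when counters are configured or removed. */
static void demo_counters_add(void)
{
	static_branch_inc(&demo_counters_enabled);
}

static void demo_counters_del(void)
{
	static_branch_dec(&demo_counters_enabled);
}

This mirrors what the patch below does with nft_counters_enabled: static_branch_inc() when a base chain registers stats, static_branch_dec() when the chain is destroyed, and a static_branch_unlikely() check in nft_do_chain() so native nft users never pay for counter updates.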
Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables_core.h | 2 ++ net/netfilter/nf_tables_api.c | 11 +++-------- net/netfilter/nf_tables_core.c | 26 ++++++++++++++++++-------- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h index 8f690effec37..424684c33771 100644 --- a/include/net/netfilter/nf_tables_core.h +++ b/include/net/netfilter/nf_tables_core.h @@ -49,6 +49,8 @@ struct nft_payload_set { }; extern const struct nft_expr_ops nft_payload_fast_ops; + +extern struct static_key_false nft_counters_enabled; extern struct static_key_false nft_trace_enabled; #endif /* _NET_NF_TABLES_CORE_H */ diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 7843efa33c59..7fbf0070aba1 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1240,6 +1240,8 @@ static void nf_tables_chain_destroy(struct nft_chain *chain) module_put(basechain->type->owner); free_percpu(basechain->stats); + if (basechain->stats) + static_branch_dec(&nft_counters_enabled); if (basechain->ops[0].dev != NULL) dev_put(basechain->ops[0].dev); kfree(basechain); @@ -1504,14 +1506,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, return PTR_ERR(stats); } basechain->stats = stats; - } else { - stats = netdev_alloc_pcpu_stats(struct nft_stats); - if (stats == NULL) { - nft_chain_release_hook(&hook); - kfree(basechain); - return -ENOMEM; - } - rcu_assign_pointer(basechain->stats, stats); + static_branch_inc(&nft_counters_enabled); } hookfn = hook.type->hooks[hook.num]; diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 65dbeadcb118..c5bab08b0d73 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -114,6 +114,22 @@ static bool nft_payload_fast_eval(const struct nft_expr *expr, return true; } +DEFINE_STATIC_KEY_FALSE(nft_counters_enabled); + +static noinline void nft_update_chain_stats(const struct nft_chain *chain, + const struct nft_pktinfo *pkt) +{ + struct nft_stats *stats; + + local_bh_disable(); + stats = this_cpu_ptr(rcu_dereference(nft_base_chain(chain)->stats)); + u64_stats_update_begin(&stats->syncp); + stats->pkts++; + stats->bytes += pkt->skb->len; + u64_stats_update_end(&stats->syncp); + local_bh_enable(); +} + struct nft_jumpstack { const struct nft_chain *chain; const struct nft_rule *rule; @@ -130,7 +146,6 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv) struct nft_regs regs; unsigned int stackptr = 0; struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE]; - struct nft_stats *stats; int rulenum; unsigned int gencursor = nft_genmask_cur(net); struct nft_traceinfo info; @@ -220,13 +235,8 @@ next_rule: nft_trace_packet(&info, basechain, NULL, -1, NFT_TRACETYPE_POLICY); - rcu_read_lock_bh(); - stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats)); - u64_stats_update_begin(&stats->syncp); - stats->pkts++; - stats->bytes += pkt->skb->len; - u64_stats_update_end(&stats->syncp); - rcu_read_unlock_bh(); + if (static_branch_unlikely(&nft_counters_enabled)) + nft_update_chain_stats(basechain, pkt); return nft_base_chain(basechain)->policy; } -- cgit v1.2.3-55-g7522 From 0b35f6031a00329800bacc04085188c300c3a4d8 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Wed, 19 Jul 2017 14:27:33 +0900 Subject: netfilter: Remove duplicated rcu_read_lock. This patch removes duplicate rcu_read_lock(). 1. 
IPVS part: According to Julian Anastasov's mention, contexts of ipvs are described at: http://marc.info/?l=netfilter-devel&m=149562884514072&w=2, in summary: - packet RX/TX: does not need locks because packets come from hooks. - sync msg RX: backup server uses RCU locks while registering new connections. - ip_vs_ctl.c: configuration get/set, RCU locks needed. - xt_ipvs.c: It is a netfilter match, running from hook context. As result, rcu_read_lock and rcu_read_unlock can be removed from: - ip_vs_core.c: all - ip_vs_ctl.c: - only from ip_vs_has_real_service - ip_vs_ftp.c: all - ip_vs_proto_sctp.c: all - ip_vs_proto_tcp.c: all - ip_vs_proto_udp.c: all - ip_vs_xmit.c: all (contains only packet processing) 2. Netfilter part: There are three types of functions that are guaranteed the rcu_read_lock(). First, as result, functions are only called by nf_hook(): - nf_conntrack_broadcast_help(), pptp_expectfn(), set_expected_rtp_rtcp(). - tcpmss_reverse_mtu(), tproxy_laddr4(), tproxy_laddr6(). - match_lookup_rt6(), check_hlist(), hashlimit_mt_common(). - xt_osf_match_packet(). Second, functions that caller already held the rcu_read_lock(). - destroy_conntrack(), ctnetlink_conntrack_event(). - ctnl_timeout_find_get(), nfqnl_nf_hook_drop(). Third, functions that are mixed with type1 and type2. These functions are called by nf_hook() also these are called by ordinary functions that already held the rcu_read_lock(): - __ctnetlink_glue_build(), ctnetlink_expect_event(). - ctnetlink_proto_size(). Applied files are below: - nf_conntrack_broadcast.c, nf_conntrack_core.c, nf_conntrack_netlink.c. - nf_conntrack_pptp.c, nf_conntrack_sip.c, nfnetlink_cttimeout.c. - nfnetlink_queue.c, xt_TCPMSS.c, xt_TPROXY.c, xt_addrtype.c. - xt_connlimit.c, xt_hashlimit.c, xt_osf.c Detailed calltrace can be found at: http://marc.info/?l=netfilter-devel&m=149667610710350&w=2 Signed-off-by: Taehee Yoo Acked-by: Julian Anastasov Signed-off-by: Pablo Neira Ayuso --- net/netfilter/ipvs/ip_vs_core.c | 8 ------ net/netfilter/ipvs/ip_vs_ctl.c | 3 --- net/netfilter/ipvs/ip_vs_ftp.c | 2 -- net/netfilter/ipvs/ip_vs_proto_sctp.c | 11 ++------ net/netfilter/ipvs/ip_vs_proto_tcp.c | 10 +------- net/netfilter/ipvs/ip_vs_proto_udp.c | 10 +------- net/netfilter/ipvs/ip_vs_xmit.c | 46 +++------------------------------- net/netfilter/nf_conntrack_broadcast.c | 2 -- net/netfilter/nf_conntrack_core.c | 3 --- net/netfilter/nf_conntrack_netlink.c | 12 --------- net/netfilter/nf_conntrack_pptp.c | 2 -- net/netfilter/nf_conntrack_sip.c | 6 +---- net/netfilter/nfnetlink_cttimeout.c | 2 -- net/netfilter/nfnetlink_queue.c | 2 -- net/netfilter/xt_TCPMSS.c | 2 -- net/netfilter/xt_TPROXY.c | 4 --- net/netfilter/xt_addrtype.c | 3 --- net/netfilter/xt_connlimit.c | 3 --- net/netfilter/xt_hashlimit.c | 8 +++--- net/netfilter/xt_osf.c | 2 -- 20 files changed, 13 insertions(+), 128 deletions(-) diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index e31956b58aba..2ff9d9070c95 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -125,14 +125,12 @@ ip_vs_in_stats(struct ip_vs_conn *cp, struct sk_buff *skb) s->cnt.inbytes += skb->len; u64_stats_update_end(&s->syncp); - rcu_read_lock(); svc = rcu_dereference(dest->svc); s = this_cpu_ptr(svc->stats.cpustats); u64_stats_update_begin(&s->syncp); s->cnt.inpkts++; s->cnt.inbytes += skb->len; u64_stats_update_end(&s->syncp); - rcu_read_unlock(); s = this_cpu_ptr(ipvs->tot_stats.cpustats); u64_stats_update_begin(&s->syncp); @@ -159,14 +157,12 @@ 
ip_vs_out_stats(struct ip_vs_conn *cp, struct sk_buff *skb) s->cnt.outbytes += skb->len; u64_stats_update_end(&s->syncp); - rcu_read_lock(); svc = rcu_dereference(dest->svc); s = this_cpu_ptr(svc->stats.cpustats); u64_stats_update_begin(&s->syncp); s->cnt.outpkts++; s->cnt.outbytes += skb->len; u64_stats_update_end(&s->syncp); - rcu_read_unlock(); s = this_cpu_ptr(ipvs->tot_stats.cpustats); u64_stats_update_begin(&s->syncp); @@ -1222,7 +1218,6 @@ static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum, if (!pptr) return NULL; - rcu_read_lock(); dest = ip_vs_find_real_service(ipvs, af, iph->protocol, &iph->saddr, pptr[0]); if (dest) { @@ -1237,7 +1232,6 @@ static struct ip_vs_conn *__ip_vs_rs_conn_out(unsigned int hooknum, pptr[0], pptr[1]); } } - rcu_read_unlock(); return cp; } @@ -1689,11 +1683,9 @@ ip_vs_in_icmp(struct netns_ipvs *ipvs, struct sk_buff *skb, int *related, if (dest) { struct ip_vs_dest_dst *dest_dst; - rcu_read_lock(); dest_dst = rcu_dereference(dest->dest_dst); if (dest_dst) mtu = dst_mtu(dest_dst->dst_cache); - rcu_read_unlock(); } if (mtu > 68 + sizeof(struct iphdr)) mtu -= sizeof(struct iphdr); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 1fa3c2307b6e..4f940d7eb2f7 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -550,18 +550,15 @@ bool ip_vs_has_real_service(struct netns_ipvs *ipvs, int af, __u16 protocol, /* Check for "full" addressed entries */ hash = ip_vs_rs_hashkey(af, daddr, dport); - rcu_read_lock(); hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) { if (dest->port == dport && dest->af == af && ip_vs_addr_equal(af, &dest->addr, daddr) && (dest->protocol == protocol || dest->vfwmark)) { /* HIT */ - rcu_read_unlock(); return true; } } - rcu_read_unlock(); return false; } diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index fb780be76d15..3e17d32b629d 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -269,13 +269,11 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, * hopefully it will succeed on the retransmitted * packet. */ - rcu_read_lock(); mangled = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, iph->ihl * 4, start - data, end - start, buf, buf_len); - rcu_read_unlock(); if (mangled) { ip_vs_nfct_expect_related(skb, ct, n_cp, IPPROTO_TCP, 0, 0); diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 3ffad4adaddf..e1efa446b305 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -38,7 +38,6 @@ sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, return 0; } - rcu_read_lock(); if (likely(!ip_vs_iph_inverse(iph))) svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol, &iph->daddr, ports[1]); @@ -53,7 +52,6 @@ sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, * It seems that we are very loaded. 
* We have to drop this packet :( */ - rcu_read_unlock(); *verdict = NF_DROP; return 0; } @@ -67,11 +65,9 @@ sctp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, *verdict = ip_vs_leave(svc, skb, pd, iph); else *verdict = NF_DROP; - rcu_read_unlock(); return 0; } } - rcu_read_unlock(); /* NF_ACCEPT */ return 1; } @@ -526,12 +522,10 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp) /* Lookup application incarnations and bind the right one */ hash = sctp_app_hashkey(cp->vport); - rcu_read_lock(); list_for_each_entry_rcu(inc, &ipvs->sctp_apps[hash], p_list) { if (inc->port == cp->vport) { if (unlikely(!ip_vs_app_inc_get(inc))) break; - rcu_read_unlock(); IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->" "%s:%u to app %s on port %u\n", @@ -544,11 +538,10 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp) cp->app = inc; if (inc->init_conn) result = inc->init_conn(inc, cp); - goto out; + break; } } - rcu_read_unlock(); -out: + return result; } diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index 12dc8d5bc37d..121a321b91be 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -63,7 +63,6 @@ tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, } /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */ - rcu_read_lock(); if (likely(!ip_vs_iph_inverse(iph))) svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol, @@ -80,7 +79,6 @@ tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, * It seems that we are very loaded. * We have to drop this packet :( */ - rcu_read_unlock(); *verdict = NF_DROP; return 0; } @@ -95,11 +93,9 @@ tcp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, *verdict = ip_vs_leave(svc, skb, pd, iph); else *verdict = NF_DROP; - rcu_read_unlock(); return 0; } } - rcu_read_unlock(); /* NF_ACCEPT */ return 1; } @@ -661,12 +657,10 @@ tcp_app_conn_bind(struct ip_vs_conn *cp) /* Lookup application incarnations and bind the right one */ hash = tcp_app_hashkey(cp->vport); - rcu_read_lock(); list_for_each_entry_rcu(inc, &ipvs->tcp_apps[hash], p_list) { if (inc->port == cp->vport) { if (unlikely(!ip_vs_app_inc_get(inc))) break; - rcu_read_unlock(); IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" "%s:%u to app %s on port %u\n", @@ -680,12 +674,10 @@ tcp_app_conn_bind(struct ip_vs_conn *cp) cp->app = inc; if (inc->init_conn) result = inc->init_conn(inc, cp); - goto out; + break; } } - rcu_read_unlock(); - out: return result; } diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index e494e9a88c7f..30e11cd6aa8a 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c @@ -53,7 +53,6 @@ udp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, return 0; } - rcu_read_lock(); if (likely(!ip_vs_iph_inverse(iph))) svc = ip_vs_service_find(ipvs, af, skb->mark, iph->protocol, &iph->daddr, ports[1]); @@ -69,7 +68,6 @@ udp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, * It seems that we are very loaded. 
* We have to drop this packet :( */ - rcu_read_unlock(); *verdict = NF_DROP; return 0; } @@ -84,11 +82,9 @@ udp_conn_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, *verdict = ip_vs_leave(svc, skb, pd, iph); else *verdict = NF_DROP; - rcu_read_unlock(); return 0; } } - rcu_read_unlock(); /* NF_ACCEPT */ return 1; } @@ -410,12 +406,10 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp) /* Lookup application incarnations and bind the right one */ hash = udp_app_hashkey(cp->vport); - rcu_read_lock(); list_for_each_entry_rcu(inc, &ipvs->udp_apps[hash], p_list) { if (inc->port == cp->vport) { if (unlikely(!ip_vs_app_inc_get(inc))) break; - rcu_read_unlock(); IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" "%s:%u to app %s on port %u\n", @@ -429,12 +423,10 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp) cp->app = inc; if (inc->init_conn) result = inc->init_conn(inc, cp); - goto out; + break; } } - rcu_read_unlock(); - out: return result; } diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 2eab1e0400f4..90d396814798 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -678,7 +678,6 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); - rcu_read_lock(); if (__ip_vs_get_out_rt(cp->ipvs, cp->af, skb, NULL, iph->daddr, IP_VS_RT_MODE_NON_LOCAL, NULL, ipvsh) < 0) goto tx_error; @@ -689,14 +688,12 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, skb->ignore_df = 1; ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; tx_error: kfree_skb(skb); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; } @@ -710,7 +707,6 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); - rcu_read_lock(); if (__ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, NULL, &iph->daddr, NULL, ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0) @@ -720,14 +716,12 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, skb->ignore_df = 1; ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; tx_error: kfree_skb(skb); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; } @@ -746,7 +740,6 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); - rcu_read_lock(); /* check if it is a connection of no-client-port */ if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) { __be16 _pt, *p; @@ -815,14 +808,12 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, skb->ignore_df = 1; rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); - rcu_read_unlock(); LeaveFunction(10); return rc; tx_error: kfree_skb(skb); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; } @@ -837,7 +828,6 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); - rcu_read_lock(); /* check if it is a connection of no-client-port */ if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !ipvsh->fragoffs)) { __be16 _pt, *p; @@ -906,7 +896,6 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, skb->ignore_df = 1; rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); - rcu_read_unlock(); LeaveFunction(10); return rc; @@ -914,7 +903,6 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, tx_error: LeaveFunction(10); kfree_skb(skb); - rcu_read_unlock(); return NF_STOLEN; } #endif @@ -1035,7 +1023,6 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); - rcu_read_lock(); local = 
__ip_vs_get_out_rt(ipvs, cp->af, skb, cp->dest, cp->daddr.ip, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | @@ -1043,10 +1030,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_RT_MODE_TUNNEL, &saddr, ipvsh); if (local < 0) goto tx_error; - if (local) { - rcu_read_unlock(); + if (local) return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1); - } rt = skb_rtable(skb); tdev = rt->dst.dev; @@ -1095,7 +1080,6 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_local_out(net, skb->sk, skb); else if (ret == NF_DROP) kfree_skb(skb); - rcu_read_unlock(); LeaveFunction(10); @@ -1104,7 +1088,6 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, tx_error: if (!IS_ERR(skb)) kfree_skb(skb); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; } @@ -1127,7 +1110,6 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); - rcu_read_lock(); local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest, &cp->daddr.in6, &saddr, ipvsh, 1, @@ -1136,10 +1118,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_RT_MODE_TUNNEL); if (local < 0) goto tx_error; - if (local) { - rcu_read_unlock(); + if (local) return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1); - } rt = (struct rt6_info *) skb_dst(skb); tdev = rt->dst.dev; @@ -1185,7 +1165,6 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip6_local_out(cp->ipvs->net, skb->sk, skb); else if (ret == NF_DROP) kfree_skb(skb); - rcu_read_unlock(); LeaveFunction(10); @@ -1194,7 +1173,6 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, tx_error: if (!IS_ERR(skb)) kfree_skb(skb); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; } @@ -1213,17 +1191,14 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); - rcu_read_lock(); local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | IP_VS_RT_MODE_KNOWN_NH, NULL, ipvsh); if (local < 0) goto tx_error; - if (local) { - rcu_read_unlock(); + if (local) return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1); - } ip_send_check(ip_hdr(skb)); @@ -1231,14 +1206,12 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, skb->ignore_df = 1; ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; tx_error: kfree_skb(skb); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; } @@ -1252,7 +1225,6 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, EnterFunction(10); - rcu_read_lock(); local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest, &cp->daddr.in6, NULL, ipvsh, 0, @@ -1261,23 +1233,19 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, IP_VS_RT_MODE_KNOWN_NH); if (local < 0) goto tx_error; - if (local) { - rcu_read_unlock(); + if (local) return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1); - } /* Another hack: avoid icmp_send in ip_fragment */ skb->ignore_df = 1; ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; tx_error: kfree_skb(skb); - rcu_read_unlock(); LeaveFunction(10); return NF_STOLEN; } @@ -1322,7 +1290,6 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, rt_mode = (hooknum != NF_INET_FORWARD) ? 
IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL; - rcu_read_lock(); local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip, rt_mode, NULL, iph); if (local < 0) @@ -1368,12 +1335,10 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, skb->ignore_df = 1; rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); - rcu_read_unlock(); goto out; tx_error: kfree_skb(skb); - rcu_read_unlock(); rc = NF_STOLEN; out: LeaveFunction(10); @@ -1414,7 +1379,6 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, rt_mode = (hooknum != NF_INET_FORWARD) ? IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL; - rcu_read_lock(); local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest, &cp->daddr.in6, NULL, ipvsh, 0, rt_mode); if (local < 0) @@ -1460,12 +1424,10 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, skb->ignore_df = 1; rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); - rcu_read_unlock(); goto out; tx_error: kfree_skb(skb); - rcu_read_unlock(); rc = NF_STOLEN; out: LeaveFunction(10); diff --git a/net/netfilter/nf_conntrack_broadcast.c b/net/netfilter/nf_conntrack_broadcast.c index 4e99cca61612..ecc3ab784633 100644 --- a/net/netfilter/nf_conntrack_broadcast.c +++ b/net/netfilter/nf_conntrack_broadcast.c @@ -40,7 +40,6 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb, if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) goto out; - rcu_read_lock(); in_dev = __in_dev_get_rcu(rt->dst.dev); if (in_dev != NULL) { for_primary_ifa(in_dev) { @@ -50,7 +49,6 @@ int nf_conntrack_broadcast_help(struct sk_buff *skb, } } endfor_ifa(in_dev); } - rcu_read_unlock(); if (mask == 0) goto out; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 9979f46c81dc..69746928cc0a 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -407,13 +407,10 @@ destroy_conntrack(struct nf_conntrack *nfct) nf_ct_tmpl_free(ct); return; } - rcu_read_lock(); l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); if (l4proto->destroy) l4proto->destroy(ct); - rcu_read_unlock(); - local_bh_disable(); /* Expectations will have been removed in clean_from_lists, * except TFTP can create an expectation on the first packet, diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 7999e70c3bfb..4dba71de4de7 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -539,13 +539,11 @@ static inline size_t ctnetlink_proto_size(const struct nf_conn *ct) struct nf_conntrack_l4proto *l4proto; size_t len = 0; - rcu_read_lock(); l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); len += l3proto->nla_size; l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); len += l4proto->nla_size; - rcu_read_unlock(); return len; } @@ -664,7 +662,6 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) nfmsg->version = NFNETLINK_V0; nfmsg->res_id = 0; - rcu_read_lock(); zone = nf_ct_zone(ct); nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED); @@ -736,8 +733,6 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) && ctnetlink_dump_mark(skb, ct) < 0) goto nla_put_failure; #endif - rcu_read_unlock(); - nlmsg_end(skb, nlh); err = nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC); @@ -747,7 +742,6 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item) 
return 0; nla_put_failure: - rcu_read_unlock(); nlmsg_cancel(skb, nlh); nlmsg_failure: kfree_skb(skb); @@ -2213,7 +2207,6 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) const struct nf_conntrack_zone *zone; struct nlattr *nest_parms; - rcu_read_lock(); zone = nf_ct_zone(ct); nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED); @@ -2272,11 +2265,9 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) #endif if (ctnetlink_dump_labels(skb, ct) < 0) goto nla_put_failure; - rcu_read_unlock(); return 0; nla_put_failure: - rcu_read_unlock(); return -ENOSPC; } @@ -2661,17 +2652,14 @@ ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item) nfmsg->version = NFNETLINK_V0; nfmsg->res_id = 0; - rcu_read_lock(); if (ctnetlink_exp_dump_expect(skb, exp) < 0) goto nla_put_failure; - rcu_read_unlock(); nlmsg_end(skb, nlh); nfnetlink_send(skb, net, item->portid, group, item->report, GFP_ATOMIC); return 0; nla_put_failure: - rcu_read_unlock(); nlmsg_cancel(skb, nlh); nlmsg_failure: kfree_skb(skb); diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c index 6959e93063d4..11562f2a08bb 100644 --- a/net/netfilter/nf_conntrack_pptp.c +++ b/net/netfilter/nf_conntrack_pptp.c @@ -113,7 +113,6 @@ static void pptp_expectfn(struct nf_conn *ct, /* Can you see how rusty this code is, compared with the pre-2.6.11 * one? That's what happened to my shiny newnat of 2002 ;( -HW */ - rcu_read_lock(); nf_nat_pptp_expectfn = rcu_dereference(nf_nat_pptp_hook_expectfn); if (nf_nat_pptp_expectfn && ct->master->status & IPS_NAT_MASK) nf_nat_pptp_expectfn(ct, exp); @@ -136,7 +135,6 @@ static void pptp_expectfn(struct nf_conn *ct, pr_debug("not found\n"); } } - rcu_read_unlock(); } static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct, diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index d38af4274335..4dbb5bad4363 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -884,7 +884,6 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff, tuple.dst.u3 = *daddr; tuple.dst.u.udp.port = port; - rcu_read_lock(); do { exp = __nf_ct_expect_find(net, nf_ct_zone(ct), &tuple); @@ -918,10 +917,8 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff, goto err1; } - if (skip_expect) { - rcu_read_unlock(); + if (skip_expect) return NF_ACCEPT; - } rtp_exp = nf_ct_expect_alloc(ct); if (rtp_exp == NULL) @@ -952,7 +949,6 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff, err2: nf_ct_expect_put(rtp_exp); err1: - rcu_read_unlock(); return ret; } diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 400e9ae97153..7ce9e86d374c 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -505,7 +505,6 @@ ctnl_timeout_find_get(struct net *net, const char *name) { struct ctnl_timeout *timeout, *matching = NULL; - rcu_read_lock(); list_for_each_entry_rcu(timeout, &net->nfct_timeout_list, head) { if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0) continue; @@ -521,7 +520,6 @@ ctnl_timeout_find_get(struct net *net, const char *name) break; } err: - rcu_read_unlock(); return matching; } diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 16fa04086880..7c543bfbf624 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -928,7 +928,6 @@ static unsigned int 
nfqnl_nf_hook_drop(struct net *net) unsigned int instances = 0; int i; - rcu_read_lock(); for (i = 0; i < INSTANCE_BUCKETS; i++) { struct nfqnl_instance *inst; struct hlist_head *head = &q->instance_table[i]; @@ -938,7 +937,6 @@ static unsigned int nfqnl_nf_hook_drop(struct net *net) instances++; } } - rcu_read_unlock(); return instances; } diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index c64aca611ac5..9dae4d665965 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -62,11 +62,9 @@ static u_int32_t tcpmss_reverse_mtu(struct net *net, memset(fl6, 0, sizeof(*fl6)); fl6->daddr = ipv6_hdr(skb)->saddr; } - rcu_read_lock(); ai = nf_get_afinfo(family); if (ai != NULL) ai->route(net, (struct dst_entry **)&rt, &fl, false); - rcu_read_unlock(); if (rt != NULL) { mtu = dst_mtu(&rt->dst); diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index d767e35fff6b..2b74f37132fc 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -70,13 +70,11 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) return user_laddr; laddr = 0; - rcu_read_lock(); indev = __in_dev_get_rcu(skb->dev); for_primary_ifa(indev) { laddr = ifa->ifa_local; break; } endfor_ifa(indev); - rcu_read_unlock(); return laddr ? laddr : daddr; } @@ -391,7 +389,6 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, return user_laddr; laddr = NULL; - rcu_read_lock(); indev = __in6_dev_get(skb->dev); if (indev) { read_lock_bh(&indev->lock); @@ -404,7 +401,6 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr, } read_unlock_bh(&indev->lock); } - rcu_read_unlock(); return laddr ? laddr : daddr; } diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c index e329dabde35f..3b2be2ae6987 100644 --- a/net/netfilter/xt_addrtype.c +++ b/net/netfilter/xt_addrtype.c @@ -47,8 +47,6 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, if (dev) flow.flowi6_oif = dev->ifindex; - rcu_read_lock(); - afinfo = nf_get_afinfo(NFPROTO_IPV6); if (afinfo != NULL) { const struct nf_ipv6_ops *v6ops; @@ -63,7 +61,6 @@ static u32 match_lookup_rt6(struct net *net, const struct net_device *dev, } else { route_err = 1; } - rcu_read_unlock(); if (route_err) return XT_ADDRTYPE_UNREACHABLE; diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index b8fd4ab762ed..97589b8a2a40 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c @@ -144,7 +144,6 @@ static unsigned int check_hlist(struct net *net, unsigned int length = 0; *addit = true; - rcu_read_lock(); /* check the saved connections */ hlist_for_each_entry_safe(conn, n, head, node) { @@ -179,8 +178,6 @@ static unsigned int check_hlist(struct net *net, length++; } - rcu_read_unlock(); - return length; } diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 762e1874f28b..ffdb611e54a2 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -659,12 +659,12 @@ hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par, if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0) goto hotdrop; - rcu_read_lock_bh(); + local_bh_disable(); dh = dsthash_find(hinfo, &dst); if (dh == NULL) { dh = dsthash_alloc_init(hinfo, &dst, &race); if (dh == NULL) { - rcu_read_unlock_bh(); + local_bh_enable(); goto hotdrop; } else if (race) { /* Already got an entry, update expiration timeout */ @@ -689,12 +689,12 @@ hashlimit_mt_common(const struct sk_buff *skb, struct 
xt_action_param *par, /* below the limit */ dh->rateinfo.credit -= cost; spin_unlock(&dh->lock); - rcu_read_unlock_bh(); + local_bh_enable(); return !(cfg->mode & XT_HASHLIMIT_INVERT); } spin_unlock(&dh->lock); - rcu_read_unlock_bh(); + local_bh_enable(); /* default match is underlimit - so over the limit, we need to invert */ return cfg->mode & XT_HASHLIMIT_INVERT; diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c index 71cfa9551d08..36e14b1f061d 100644 --- a/net/netfilter/xt_osf.c +++ b/net/netfilter/xt_osf.c @@ -226,7 +226,6 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p) sizeof(struct tcphdr), optsize, opts); } - rcu_read_lock(); list_for_each_entry_rcu(kf, &xt_osf_fingers[df], finger_entry) { int foptsize, optnum; @@ -340,7 +339,6 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p) info->loglevel == XT_OSF_LOGLEVEL_FIRST) break; } - rcu_read_unlock(); if (!fcount && (info->flags & XT_OSF_LOG)) nf_log_packet(net, xt_family(p), xt_hooknum(p), skb, xt_in(p), -- cgit v1.2.3-55-g7522 From 784b4e612d42a2b7578d7fab2ed78940e10536bc Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Wed, 19 Jul 2017 16:32:23 +0200 Subject: netfilter: nf_tables: Attach process info to NFT_MSG_NEWGEN notifications This is helpful for 'nft monitor' to track which process caused a given change to the ruleset. Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 2 ++ net/netfilter/nf_tables_api.c | 5 ++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 683f6f88fcac..6f0a950e21c3 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1221,6 +1221,8 @@ enum nft_objref_attributes { enum nft_gen_attributes { NFTA_GEN_UNSPEC, NFTA_GEN_ID, + NFTA_GEN_PROC_PID, + NFTA_GEN_PROC_NAME, __NFTA_GEN_MAX }; #define NFTA_GEN_MAX (__NFTA_GEN_MAX - 1) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 7fbf0070aba1..b77ad0813564 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4657,6 +4657,7 @@ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; + char buf[TASK_COMM_LEN]; int event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWGEN); nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct nfgenmsg), 0); @@ -4668,7 +4669,9 @@ static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net, nfmsg->version = NFNETLINK_V0; nfmsg->res_id = htons(net->nft.base_seq & 0xffff); - if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq))) + if (nla_put_be32(skb, NFTA_GEN_ID, htonl(net->nft.base_seq)) || + nla_put_be32(skb, NFTA_GEN_PROC_PID, htonl(task_pid_nr(current))) || + nla_put_string(skb, NFTA_GEN_PROC_NAME, get_task_comm(buf, current))) goto nla_put_failure; nlmsg_end(skb, nlh); -- cgit v1.2.3-55-g7522 From 19cfe912c37bbc184c673c3c081edff62ea8e565 Mon Sep 17 00:00:00 2001 From: Jeffy Chen Date: Thu, 20 Jul 2017 18:53:50 +0800 Subject: Bluetooth: btusb: Fix memory leak in play_deferred Currently we are calling usb_submit_urb directly to submit deferred tx urbs after unanchor them. So the usb_giveback_urb_bh would failed to unref it in usb_unanchor_urb and cause memory leak: unreferenced object 0xffffffc0ce0fa400 (size 256): ... 
backtrace: [] __save_stack_trace+0x48/0x6c [] create_object+0x138/0x254 [] kmemleak_alloc+0x58/0x8c [] __kmalloc+0x1d4/0x2a0 [] usb_alloc_urb+0x30/0x60 [] alloc_ctrl_urb+0x38/0x120 [btusb] [] btusb_send_frame+0x64/0xf8 [btusb] Put those urbs in tx_anchor to avoid the leak, and also fix the error handling. Signed-off-by: Jeffy Chen Reviewed-by: Oliver Neukum Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 154237c78119..1cefff772cd0 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3271,13 +3271,28 @@ static void play_deferred(struct btusb_data *data) int err; while ((urb = usb_get_from_anchor(&data->deferred))) { + usb_anchor_urb(urb, &data->tx_anchor); + err = usb_submit_urb(urb, GFP_ATOMIC); - if (err < 0) + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + BT_ERR("%s urb %p submission failed (%d)", + data->hdev->name, urb, -err); + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + usb_free_urb(urb); break; + } data->tx_in_flight++; + usb_free_urb(urb); + } + + /* Cleanup the rest deferred urbs. */ + while ((urb = usb_get_from_anchor(&data->deferred))) { + kfree(urb->setup_packet); + usb_free_urb(urb); } - usb_scuttle_anchored_urbs(&data->deferred); } static int btusb_resume(struct usb_interface *intf) -- cgit v1.2.3-55-g7522 From 6a48542091d6d1d35edcd8b7422a6689c45916a9 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Sat, 22 Jul 2017 11:57:47 +0200 Subject: Bluetooth: hci_nokia: select BT_BCM for btbcm_set_bdaddr() The Nokia devices require the setup of its Public Bluetooth Device Address and for that it is required to depend on vendor specific commands. For Broadcom based Nokia devices, that is part of btbcm module and can be selected via BT_BCM config option. Signed-off-by: Marcel Holtmann Reviewed-by: Sebastian Reichel Signed-off-by: Johan Hedberg --- drivers/bluetooth/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 35952a94875e..3a6ead603e49 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -98,6 +98,7 @@ config BT_HCIUART_NOKIA depends on BT_HCIUART_SERDEV depends on PM select BT_HCIUART_H4 + select BT_BCM help Nokia H4+ is serial protocol for communication between Bluetooth device and host. This protocol is required for Bluetooth devices -- cgit v1.2.3-55-g7522 From 87646a348ebcb1415a479be31f1c41f276e057e6 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 19 Jul 2017 16:54:45 +0800 Subject: virtio_ring: allow to store zero as the ctx Allow zero to be store as a ctx, with this we could store e.g zero value which could be meaningful for the case of storing headroom through ctx. Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- drivers/virtio/virtio_ring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 5e1b548828e6..9aaa177e8209 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -391,7 +391,7 @@ static inline int virtqueue_add(struct virtqueue *_vq, vq->desc_state[head].data = data; if (indirect) vq->desc_state[head].indir_desc = desc; - if (ctx) + else vq->desc_state[head].indir_desc = ctx; /* Put entry in available array (but don't update avail->idx until they -- cgit v1.2.3-55-g7522 From 28b39bc7c57e7920b0cbba3d79ba0f134e0f76f0 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 19 Jul 2017 16:54:46 +0800 Subject: virtio-net: pack headroom into ctx for mergeable buffers Pack headroom into ctx - this way when we get a buffer we can figure out the actual headroom that was allocated for the buffer. Will be helpful to optimize switching between XDP and non-XDP modes which have different headroom requirements. Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 99830167ea2f..3223a36ad9a8 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -270,6 +270,23 @@ static void skb_xmit_done(struct virtqueue *vq) netif_wake_subqueue(vi->dev, vq2txq(vq)); } +#define MRG_CTX_HEADER_SHIFT 22 +static void *mergeable_len_to_ctx(unsigned int truesize, + unsigned int headroom) +{ + return (void *)(unsigned long)((headroom << MRG_CTX_HEADER_SHIFT) | truesize); +} + +static unsigned int mergeable_ctx_to_headroom(void *mrg_ctx) +{ + return (unsigned long)mrg_ctx >> MRG_CTX_HEADER_SHIFT; +} + +static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) +{ + return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); +} + /* Called from bottom half context */ static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, @@ -639,13 +656,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } rcu_read_unlock(); - if (unlikely(len > (unsigned long)ctx)) { + truesize = mergeable_ctx_to_truesize(ctx); + if (unlikely(len > truesize)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)ctx); dev->stats.rx_length_errors++; goto err_skb; } - truesize = (unsigned long)ctx; + head_skb = page_to_skb(vi, rq, page, offset, len, truesize); curr_skb = head_skb; @@ -665,13 +683,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, } page = virt_to_head_page(buf); - if (unlikely(len > (unsigned long)ctx)) { + + truesize = mergeable_ctx_to_truesize(ctx); + if (unlikely(len > truesize)) { pr_debug("%s: rx error: len %u exceeds truesize %lu\n", dev->name, len, (unsigned long)ctx); dev->stats.rx_length_errors++; goto err_skb; } - truesize = (unsigned long)ctx; num_skb_frags = skb_shinfo(curr_skb)->nr_frags; if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { @@ -889,7 +908,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; buf += headroom; /* advance address leaving hole at front of pkt */ - ctx = (void *)(unsigned long)len; + ctx = mergeable_len_to_ctx(len, headroom); get_page(alloc_frag->page); alloc_frag->offset += len + headroom; hole = alloc_frag->size - alloc_frag->offset; -- cgit v1.2.3-55-g7522 From 192f68cf35f5eefe28ce8acbb9a3dfc747149b64 Mon Sep 
17 00:00:00 2001 From: Jason Wang Date: Wed, 19 Jul 2017 16:54:47 +0800 Subject: virtio-net: switch to use new ctx API for small buffer Use ctx API to store headroom for small buffers. Following patches will retrieve this info and use it for XDP. Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3223a36ad9a8..a8a5968a8c61 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -410,7 +410,8 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi) static struct sk_buff *receive_small(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, - void *buf, unsigned int len) + void *buf, void *ctx, + unsigned int len) { struct sk_buff *skb; struct bpf_prog *xdp_prog; @@ -773,7 +774,7 @@ static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, else if (vi->big_packets) skb = receive_big(dev, vi, rq, buf, len); else - skb = receive_small(dev, vi, rq, buf, len); + skb = receive_small(dev, vi, rq, buf, ctx, len); if (unlikely(!skb)) return 0; @@ -806,12 +807,18 @@ frame_err: return 0; } +/* Unlike mergeable buffers, all buffers are allocated to the + * same size, except for the headroom. For this reason we do + * not need to use mergeable_len_to_ctx here - it is enough + * to store the headroom as the context ignoring the truesize. + */ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { struct page_frag *alloc_frag = &rq->alloc_frag; char *buf; unsigned int xdp_headroom = virtnet_get_headroom(vi); + void *ctx = (void *)(unsigned long)xdp_headroom; int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom; int err; @@ -825,7 +832,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, alloc_frag->offset += len; sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom, vi->hdr_len + GOOD_PACKET_LEN); - err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp); + err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); @@ -1034,7 +1041,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget) void *buf; struct virtnet_stats *stats = this_cpu_ptr(vi->stats); - if (vi->mergeable_rx_bufs) { + if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; while (received < budget && @@ -2202,7 +2209,7 @@ static int virtnet_find_vqs(struct virtnet_info *vi) names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); if (!names) goto err_names; - if (vi->mergeable_rx_bufs) { + if (!vi->big_packets || vi->mergeable_rx_bufs) { ctx = kzalloc(total_vqs * sizeof(*ctx), GFP_KERNEL); if (!ctx) goto err_ctx; -- cgit v1.2.3-55-g7522 From 4941d472bf95b4345d6e38906fcf354e74afa311 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 19 Jul 2017 16:54:48 +0800 Subject: virtio-net: do not reset during XDP set We currently reset the device during XDP set, the main reason is that we allocate more headroom with XDP (for header adjustment). This works but causes network downtime for users. Previous patches encoded the headroom in the buffer context, this makes it possible to detect the case where a buffer with headroom insufficient for XDP is added to the queue and XDP is enabled afterwards. Upon detection, we handle this case by copying the packet (slow, but it's a temporary condition). Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- drivers/net/virtio_net.c | 232 ++++++++++++++++++++++------------------------- 1 file changed, 106 insertions(+), 126 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index a8a5968a8c61..f894713dca2a 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -407,6 +407,69 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi) return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0; } +/* We copy the packet for XDP in the following cases: + * + * 1) Packet is scattered across multiple rx buffers. + * 2) Headroom space is insufficient. + * + * This is inefficient but it's a temporary condition that + * we hit right after XDP is enabled and until queue is refilled + * with large buffers with sufficient headroom - so it should affect + * at most queue size packets. + * Afterwards, the conditions to enable + * XDP should preclude the underlying device from sending packets + * across multiple buffers (num_buf > 1), and we make sure buffers + * have enough headroom. + */ +static struct page *xdp_linearize_page(struct receive_queue *rq, + u16 *num_buf, + struct page *p, + int offset, + int page_off, + unsigned int *len) +{ + struct page *page = alloc_page(GFP_ATOMIC); + + if (!page) + return NULL; + + memcpy(page_address(page) + page_off, page_address(p) + offset, *len); + page_off += *len; + + while (--*num_buf) { + unsigned int buflen; + void *buf; + int off; + + buf = virtqueue_get_buf(rq->vq, &buflen); + if (unlikely(!buf)) + goto err_buf; + + p = virt_to_head_page(buf); + off = buf - page_address(p); + + /* guard against a misconfigured or uncooperative backend that + * is sending packet larger than the MTU. + */ + if ((page_off + buflen) > PAGE_SIZE) { + put_page(p); + goto err_buf; + } + + memcpy(page_address(page) + page_off, + page_address(p) + off, buflen); + page_off += buflen; + put_page(p); + } + + /* Headroom does not contribute to packet length */ + *len = page_off - VIRTIO_XDP_HEADROOM; + return page; +err_buf: + __free_pages(page, 0); + return NULL; +} + static struct sk_buff *receive_small(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, @@ -415,12 +478,14 @@ static struct sk_buff *receive_small(struct net_device *dev, { struct sk_buff *skb; struct bpf_prog *xdp_prog; - unsigned int xdp_headroom = virtnet_get_headroom(vi); + unsigned int xdp_headroom = (unsigned long)ctx; unsigned int header_offset = VIRTNET_RX_PAD + xdp_headroom; unsigned int headroom = vi->hdr_len + header_offset; unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + struct page *page = virt_to_head_page(buf); unsigned int delta = 0; + struct page *xdp_page; len -= vi->hdr_len; rcu_read_lock(); @@ -434,6 +499,27 @@ static struct sk_buff *receive_small(struct net_device *dev, if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags)) goto err_xdp; + if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) { + int offset = buf - page_address(page) + header_offset; + unsigned int tlen = len + vi->hdr_len; + u16 num_buf = 1; + + xdp_headroom = virtnet_get_headroom(vi); + header_offset = VIRTNET_RX_PAD + xdp_headroom; + headroom = vi->hdr_len + header_offset; + buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + xdp_page = xdp_linearize_page(rq, &num_buf, page, + offset, header_offset, + &tlen); + if (!xdp_page) + goto err_xdp; + + buf = page_address(xdp_page); + put_page(page); + page = xdp_page; + } + 
xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len; xdp.data = xdp.data_hard_start + xdp_headroom; xdp.data_end = xdp.data + len; @@ -462,7 +548,7 @@ static struct sk_buff *receive_small(struct net_device *dev, skb = build_skb(buf, buflen); if (!skb) { - put_page(virt_to_head_page(buf)); + put_page(page); goto err; } skb_reserve(skb, headroom - delta); @@ -478,7 +564,7 @@ err: err_xdp: rcu_read_unlock(); dev->stats.rx_dropped++; - put_page(virt_to_head_page(buf)); + put_page(page); xdp_xmit: return NULL; } @@ -503,66 +589,6 @@ err: return NULL; } -/* The conditions to enable XDP should preclude the underlying device from - * sending packets across multiple buffers (num_buf > 1). However per spec - * it does not appear to be illegal to do so but rather just against convention. - * So in order to avoid making a system unresponsive the packets are pushed - * into a page and the XDP program is run. This will be extremely slow and we - * push a warning to the user to fix this as soon as possible. Fixing this may - * require resolving the underlying hardware to determine why multiple buffers - * are being received or simply loading the XDP program in the ingress stack - * after the skb is built because there is no advantage to running it here - * anymore. - */ -static struct page *xdp_linearize_page(struct receive_queue *rq, - u16 *num_buf, - struct page *p, - int offset, - unsigned int *len) -{ - struct page *page = alloc_page(GFP_ATOMIC); - unsigned int page_off = VIRTIO_XDP_HEADROOM; - - if (!page) - return NULL; - - memcpy(page_address(page) + page_off, page_address(p) + offset, *len); - page_off += *len; - - while (--*num_buf) { - unsigned int buflen; - void *buf; - int off; - - buf = virtqueue_get_buf(rq->vq, &buflen); - if (unlikely(!buf)) - goto err_buf; - - p = virt_to_head_page(buf); - off = buf - page_address(p); - - /* guard against a misconfigured or uncooperative backend that - * is sending packet larger than the MTU. 
- */ - if ((page_off + buflen) > PAGE_SIZE) { - put_page(p); - goto err_buf; - } - - memcpy(page_address(page) + page_off, - page_address(p) + off, buflen); - page_off += buflen; - put_page(p); - } - - /* Headroom does not contribute to packet length */ - *len = page_off - VIRTIO_XDP_HEADROOM; - return page; -err_buf: - __free_pages(page, 0); - return NULL; -} - static struct sk_buff *receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, @@ -577,6 +603,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, struct sk_buff *head_skb, *curr_skb; struct bpf_prog *xdp_prog; unsigned int truesize; + unsigned int headroom = mergeable_ctx_to_headroom(ctx); head_skb = NULL; @@ -589,10 +616,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, u32 act; /* This happens when rx buffer size is underestimated */ - if (unlikely(num_buf > 1)) { + if (unlikely(num_buf > 1 || + headroom < virtnet_get_headroom(vi))) { /* linearize data for XDP */ xdp_page = xdp_linearize_page(rq, &num_buf, - page, offset, &len); + page, offset, + VIRTIO_XDP_HEADROOM, + &len); if (!xdp_page) goto err_xdp; offset = VIRTIO_XDP_HEADROOM; @@ -835,7 +865,6 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp); if (err < 0) put_page(virt_to_head_page(buf)); - return err; } @@ -1840,7 +1869,6 @@ static void virtnet_freeze_down(struct virtio_device *vdev) } static int init_vqs(struct virtnet_info *vi); -static void _remove_vq_common(struct virtnet_info *vi); static int virtnet_restore_up(struct virtio_device *vdev) { @@ -1869,39 +1897,6 @@ static int virtnet_restore_up(struct virtio_device *vdev) return err; } -static int virtnet_reset(struct virtnet_info *vi, int curr_qp, int xdp_qp) -{ - struct virtio_device *dev = vi->vdev; - int ret; - - virtio_config_disable(dev); - dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED; - virtnet_freeze_down(dev); - _remove_vq_common(vi); - - virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); - virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER); - - ret = virtio_finalize_features(dev); - if (ret) - goto err; - - vi->xdp_queue_pairs = xdp_qp; - ret = virtnet_restore_up(dev); - if (ret) - goto err; - ret = _virtnet_set_queues(vi, curr_qp); - if (ret) - goto err; - - virtio_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); - virtio_config_enable(dev); - return 0; -err: - virtio_add_status(dev, VIRTIO_CONFIG_S_FAILED); - return ret; -} - static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { @@ -1948,35 +1943,29 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, return PTR_ERR(prog); } - /* Changing the headroom in buffers is a disruptive operation because - * existing buffers must be flushed and reallocated. This will happen - * when a xdp program is initially added or xdp is disabled by removing - * the xdp program resulting in number of XDP queues changing. - */ - if (vi->xdp_queue_pairs != xdp_qp) { - err = virtnet_reset(vi, curr_qp + xdp_qp, xdp_qp); - if (err) { - dev_warn(&dev->dev, "XDP reset failure.\n"); - goto virtio_reset_err; - } - } + /* Make sure NAPI is not using any XDP TX queues for RX. 
*/ + for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); + err = _virtnet_set_queues(vi, curr_qp + xdp_qp); + if (err) + goto err; + vi->xdp_queue_pairs = xdp_qp; for (i = 0; i < vi->max_queue_pairs; i++) { old_prog = rtnl_dereference(vi->rq[i].xdp_prog); rcu_assign_pointer(vi->rq[i].xdp_prog, prog); if (old_prog) bpf_prog_put(old_prog); + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); } return 0; -virtio_reset_err: - /* On reset error do our best to unwind XDP changes inflight and return - * error up to user space for resolution. The underlying reset hung on - * us so not much we can do here. - */ +err: + for (i = 0; i < vi->max_queue_pairs; i++) + virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); if (prog) bpf_prog_sub(prog, vi->max_queue_pairs - 1); return err; @@ -2622,15 +2611,6 @@ free: return err; } -static void _remove_vq_common(struct virtnet_info *vi) -{ - vi->vdev->config->reset(vi->vdev); - free_unused_bufs(vi); - _free_receive_bufs(vi); - free_receive_page_frags(vi); - virtnet_del_vqs(vi); -} - static void remove_vq_common(struct virtnet_info *vi) { vi->vdev->config->reset(vi->vdev); -- cgit v1.2.3-55-g7522 From 3f93522ffab2d46a36b57adf324a54e674fc9536 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 19 Jul 2017 16:54:49 +0800 Subject: virtio-net: switch off offloads on demand if possible on XDP set Current XDP implementation wants guest offloads feature to be disabled on device. This is inconvenient and means guest can't benefit from offloads if XDP is not used. This patch tries to address this limitation by disabling the offloads on demand through control guest offloads. Guest offloads will be disabled and enabled on demand on XDP set. Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 70 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 65 insertions(+), 5 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f894713dca2a..d4751ce23b4f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -57,6 +57,11 @@ DECLARE_EWMA(pkt_len, 0, 64) #define VIRTNET_DRIVER_VERSION "1.0.0" +const unsigned long guest_offloads[] = { VIRTIO_NET_F_GUEST_TSO4, + VIRTIO_NET_F_GUEST_TSO6, + VIRTIO_NET_F_GUEST_ECN, + VIRTIO_NET_F_GUEST_UFO }; + struct virtnet_stats { struct u64_stats_sync tx_syncp; struct u64_stats_sync rx_syncp; @@ -164,10 +169,13 @@ struct virtnet_info { u8 ctrl_promisc; u8 ctrl_allmulti; u16 ctrl_vid; + u64 ctrl_offloads; /* Ethtool settings */ u8 duplex; u32 speed; + + unsigned long guest_offloads; }; struct padded_vnet_hdr { @@ -1897,6 +1905,47 @@ static int virtnet_restore_up(struct virtio_device *vdev) return err; } +static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) +{ + struct scatterlist sg; + vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); + + sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, + VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { + dev_warn(&vi->dev->dev, "Fail to set guest offload. 
\n"); + return -EINVAL; + } + + return 0; +} + +static int virtnet_clear_guest_offloads(struct virtnet_info *vi) +{ + u64 offloads = 0; + + if (!vi->guest_offloads) + return 0; + + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) + offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM; + + return virtnet_set_guest_offloads(vi, offloads); +} + +static int virtnet_restore_guest_offloads(struct virtnet_info *vi) +{ + u64 offloads = vi->guest_offloads; + + if (!vi->guest_offloads) + return 0; + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) + offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM; + + return virtnet_set_guest_offloads(vi, offloads); +} + static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { @@ -1906,10 +1955,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, u16 xdp_qp = 0, curr_qp; int i, err; - if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || - virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) { + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) + && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || + virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); return -EOPNOTSUPP; } @@ -1956,6 +2006,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, for (i = 0; i < vi->max_queue_pairs; i++) { old_prog = rtnl_dereference(vi->rq[i].xdp_prog); rcu_assign_pointer(vi->rq[i].xdp_prog, prog); + if (i == 0) { + if (!old_prog) + virtnet_clear_guest_offloads(vi); + if (!prog) + virtnet_restore_guest_offloads(vi); + } if (old_prog) bpf_prog_put(old_prog); virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); @@ -2591,6 +2647,10 @@ static int virtnet_probe(struct virtio_device *vdev) netif_carrier_on(dev); } + for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) + if (virtio_has_feature(vi->vdev, guest_offloads[i])) + set_bit(guest_offloads[i], &vi->guest_offloads); + pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); @@ -2687,7 +2747,7 @@ static struct virtio_device_id id_table[] = { VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ VIRTIO_NET_F_CTRL_MAC_ADDR, \ - VIRTIO_NET_F_MTU + VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS static unsigned int features[] = { VIRTNET_FEATURES, -- cgit v1.2.3-55-g7522 From f7ce91038d52780060ccb0d7ee6dd48967b3e132 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 18 Jul 2017 16:43:19 -0500 Subject: net: Convert to using %pOF instead of full_name Now that we have a custom printf format specifier, convert users of full_name to use %pOF instead. This is preparation to remove storing of the full path string for each node. Signed-off-by: Rob Herring Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 +- drivers/net/ethernet/apple/mace.c | 8 ++-- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 4 +- drivers/net/ethernet/freescale/fec_mpc52xx.c | 4 +- drivers/net/ethernet/freescale/fman/fman.c | 12 +++--- drivers/net/ethernet/freescale/fman/fman_port.c | 4 +- drivers/net/ethernet/freescale/fman/mac.c | 50 +++++++++++------------ drivers/net/ethernet/freescale/fsl_pq_mdio.c | 20 +++++----- drivers/net/ethernet/ibm/ehea/ehea_main.c | 5 +-- drivers/net/ethernet/ibm/emac/core.c | 53 +++++++++++-------------- drivers/net/ethernet/ibm/emac/debug.h | 2 +- drivers/net/ethernet/ibm/emac/mal.c | 8 ++-- drivers/net/ethernet/ibm/emac/rgmii.c | 18 ++++----- drivers/net/ethernet/ibm/emac/tah.c | 12 ++---- drivers/net/ethernet/ibm/emac/zmii.c | 17 ++++---- drivers/net/ethernet/sun/niu.c | 24 +++++------ drivers/net/ethernet/ti/cpsw.c | 8 ++-- drivers/net/ethernet/ti/davinci_emac.c | 4 +- drivers/net/ethernet/xilinx/ll_temac_main.c | 2 +- drivers/net/phy/mdio-mux-mmioreg.c | 18 ++++----- drivers/net/phy/mdio-mux.c | 16 ++++---- 21 files changed, 134 insertions(+), 157 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 7fa19d4a8e13..647d5d45c1d6 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2242,7 +2242,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip, if (np) { bus->name = np->full_name; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pOF", np); } else { bus->name = "mv88e6xxx SMI"; snprintf(bus->id, MII_BUS_ID_SIZE, "mv88e6xxx-%d", index++); diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c index 96dd5300e0e5..e58b157b7d7c 100644 --- a/drivers/net/ethernet/apple/mace.c +++ b/drivers/net/ethernet/apple/mace.c @@ -114,8 +114,8 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match) int j, rev, rc = -EBUSY; if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { - printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n", - mace->full_name); + printk(KERN_ERR "can't use MACE %pOF: need 3 addrs and 3 irqs\n", + mace); return -ENODEV; } @@ -123,8 +123,8 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match) if (addr == NULL) { addr = of_get_property(mace, "local-mac-address", NULL); if (addr == NULL) { - printk(KERN_ERR "Can't get mac-address for MACE %s\n", - mace->full_name); + printk(KERN_ERR "Can't get mac-address for MACE %pOF\n", + mace); return -ENODEV; } } diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 757b873735a5..550ea1ec7b6c 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -398,8 +398,8 @@ static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev) of_dev = of_find_device_by_node(mac_node); if (!of_dev) { - dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n", - mac_node->full_name); + dev_err(dpaa_dev, "of_find_device_by_node(%pOF) failed\n", + mac_node); of_node_put(mac_node); return ERR_PTR(-EINVAL); } diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index aa8cf5d2a53c..6d7269d87a85 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -960,8 +960,8 @@ static int mpc52xx_fec_probe(struct platform_device *op) /* We're done ! 
*/ platform_set_drvdata(op, ndev); - netdev_info(ndev, "%s MAC %pM\n", - op->dev.of_node->full_name, ndev->dev_addr); + netdev_info(ndev, "%pOF MAC %pM\n", + op->dev.of_node, ndev->dev_addr); return 0; diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c index 4aefe2438969..e714b8fa55eb 100644 --- a/drivers/net/ethernet/freescale/fman/fman.c +++ b/drivers/net/ethernet/freescale/fman/fman.c @@ -1925,8 +1925,8 @@ static int fman_reset(struct fman *fman) guts_regs = of_iomap(guts_node, 0); if (!guts_regs) { - dev_err(fman->dev, "%s: Couldn't map %s regs\n", - __func__, guts_node->full_name); + dev_err(fman->dev, "%s: Couldn't map %pOF regs\n", + __func__, guts_node); goto guts_regs; } #define FMAN1_ALL_MACS_MASK 0xFCC00000 @@ -2780,8 +2780,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) err = of_property_read_u32(fm_node, "cell-index", &val); if (err) { - dev_err(&of_dev->dev, "%s: failed to read cell-index for %s\n", - __func__, fm_node->full_name); + dev_err(&of_dev->dev, "%s: failed to read cell-index for %pOF\n", + __func__, fm_node); goto fman_node_put; } fman->dts_params.id = (u8)val; @@ -2834,8 +2834,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev) err = of_property_read_u32_array(fm_node, "fsl,qman-channel-range", &range[0], 2); if (err) { - dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %s\n", - __func__, fm_node->full_name); + dev_err(&of_dev->dev, "%s: failed to read fsl,qman-channel-range for %pOF\n", + __func__, fm_node); goto fman_node_put; } fman->dts_params.qman_channel_base = range[0]; diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c index 57bf44fa16a1..49bfa11f2d20 100644 --- a/drivers/net/ethernet/freescale/fman/fman_port.c +++ b/drivers/net/ethernet/freescale/fman/fman_port.c @@ -1720,8 +1720,8 @@ static int fman_port_probe(struct platform_device *of_dev) err = of_property_read_u32(port_node, "cell-index", &val); if (err) { - dev_err(port->dev, "%s: reading cell-index for %s failed\n", - __func__, port_node->full_name); + dev_err(port->dev, "%s: reading cell-index for %pOF failed\n", + __func__, port_node); err = -EINVAL; goto return_err; } diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 6e67d22fd0d5..14cd2c8b0024 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -698,8 +698,8 @@ static int mac_probe(struct platform_device *_of_dev) priv->internal_phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0); } else { - dev_err(dev, "MAC node (%s) contains unsupported MAC\n", - mac_node->full_name); + dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n", + mac_node); err = -EINVAL; goto _return; } @@ -712,16 +712,15 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the FM node */ dev_node = of_get_parent(mac_node); if (!dev_node) { - dev_err(dev, "of_get_parent(%s) failed\n", - mac_node->full_name); + dev_err(dev, "of_get_parent(%pOF) failed\n", + mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } of_dev = of_find_device_by_node(dev_node); if (!of_dev) { - dev_err(dev, "of_find_device_by_node(%s) failed\n", - dev_node->full_name); + dev_err(dev, "of_find_device_by_node(%pOF) failed\n", dev_node); err = -EINVAL; goto _return_of_node_put; } @@ -729,8 +728,7 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the FMan cell-index */ err = 
of_property_read_u32(dev_node, "cell-index", &val); if (err) { - dev_err(dev, "failed to read cell-index for %s\n", - dev_node->full_name); + dev_err(dev, "failed to read cell-index for %pOF\n", dev_node); err = -EINVAL; goto _return_of_node_put; } @@ -739,7 +737,7 @@ static int mac_probe(struct platform_device *_of_dev) priv->fman = fman_bind(&of_dev->dev); if (!priv->fman) { - dev_err(dev, "fman_bind(%s) failed\n", dev_node->full_name); + dev_err(dev, "fman_bind(%pOF) failed\n", dev_node); err = -ENODEV; goto _return_of_node_put; } @@ -749,8 +747,8 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the address of the memory mapped registers */ err = of_address_to_resource(mac_node, 0, &res); if (err < 0) { - dev_err(dev, "of_address_to_resource(%s) = %d\n", - mac_node->full_name, err); + dev_err(dev, "of_address_to_resource(%pOF) = %d\n", + mac_node, err); goto _return_dev_set_drvdata; } @@ -784,8 +782,7 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the cell-index */ err = of_property_read_u32(mac_node, "cell-index", &val); if (err) { - dev_err(dev, "failed to read cell-index for %s\n", - mac_node->full_name); + dev_err(dev, "failed to read cell-index for %pOF\n", mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } @@ -794,8 +791,7 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the MAC address */ mac_addr = of_get_mac_address(mac_node); if (!mac_addr) { - dev_err(dev, "of_get_mac_address(%s) failed\n", - mac_node->full_name); + dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } @@ -804,15 +800,15 @@ static int mac_probe(struct platform_device *_of_dev) /* Get the port handles */ nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL); if (unlikely(nph < 0)) { - dev_err(dev, "of_count_phandle_with_args(%s, fsl,fman-ports) failed\n", - mac_node->full_name); + dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n", + mac_node); err = nph; goto _return_dev_set_drvdata; } if (nph != ARRAY_SIZE(mac_dev->port)) { - dev_err(dev, "Not supported number of fman-ports handles of mac node %s from device tree\n", - mac_node->full_name); + dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n", + mac_node); err = -EINVAL; goto _return_dev_set_drvdata; } @@ -821,24 +817,24 @@ static int mac_probe(struct platform_device *_of_dev) /* Find the port node */ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i); if (!dev_node) { - dev_err(dev, "of_parse_phandle(%s, fsl,fman-ports) failed\n", - mac_node->full_name); + dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n", + mac_node); err = -EINVAL; goto _return_of_node_put; } of_dev = of_find_device_by_node(dev_node); if (!of_dev) { - dev_err(dev, "of_find_device_by_node(%s) failed\n", - dev_node->full_name); + dev_err(dev, "of_find_device_by_node(%pOF) failed\n", + dev_node); err = -EINVAL; goto _return_of_node_put; } mac_dev->port[i] = fman_port_bind(&of_dev->dev); if (!mac_dev->port[i]) { - dev_err(dev, "dev_get_drvdata(%s) failed\n", - dev_node->full_name); + dev_err(dev, "dev_get_drvdata(%pOF) failed\n", + dev_node); err = -EINVAL; goto _return_of_node_put; } @@ -849,8 +845,8 @@ static int mac_probe(struct platform_device *_of_dev) phy_if = of_get_phy_mode(mac_node); if (phy_if < 0) { dev_warn(dev, - "of_get_phy_mode() for %s failed. Defaulting to SGMII\n", - mac_node->full_name); + "of_get_phy_mode() for %pOF failed. 
Defaulting to SGMII\n", + mac_node); phy_if = PHY_INTERFACE_MODE_SGMII; } priv->phy_if = phy_if; diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index a10de1e9c157..80ad16acf0f1 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -267,8 +267,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) ret = of_address_to_resource(np, 0, &res); if (ret < 0) { - pr_debug("fsl-pq-mdio: no address range in node %s\n", - np->full_name); + pr_debug("fsl-pq-mdio: no address range in node %pOF\n", + np); continue; } @@ -280,8 +280,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) if (!iprop) { iprop = of_get_property(np, "device-id", NULL); if (!iprop) { - pr_debug("fsl-pq-mdio: no UCC ID in node %s\n", - np->full_name); + pr_debug("fsl-pq-mdio: no UCC ID in node %pOF\n", + np); continue; } } @@ -293,8 +293,8 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) * numbered from 1, not 0. */ if (ucc_set_qe_mux_mii_mng(id - 1) < 0) { - pr_debug("fsl-pq-mdio: invalid UCC ID in node %s\n", - np->full_name); + pr_debug("fsl-pq-mdio: invalid UCC ID in node %pOF\n", + np); continue; } @@ -442,8 +442,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) if (data->get_tbipa) { for_each_child_of_node(np, tbi) { if (strcmp(tbi->type, "tbi-phy") == 0) { - dev_dbg(&pdev->dev, "found TBI PHY node %s\n", - strrchr(tbi->full_name, '/') + 1); + dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n", + tbi); break; } } @@ -454,8 +454,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) if (!prop) { dev_err(&pdev->dev, - "missing 'reg' property in node %s\n", - tbi->full_name); + "missing 'reg' property in node %pOF\n", + tbi); err = -EBUSY; goto error; } diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index b9d310f20bcc..4878b7169e0f 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3102,8 +3102,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter) dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", NULL); if (!dn_log_port_id) { - pr_err("bad device node: eth_dn name=%s\n", - eth_dn->full_name); + pr_err("bad device node: eth_dn name=%pOF\n", eth_dn); continue; } @@ -3425,7 +3424,7 @@ static int ehea_probe_adapter(struct platform_device *dev) if (!adapter->handle) { dev_err(&dev->dev, "failed getting handle for adapter" - " '%s'\n", dev->dev.of_node->full_name); + " '%pOF'\n", dev->dev.of_node); ret = -ENODEV; goto out_free_ad; } diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 259e69a52ec5..95135d20458f 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -133,8 +133,7 @@ static inline void emac_report_timeout_error(struct emac_instance *dev, EMAC_FTR_440EP_PHY_CLK_FIX)) DBG(dev, "%s" NL, error); else if (net_ratelimit()) - printk(KERN_ERR "%s: %s\n", dev->ofdev->dev.of_node->full_name, - error); + printk(KERN_ERR "%pOF: %s\n", dev->ofdev->dev.of_node, error); } /* EMAC PHY clock workaround: @@ -2258,8 +2257,8 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev, strlcpy(info->driver, "ibm_emac", sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %s", - dev->cell_index, dev->ofdev->dev.of_node->full_name); + snprintf(info->bus_info, 
sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF", + dev->cell_index, dev->ofdev->dev.of_node); } static const struct ethtool_ops emac_ethtool_ops = { @@ -2431,8 +2430,8 @@ static int emac_read_uint_prop(struct device_node *np, const char *name, const u32 *prop = of_get_property(np, name, &len); if (prop == NULL || len < sizeof(u32)) { if (fatal) - printk(KERN_ERR "%s: missing %s property\n", - np->full_name, name); + printk(KERN_ERR "%pOF: missing %s property\n", + np, name); return -ENODEV; } *val = *prop; @@ -2768,7 +2767,7 @@ static int emac_init_phy(struct emac_instance *dev) #endif mutex_unlock(&emac_phy_map_lock); if (i == 0x20) { - printk(KERN_WARNING "%s: can't find PHY!\n", np->full_name); + printk(KERN_WARNING "%pOF: can't find PHY!\n", np); return -ENXIO; } @@ -2894,8 +2893,8 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_NO_FLOW_CTRL dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x; #else - printk(KERN_ERR "%s: Flow control not disabled!\n", - np->full_name); + printk(KERN_ERR "%pOF: Flow control not disabled!\n", + np); return -ENXIO; #endif } @@ -2918,8 +2917,7 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_TAH dev->features |= EMAC_FTR_HAS_TAH; #else - printk(KERN_ERR "%s: TAH support not enabled !\n", - np->full_name); + printk(KERN_ERR "%pOF: TAH support not enabled !\n", np); return -ENXIO; #endif } @@ -2928,8 +2926,7 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_ZMII dev->features |= EMAC_FTR_HAS_ZMII; #else - printk(KERN_ERR "%s: ZMII support not enabled !\n", - np->full_name); + printk(KERN_ERR "%pOF: ZMII support not enabled !\n", np); return -ENXIO; #endif } @@ -2938,8 +2935,7 @@ static int emac_init_config(struct emac_instance *dev) #ifdef CONFIG_IBM_EMAC_RGMII dev->features |= EMAC_FTR_HAS_RGMII; #else - printk(KERN_ERR "%s: RGMII support not enabled !\n", - np->full_name); + printk(KERN_ERR "%pOF: RGMII support not enabled !\n", np); return -ENXIO; #endif } @@ -2947,8 +2943,8 @@ static int emac_init_config(struct emac_instance *dev) /* Read MAC-address */ p = of_get_property(np, "local-mac-address", NULL); if (p == NULL) { - printk(KERN_ERR "%s: Can't find local-mac-address property\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't find local-mac-address property\n", + np); return -ENXIO; } memcpy(dev->ndev->dev_addr, p, ETH_ALEN); @@ -3043,23 +3039,21 @@ static int emac_probe(struct platform_device *ofdev) dev->emac_irq = irq_of_parse_and_map(np, 0); dev->wol_irq = irq_of_parse_and_map(np, 1); if (!dev->emac_irq) { - printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name); + printk(KERN_ERR "%pOF: Can't map main interrupt\n", np); goto err_free; } ndev->irq = dev->emac_irq; /* Map EMAC regs */ if (of_address_to_resource(np, 0, &dev->rsrc_regs)) { - printk(KERN_ERR "%s: Can't get registers address\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't get registers address\n", np); goto err_irq_unmap; } // TODO : request_mem_region dev->emacp = ioremap(dev->rsrc_regs.start, resource_size(&dev->rsrc_regs)); if (dev->emacp == NULL) { - printk(KERN_ERR "%s: Can't map device registers!\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't map device registers!\n", np); err = -ENOMEM; goto err_irq_unmap; } @@ -3068,8 +3062,7 @@ static int emac_probe(struct platform_device *ofdev) err = emac_wait_deps(dev); if (err) { printk(KERN_ERR - "%s: Timeout waiting for dependent devices\n", - np->full_name); + "%pOF: Timeout waiting for dependent devices\n", np); /* display 
more info about what's missing ? */ goto err_reg_unmap; } @@ -3084,8 +3077,8 @@ static int emac_probe(struct platform_device *ofdev) dev->commac.rx_chan_mask = MAL_CHAN_MASK(dev->mal_rx_chan); err = mal_register_commac(dev->mal, &dev->commac); if (err) { - printk(KERN_ERR "%s: failed to register with mal %s!\n", - np->full_name, dev->mal_dev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: failed to register with mal %pOF!\n", + np, dev->mal_dev->dev.of_node); goto err_rel_deps; } dev->rx_skb_size = emac_rx_skb_size(ndev->mtu); @@ -3161,8 +3154,8 @@ static int emac_probe(struct platform_device *ofdev) err = register_netdev(ndev); if (err) { - printk(KERN_ERR "%s: failed to register net device (%d)!\n", - np->full_name, err); + printk(KERN_ERR "%pOF: failed to register net device (%d)!\n", + np, err); goto err_detach_tah; } @@ -3176,8 +3169,8 @@ static int emac_probe(struct platform_device *ofdev) wake_up_all(&emac_probe_wait); - printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n", - ndev->name, dev->cell_index, np->full_name, ndev->dev_addr); + printk(KERN_INFO "%s: EMAC-%d %pOF, MAC %pM\n", + ndev->name, dev->cell_index, np, ndev->dev_addr); if (dev->phy_mode == PHY_MODE_SGMII) printk(KERN_NOTICE "%s: in SGMII mode\n", ndev->name); diff --git a/drivers/net/ethernet/ibm/emac/debug.h b/drivers/net/ethernet/ibm/emac/debug.h index 5bdfc174a07e..9d06d3be3161 100644 --- a/drivers/net/ethernet/ibm/emac/debug.h +++ b/drivers/net/ethernet/ibm/emac/debug.h @@ -31,7 +31,7 @@ #endif #define EMAC_DBG(d, name, fmt, arg...) \ - printk(KERN_DEBUG #name "%s: " fmt, d->ofdev->dev.of_node->full_name, ## arg) + printk(KERN_DEBUG #name "%pOF: " fmt, d->ofdev->dev.of_node, ## arg) #if DBG_LEVEL > 0 # define DBG(d,f,x...) EMAC_DBG(d, emac, f, ##x) diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c index 91b1a558f37d..2c74baa2398a 100644 --- a/drivers/net/ethernet/ibm/emac/mal.c +++ b/drivers/net/ethernet/ibm/emac/mal.c @@ -579,8 +579,8 @@ static int mal_probe(struct platform_device *ofdev) mal->features |= (MAL_FTR_CLEAR_ICINTSTAT | MAL_FTR_COMMON_ERR_INT); #else - printk(KERN_ERR "%s: Support for 405EZ not enabled!\n", - ofdev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: Support for 405EZ not enabled!\n", + ofdev->dev.of_node); err = -ENODEV; goto fail; #endif @@ -687,8 +687,8 @@ static int mal_probe(struct platform_device *ofdev) mal_enable_eob_irq(mal); printk(KERN_INFO - "MAL v%d %s, %d TX channels, %d RX channels\n", - mal->version, ofdev->dev.of_node->full_name, + "MAL v%d %pOF, %d TX channels, %d RX channels\n", + mal->version, ofdev->dev.of_node, mal->num_tx_chans, mal->num_rx_chans); /* Advertise this instance to the rest of the world */ diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index 206ccbbae7bb..c4a1ac38bba8 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -104,8 +104,8 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode) /* Check if we need to attach to a RGMII */ if (input < 0 || !rgmii_valid_mode(mode)) { - printk(KERN_ERR "%s: unsupported settings !\n", - ofdev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: unsupported settings !\n", + ofdev->dev.of_node); return -ENODEV; } @@ -114,8 +114,8 @@ int rgmii_attach(struct platform_device *ofdev, int input, int mode) /* Enable this input */ out_be32(&p->fer, in_be32(&p->fer) | rgmii_mode_mask(mode, input)); - printk(KERN_NOTICE "%s: input %d in %s mode\n", - ofdev->dev.of_node->full_name, input, 
rgmii_mode_name(mode)); + printk(KERN_NOTICE "%pOF: input %d in %s mode\n", + ofdev->dev.of_node, input, rgmii_mode_name(mode)); ++dev->users; @@ -249,8 +249,7 @@ static int rgmii_probe(struct platform_device *ofdev) rc = -ENXIO; if (of_address_to_resource(np, 0, ®s)) { - printk(KERN_ERR "%s: Can't get registers address\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't get registers address\n", np); goto err_free; } @@ -258,8 +257,7 @@ static int rgmii_probe(struct platform_device *ofdev) dev->base = (struct rgmii_regs __iomem *)ioremap(regs.start, sizeof(struct rgmii_regs)); if (dev->base == NULL) { - printk(KERN_ERR "%s: Can't map device registers!\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't map device registers!\n", np); goto err_free; } @@ -278,8 +276,8 @@ static int rgmii_probe(struct platform_device *ofdev) out_be32(&dev->base->fer, 0); printk(KERN_INFO - "RGMII %s initialized with%s MDIO support\n", - ofdev->dev.of_node->full_name, + "RGMII %pOF initialized with%s MDIO support\n", + ofdev->dev.of_node, (dev->flags & EMAC_RGMII_FLAG_HAS_MDIO) ? "" : "out"); wmb(); diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c index 32cb6c9007c5..9912456dca48 100644 --- a/drivers/net/ethernet/ibm/emac/tah.c +++ b/drivers/net/ethernet/ibm/emac/tah.c @@ -58,8 +58,7 @@ void tah_reset(struct platform_device *ofdev) --n; if (unlikely(!n)) - printk(KERN_ERR "%s: reset timeout\n", - ofdev->dev.of_node->full_name); + printk(KERN_ERR "%pOF: reset timeout\n", ofdev->dev.of_node); /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */ out_be32(&p->mr, @@ -105,8 +104,7 @@ static int tah_probe(struct platform_device *ofdev) rc = -ENXIO; if (of_address_to_resource(np, 0, ®s)) { - printk(KERN_ERR "%s: Can't get registers address\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't get registers address\n", np); goto err_free; } @@ -114,8 +112,7 @@ static int tah_probe(struct platform_device *ofdev) dev->base = (struct tah_regs __iomem *)ioremap(regs.start, sizeof(struct tah_regs)); if (dev->base == NULL) { - printk(KERN_ERR "%s: Can't map device registers!\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't map device registers!\n", np); goto err_free; } @@ -124,8 +121,7 @@ static int tah_probe(struct platform_device *ofdev) /* Initialize TAH and enable IPv4 checksum verification, no TSO yet */ tah_reset(ofdev); - printk(KERN_INFO - "TAH %s initialized\n", ofdev->dev.of_node->full_name); + printk(KERN_INFO "TAH %pOF initialized\n", ofdev->dev.of_node); wmb(); return 0; diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c index 8727b865ea02..89c42d362292 100644 --- a/drivers/net/ethernet/ibm/emac/zmii.c +++ b/drivers/net/ethernet/ibm/emac/zmii.c @@ -121,15 +121,15 @@ int zmii_attach(struct platform_device *ofdev, int input, int *mode) } else dev->mode = *mode; - printk(KERN_NOTICE "%s: bridge in %s mode\n", - ofdev->dev.of_node->full_name, + printk(KERN_NOTICE "%pOF: bridge in %s mode\n", + ofdev->dev.of_node, zmii_mode_name(dev->mode)); } else { /* All inputs must use the same mode */ if (*mode != PHY_MODE_NA && *mode != dev->mode) { printk(KERN_ERR - "%s: invalid mode %d specified for input %d\n", - ofdev->dev.of_node->full_name, *mode, input); + "%pOF: invalid mode %d specified for input %d\n", + ofdev->dev.of_node, *mode, input); mutex_unlock(&dev->lock); return -EINVAL; } @@ -250,8 +250,7 @@ static int zmii_probe(struct platform_device *ofdev) rc = -ENXIO; if (of_address_to_resource(np, 0, ®s)) { - printk(KERN_ERR "%s: 
Can't get registers address\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't get registers address\n", np); goto err_free; } @@ -259,8 +258,7 @@ static int zmii_probe(struct platform_device *ofdev) dev->base = (struct zmii_regs __iomem *)ioremap(regs.start, sizeof(struct zmii_regs)); if (dev->base == NULL) { - printk(KERN_ERR "%s: Can't map device registers!\n", - np->full_name); + printk(KERN_ERR "%pOF: Can't map device registers!\n", np); goto err_free; } @@ -270,8 +268,7 @@ static int zmii_probe(struct platform_device *ofdev) /* Disable all inputs by default */ out_be32(&dev->base->fer, 0); - printk(KERN_INFO - "ZMII %s initialized\n", ofdev->dev.of_node->full_name); + printk(KERN_INFO "ZMII %pOF initialized\n", ofdev->dev.of_node); wmb(); platform_set_drvdata(ofdev, dev); diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 4bb04aaf9650..6a4e8e1bbd90 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9221,8 +9221,7 @@ static int niu_get_of_props(struct niu *np) phy_type = of_get_property(dp, "phy-type", &prop_len); if (!phy_type) { - netdev_err(dev, "%s: OF node lacks phy-type property\n", - dp->full_name); + netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp); return -EINVAL; } @@ -9232,26 +9231,25 @@ static int niu_get_of_props(struct niu *np) strcpy(np->vpd.phy_type, phy_type); if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { - netdev_err(dev, "%s: Illegal phy string [%s]\n", - dp->full_name, np->vpd.phy_type); + netdev_err(dev, "%pOF: Illegal phy string [%s]\n", + dp, np->vpd.phy_type); return -EINVAL; } mac_addr = of_get_property(dp, "local-mac-address", &prop_len); if (!mac_addr) { - netdev_err(dev, "%s: OF node lacks local-mac-address property\n", - dp->full_name); + netdev_err(dev, "%pOF: OF node lacks local-mac-address property\n", + dp); return -EINVAL; } if (prop_len != dev->addr_len) { - netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n", - dp->full_name, prop_len); + netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n", + dp, prop_len); } memcpy(dev->dev_addr, mac_addr, dev->addr_len); if (!is_valid_ether_addr(&dev->dev_addr[0])) { - netdev_err(dev, "%s: OF MAC address is invalid\n", - dp->full_name); - netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->dev_addr); + netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp); + netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr); return -EINVAL; } @@ -10027,8 +10025,8 @@ static int niu_of_probe(struct platform_device *op) reg = of_get_property(op->dev.of_node, "reg", NULL); if (!reg) { - dev_err(&op->dev, "%s: No 'reg' property, aborting\n", - op->dev.of_node->full_name); + dev_err(&op->dev, "%pOF: No 'reg' property, aborting\n", + op->dev.of_node); return -ENODEV; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index badd0a8caeb9..c8776dbf1a55 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1321,8 +1321,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) phy = of_phy_connect(priv->ndev, slave->data->phy_node, &cpsw_adjust_link, 0, slave->data->phy_if); if (!phy) { - dev_err(priv->dev, "phy \"%s\" not found on slave %d\n", - slave->data->phy_node->full_name, + dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n", + slave->data->phy_node, slave->slave_num); return; } @@ -2670,8 +2670,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, parp = of_get_property(slave_node, "phy_id", &lenp); if 
(slave_data->phy_node) { dev_dbg(&pdev->dev, - "slave[%d] using phy-handle=\"%s\"\n", - i, slave_data->phy_node->full_name); + "slave[%d] using phy-handle=\"%pOF\"\n", + i, slave_data->phy_node); } else if (of_phy_is_fixed_link(slave_node)) { /* In the case of a fixed PHY, the DT node associated * to the PHY is the Ethernet MAC DT node. diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 64d5527feb2a..4bb561856af5 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1480,8 +1480,8 @@ static int emac_dev_open(struct net_device *ndev) phydev = of_phy_connect(ndev, priv->phy_node, &emac_adjust_link, 0, 0); if (!phydev) { - dev_err(emac_dev, "could not connect to phy %s\n", - priv->phy_node->full_name); + dev_err(emac_dev, "could not connect to phy %pOF\n", + priv->phy_node); ret = -ENODEV; goto err; } diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index d73da8afe08e..60abc9250f56 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1089,7 +1089,7 @@ static int temac_of_probe(struct platform_device *op) lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); if (lp->phy_node) - dev_dbg(lp->dev, "using PHY node %s (%p)\n", np->full_name, np); + dev_dbg(lp->dev, "using PHY node %pOF (%p)\n", np, np); /* Add the device attributes */ rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group); diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c index 6a33646bdf05..c3825c7da038 100644 --- a/drivers/net/phy/mdio-mux-mmioreg.c +++ b/drivers/net/phy/mdio-mux-mmioreg.c @@ -105,7 +105,7 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) const __be32 *iprop; int len, ret; - dev_dbg(&pdev->dev, "probing node %s\n", np->full_name); + dev_dbg(&pdev->dev, "probing node %pOF\n", np); s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); if (!s) @@ -113,8 +113,8 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) ret = of_address_to_resource(np, 0, &res); if (ret) { - dev_err(&pdev->dev, "could not obtain memory map for node %s\n", - np->full_name); + dev_err(&pdev->dev, "could not obtain memory map for node %pOF\n", + np); return ret; } s->phys = res.start; @@ -145,15 +145,15 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) for_each_available_child_of_node(np, np2) { iprop = of_get_property(np2, "reg", &len); if (!iprop || len != sizeof(uint32_t)) { - dev_err(&pdev->dev, "mdio-mux child node %s is " - "missing a 'reg' property\n", np2->full_name); + dev_err(&pdev->dev, "mdio-mux child node %pOF is " + "missing a 'reg' property\n", np2); of_node_put(np2); return -ENODEV; } if (be32_to_cpup(iprop) & ~s->mask) { - dev_err(&pdev->dev, "mdio-mux child node %s has " + dev_err(&pdev->dev, "mdio-mux child node %pOF has " "a 'reg' value with unmasked bits\n", - np2->full_name); + np2); of_node_put(np2); return -ENODEV; } @@ -162,8 +162,8 @@ static int mdio_mux_mmioreg_probe(struct platform_device *pdev) ret = mdio_mux_init(&pdev->dev, mdio_mux_mmioreg_switch_fn, &s->mux_handle, s, NULL); if (ret) { - dev_err(&pdev->dev, "failed to register mdio-mux bus %s\n", - np->full_name); + dev_err(&pdev->dev, "failed to register mdio-mux bus %pOF\n", + np); return ret; } diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index c608e1dfaf09..942ceaf3fd3f 100644 --- a/drivers/net/phy/mdio-mux.c +++ 
b/drivers/net/phy/mdio-mux.c @@ -138,16 +138,16 @@ int mdio_mux_init(struct device *dev, r = of_property_read_u32(child_bus_node, "reg", &v); if (r) { dev_err(dev, - "Error: Failed to find reg for child %s\n", - of_node_full_name(child_bus_node)); + "Error: Failed to find reg for child %pOF\n", + child_bus_node); continue; } cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL); if (cb == NULL) { dev_err(dev, - "Error: Failed to allocate memory for child %s\n", - of_node_full_name(child_bus_node)); + "Error: Failed to allocate memory for child %pOF\n", + child_bus_node); ret_val = -ENOMEM; continue; } @@ -157,8 +157,8 @@ int mdio_mux_init(struct device *dev, cb->mii_bus = mdiobus_alloc(); if (!cb->mii_bus) { dev_err(dev, - "Error: Failed to allocate MDIO bus for child %s\n", - of_node_full_name(child_bus_node)); + "Error: Failed to allocate MDIO bus for child %pOF\n", + child_bus_node); ret_val = -ENOMEM; devm_kfree(dev, cb); continue; @@ -174,8 +174,8 @@ int mdio_mux_init(struct device *dev, r = of_mdiobus_register(cb->mii_bus, child_bus_node); if (r) { dev_err(dev, - "Error: Failed to register MDIO bus for child %s\n", - of_node_full_name(child_bus_node)); + "Error: Failed to register MDIO bus for child %pOF\n", + child_bus_node); mdiobus_free(cb->mii_bus); devm_kfree(dev, cb); } else { -- cgit v1.2.3-55-g7522 From 5b861f6baa3a22a48d7a4ad0ce38a223d36c978a Mon Sep 17 00:00:00 2001 From: Girish Moodalbail Date: Thu, 20 Jul 2017 22:44:20 -0700 Subject: geneve: add rtnl changelink support This patch adds changelink rtnl operation support for geneve devices and the code changes involve: - added geneve_quiesce() which quiesces the geneve device data path for both TX and RX. This lets us perform the changelink operation atomically w.r.t data path. Also added geneve_unquiesce() to reverse the operation of geneve_quiesce(). - refactor geneve_newlink into geneve_nl2info to be used by both geneve_newlink and geneve_changelink - geneve_nl2info takes a changelink boolean argument to isolate changelink checks. - Allow changing only a few attributes (ttl, tos, and remote tunnel endpoint IP address (within the same address family)): - return -EOPNOTSUPP for attributes that cannot be changed for now. Incremental patches can make the non-supported one available in the future if needed. Signed-off-by: Girish Moodalbail Acked-by: Pravin B Shelar Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 218 +++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 176 insertions(+), 42 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index de8156c6b292..0436a4283059 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -715,6 +715,7 @@ free_dst: static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct net_device *dev, + struct geneve_sock *gs4, struct flowi4 *fl4, const struct ip_tunnel_info *info) { @@ -724,7 +725,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct rtable *rt = NULL; __u8 tos; - if (!rcu_dereference(geneve->sock4)) + if (!gs4) return ERR_PTR(-EIO); memset(fl4, 0, sizeof(*fl4)); @@ -764,6 +765,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct net_device *dev, + struct geneve_sock *gs6, struct flowi6 *fl6, const struct ip_tunnel_info *info) { @@ -771,10 +773,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct geneve_dev *geneve = netdev_priv(dev); struct dst_entry *dst = NULL; struct dst_cache *dst_cache; - struct geneve_sock *gs6; __u8 prio; - gs6 = rcu_dereference(geneve->sock6); if (!gs6) return ERR_PTR(-EIO); @@ -827,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 df; int err; - rt = geneve_get_v4_rt(skb, dev, &fl4, info); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -866,7 +866,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - dst = geneve_get_v6_dst(skb, dev, &fl6, info); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -951,8 +951,9 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; struct flowi4 fl4; + struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); - rt = geneve_get_v4_rt(skb, dev, &fl4, info); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -962,8 +963,9 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct flowi6 fl6; + struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); - dst = geneve_get_v6_dst(skb, dev, &fl6, info); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -1140,6 +1142,15 @@ static bool is_tnl_info_zero(const struct ip_tunnel_info *info) return true; } +static bool geneve_dst_addr_equal(struct ip_tunnel_info *a, + struct ip_tunnel_info *b) +{ + if (ip_tunnel_info_af(a) == AF_INET) + return a->key.u.ipv4.dst == b->key.u.ipv4.dst; + else + return ipv6_addr_equal(&a->key.u.ipv6.dst, &b->key.u.ipv6.dst); +} + static int geneve_configure(struct net *net, struct net_device *dev, const struct ip_tunnel_info *info, bool metadata, bool ipv6_rx_csum) @@ -1197,24 +1208,22 @@ static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port) info->key.tp_dst = htons(dst_port); } -static int geneve_newlink(struct net *net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) +static int geneve_nl2info(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], struct ip_tunnel_info *info, + bool *metadata, bool *use_udp6_rx_checksums, + bool changelink) { - bool 
use_udp6_rx_checksums = false; - struct ip_tunnel_info info; - bool metadata = false; - - init_tnl_info(&info, GENEVE_UDP_PORT); - if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) return -EINVAL; if (data[IFLA_GENEVE_REMOTE]) { - info.key.u.ipv4.dst = + if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) + return -EOPNOTSUPP; + + info->key.u.ipv4.dst = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); - if (IN_MULTICAST(ntohl(info.key.u.ipv4.dst))) { + if (IN_MULTICAST(ntohl(info->key.u.ipv4.dst))) { netdev_dbg(dev, "multicast remote is unsupported\n"); return -EINVAL; } @@ -1222,21 +1231,24 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (data[IFLA_GENEVE_REMOTE6]) { #if IS_ENABLED(CONFIG_IPV6) - info.mode = IP_TUNNEL_INFO_IPV6; - info.key.u.ipv6.dst = + if (changelink && (ip_tunnel_info_af(info) == AF_INET)) + return -EOPNOTSUPP; + + info->mode = IP_TUNNEL_INFO_IPV6; + info->key.u.ipv6.dst = nla_get_in6_addr(data[IFLA_GENEVE_REMOTE6]); - if (ipv6_addr_type(&info.key.u.ipv6.dst) & + if (ipv6_addr_type(&info->key.u.ipv6.dst) & IPV6_ADDR_LINKLOCAL) { netdev_dbg(dev, "link-local remote is unsupported\n"); return -EINVAL; } - if (ipv6_addr_is_multicast(&info.key.u.ipv6.dst)) { + if (ipv6_addr_is_multicast(&info->key.u.ipv6.dst)) { netdev_dbg(dev, "multicast remote is unsupported\n"); return -EINVAL; } - info.key.tun_flags |= TUNNEL_CSUM; - use_udp6_rx_checksums = true; + info->key.tun_flags |= TUNNEL_CSUM; + *use_udp6_rx_checksums = true; #else return -EPFNOSUPPORT; #endif @@ -1245,48 +1257,169 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (data[IFLA_GENEVE_ID]) { __u32 vni; __u8 tvni[3]; + __be64 tunid; vni = nla_get_u32(data[IFLA_GENEVE_ID]); tvni[0] = (vni & 0x00ff0000) >> 16; tvni[1] = (vni & 0x0000ff00) >> 8; tvni[2] = vni & 0x000000ff; - info.key.tun_id = vni_to_tunnel_id(tvni); + tunid = vni_to_tunnel_id(tvni); + if (changelink && (tunid != info->key.tun_id)) + return -EOPNOTSUPP; + info->key.tun_id = tunid; } + if (data[IFLA_GENEVE_TTL]) - info.key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); + info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]); if (data[IFLA_GENEVE_TOS]) - info.key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); + info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); if (data[IFLA_GENEVE_LABEL]) { - info.key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & + info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & IPV6_FLOWLABEL_MASK; - if (info.key.label && (!(info.mode & IP_TUNNEL_INFO_IPV6))) + if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) return -EINVAL; } - if (data[IFLA_GENEVE_PORT]) - info.key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]); + if (data[IFLA_GENEVE_PORT]) { + if (changelink) + return -EOPNOTSUPP; + info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]); + } - if (data[IFLA_GENEVE_COLLECT_METADATA]) - metadata = true; + if (data[IFLA_GENEVE_COLLECT_METADATA]) { + if (changelink) + return -EOPNOTSUPP; + *metadata = true; + } - if (data[IFLA_GENEVE_UDP_CSUM] && - nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) - info.key.tun_flags |= TUNNEL_CSUM; + if (data[IFLA_GENEVE_UDP_CSUM]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) + info->key.tun_flags |= TUNNEL_CSUM; + } - if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] && - nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) - info.key.tun_flags &= ~TUNNEL_CSUM; + if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) + 
info->key.tun_flags &= ~TUNNEL_CSUM; + } - if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] && - nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) - use_udp6_rx_checksums = false; + if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) { + if (changelink) + return -EOPNOTSUPP; + if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) + *use_udp6_rx_checksums = false; + } + + return 0; +} + +static int geneve_newlink(struct net *net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + bool use_udp6_rx_checksums = false; + struct ip_tunnel_info info; + bool metadata = false; + int err; + + init_tnl_info(&info, GENEVE_UDP_PORT); + err = geneve_nl2info(dev, tb, data, &info, &metadata, + &use_udp6_rx_checksums, false); + if (err) + return err; return geneve_configure(net, dev, &info, metadata, use_udp6_rx_checksums); } +/* Quiesces the geneve device data path for both TX and RX. + * + * On transmit geneve checks for non-NULL geneve_sock before it proceeds. + * So, if we set that socket to NULL under RCU and wait for synchronize_net() + * to complete for the existing set of in-flight packets to be transmitted, + * then we would have quiesced the transmit data path. All the future packets + * will get dropped until we unquiesce the data path. + * + * On receive geneve dereference the geneve_sock stashed in the socket. So, + * if we set that to NULL under RCU and wait for synchronize_net() to + * complete, then we would have quiesced the receive data path. + */ +static void geneve_quiesce(struct geneve_dev *geneve, struct geneve_sock **gs4, + struct geneve_sock **gs6) +{ + *gs4 = rtnl_dereference(geneve->sock4); + rcu_assign_pointer(geneve->sock4, NULL); + if (*gs4) + rcu_assign_sk_user_data((*gs4)->sock->sk, NULL); +#if IS_ENABLED(CONFIG_IPV6) + *gs6 = rtnl_dereference(geneve->sock6); + rcu_assign_pointer(geneve->sock6, NULL); + if (*gs6) + rcu_assign_sk_user_data((*gs6)->sock->sk, NULL); +#else + *gs6 = NULL; +#endif + synchronize_net(); +} + +/* Resumes the geneve device data path for both TX and RX. */ +static void geneve_unquiesce(struct geneve_dev *geneve, struct geneve_sock *gs4, + struct geneve_sock __maybe_unused *gs6) +{ + rcu_assign_pointer(geneve->sock4, gs4); + if (gs4) + rcu_assign_sk_user_data(gs4->sock->sk, gs4); +#if IS_ENABLED(CONFIG_IPV6) + rcu_assign_pointer(geneve->sock6, gs6); + if (gs6) + rcu_assign_sk_user_data(gs6->sock->sk, gs6); +#endif + synchronize_net(); +} + +static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct geneve_dev *geneve = netdev_priv(dev); + struct geneve_sock *gs4, *gs6; + struct ip_tunnel_info info; + bool metadata; + bool use_udp6_rx_checksums; + int err; + + /* If the geneve device is configured for metadata (or externally + * controlled, for example, OVS), then nothing can be changed. + */ + if (geneve->collect_md) + return -EOPNOTSUPP; + + /* Start with the existing info. 
*/ + memcpy(&info, &geneve->info, sizeof(info)); + metadata = geneve->collect_md; + use_udp6_rx_checksums = geneve->use_udp6_rx_checksums; + err = geneve_nl2info(dev, tb, data, &info, &metadata, + &use_udp6_rx_checksums, true); + if (err) + return err; + + if (!geneve_dst_addr_equal(&geneve->info, &info)) + dst_cache_reset(&info.dst_cache); + + geneve_quiesce(geneve, &gs4, &gs6); + geneve->info = info; + geneve->collect_md = metadata; + geneve->use_udp6_rx_checksums = use_udp6_rx_checksums; + geneve_unquiesce(geneve, gs4, gs6); + + return 0; +} + static void geneve_dellink(struct net_device *dev, struct list_head *head) { struct geneve_dev *geneve = netdev_priv(dev); @@ -1375,6 +1508,7 @@ static struct rtnl_link_ops geneve_link_ops __read_mostly = { .setup = geneve_setup, .validate = geneve_validate, .newlink = geneve_newlink, + .changelink = geneve_changelink, .dellink = geneve_dellink, .get_size = geneve_get_size, .fill_info = geneve_fill_info, -- cgit v1.2.3-55-g7522 From d764a122cc7af7ab1c40c08745f0fcd33cc2f7db Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 21 Jul 2017 12:49:28 +0200 Subject: net: add new netdevice feature for offload of RX port for UDP tunnels This adds a new netdevice feature, so that the offloading of RX port for UDP tunnels can be disabled by the administrator on some netdevices, using the "rx-udp_tunnel-port-offload" feature in ethtool. This feature is set for all devices that provide ndo_udp_tunnel_add. Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller --- include/linux/netdev_features.h | 2 ++ net/core/dev.c | 6 ++++++ net/core/ethtool.c | 1 + 3 files changed, 9 insertions(+) diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index ebd273627334..dc8b4896b77b 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -75,6 +75,7 @@ enum { NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */ NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */ NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ + NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ /* * Add your fresh new feature above and remember to update @@ -138,6 +139,7 @@ enum { #define NETIF_F_HW_TC __NETIF_F(HW_TC) #define NETIF_F_HW_ESP __NETIF_F(HW_ESP) #define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM) +#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT) #define for_each_netdev_feature(mask_addr, bit) \ for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) diff --git a/net/core/dev.c b/net/core/dev.c index 509af6ce8831..9081134adc0d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7530,6 +7530,12 @@ int register_netdevice(struct net_device *dev) */ dev->hw_features |= NETIF_F_SOFT_FEATURES; dev->features |= NETIF_F_SOFT_FEATURES; + + if (dev->netdev_ops->ndo_udp_tunnel_add) { + dev->features |= NETIF_F_RX_UDP_TUNNEL_PORT; + dev->hw_features |= NETIF_F_RX_UDP_TUNNEL_PORT; + } + dev->wanted_features = dev->features & dev->hw_features; if (!(dev->flags & IFF_LOOPBACK)) diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 78408ab77a10..b987bc475fc8 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -105,6 +105,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_HW_TC_BIT] = "hw-tc-offload", [NETIF_F_HW_ESP_BIT] = "esp-hw-offload", [NETIF_F_HW_ESP_TX_CSUM_BIT] = "esp-tx-csum-hw-offload", + [NETIF_F_RX_UDP_TUNNEL_PORT_BIT] = "rx-udp_tunnel-port-offload", }; static const char -- cgit v1.2.3-55-g7522 
From 7a27fc6d536b36a29c0ed4bfff7035420f4df216 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 21 Jul 2017 12:49:29 +0200 Subject: net: check UDP tunnel RX port offload feature before calling tunnel ndo If NETIF_F_RX_UDP_TUNNEL_PORT was disabled on a given netdevice, skip the tunnel offload ndo call during tunnel port creation and deletion. Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller --- net/ipv4/udp_tunnel.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c index 58bd39fb14b4..0d3f14cdc524 100644 --- a/net/ipv4/udp_tunnel.c +++ b/net/ipv4/udp_tunnel.c @@ -82,7 +82,8 @@ void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock, struct sock *sk = sock->sk; struct udp_tunnel_info ti; - if (!dev->netdev_ops->ndo_udp_tunnel_add) + if (!dev->netdev_ops->ndo_udp_tunnel_add || + !(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) return; ti.type = type; @@ -109,6 +110,8 @@ void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type) for_each_netdev_rcu(net, dev) { if (!dev->netdev_ops->ndo_udp_tunnel_add) continue; + if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) + continue; dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti); } rcu_read_unlock(); @@ -131,6 +134,8 @@ void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type) for_each_netdev_rcu(net, dev) { if (!dev->netdev_ops->ndo_udp_tunnel_del) continue; + if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) + continue; dev->netdev_ops->ndo_udp_tunnel_del(dev, &ti); } rcu_read_unlock(); -- cgit v1.2.3-55-g7522 From 296d8ee37c50f139d934bdefbab85509b2e4a525 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 21 Jul 2017 12:49:30 +0200 Subject: net: add infrastructure to un-offload UDP tunnel port This adds a new NETDEV_UDP_TUNNEL_DROP_INFO event, similar to NETDEV_UDP_TUNNEL_PUSH_INFO, to signal to un-offload ports. This also adds udp_tunnel_drop_rx_port(), which calls ndo_udp_tunnel_del. Signed-off-by: Sabrina Dubroca Signed-off-by: David S.
Miller --- include/linux/netdevice.h | 1 + include/net/udp_tunnel.h | 8 ++++++++ net/ipv4/udp_tunnel.c | 18 ++++++++++++++++++ 3 files changed, 27 insertions(+) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 614642eb7eb7..3a3cdc1b1f31 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2317,6 +2317,7 @@ struct netdev_lag_lower_state_info { #define NETDEV_PRECHANGEUPPER 0x001A #define NETDEV_CHANGELOWERSTATE 0x001B #define NETDEV_UDP_TUNNEL_PUSH_INFO 0x001C +#define NETDEV_UDP_TUNNEL_DROP_INFO 0x001D #define NETDEV_CHANGE_TX_QUEUE_LEN 0x001E int register_netdevice_notifier(struct notifier_block *nb); diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 02c5be037451..10cce0dd4450 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -115,6 +115,8 @@ struct udp_tunnel_info { /* Notify network devices of offloadable types */ void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock, unsigned short type); +void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock, + unsigned short type); void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type); void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type); @@ -124,6 +126,12 @@ static inline void udp_tunnel_get_rx_info(struct net_device *dev) call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev); } +static inline void udp_tunnel_drop_rx_info(struct net_device *dev) +{ + ASSERT_RTNL(); + call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev); +} + /* Transmit the skb using UDP encapsulation. */ void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c index 0d3f14cdc524..6539ff15e9a3 100644 --- a/net/ipv4/udp_tunnel.c +++ b/net/ipv4/udp_tunnel.c @@ -94,6 +94,24 @@ void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock, } EXPORT_SYMBOL_GPL(udp_tunnel_push_rx_port); +void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock, + unsigned short type) +{ + struct sock *sk = sock->sk; + struct udp_tunnel_info ti; + + if (!dev->netdev_ops->ndo_udp_tunnel_del || + !(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT)) + return; + + ti.type = type; + ti.sa_family = sk->sk_family; + ti.port = inet_sk(sk)->inet_sport; + + dev->netdev_ops->ndo_udp_tunnel_del(dev, &ti); +} +EXPORT_SYMBOL_GPL(udp_tunnel_drop_rx_port); + /* Notify netdevs that UDP port started listening */ void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type) { -- cgit v1.2.3-55-g7522 From ae847f40b6418a7d6e197f6ef0d85f40e313c4d4 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 21 Jul 2017 12:49:31 +0200 Subject: net: call udp_tunnel_get_rx_info when NETIF_F_RX_UDP_TUNNEL_PORT is toggled NETIF_F_RX_UDP_TUNNEL_PORT is special, in that we need to do more than just flip the bit in dev->features. When disabling we must also clear currently offloaded ports from the device, and when enabling we must tell the device to offload the ports it can. Because vxlan stores its sockets in a hashtable and they are inserted at the head of per-bucket lists, switching the feature off and then on can result in a different set of ports being offloaded (depending on the HW's limits). Signed-off-by: Sabrina Dubroca Signed-off-by: David S. 
Miller --- net/core/dev.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/net/core/dev.c b/net/core/dev.c index 9081134adc0d..8ea6b4b42611 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -144,6 +144,7 @@ #include #include #include +#include #include "net-sysfs.h" @@ -7327,8 +7328,27 @@ sync_lower: netdev_for_each_lower_dev(dev, lower, iter) netdev_sync_lower_features(dev, lower, features); - if (!err) + if (!err) { + netdev_features_t diff = features ^ dev->features; + + if (diff & NETIF_F_RX_UDP_TUNNEL_PORT) { + /* udp_tunnel_{get,drop}_rx_info both need + * NETIF_F_RX_UDP_TUNNEL_PORT enabled on the + * device, or they won't do anything. + * Thus we need to update dev->features + * *before* calling udp_tunnel_get_rx_info, + * but *after* calling udp_tunnel_drop_rx_info. + */ + if (features & NETIF_F_RX_UDP_TUNNEL_PORT) { + dev->features = features; + udp_tunnel_get_rx_info(dev); + } else { + udp_tunnel_drop_rx_info(dev); + } + } + dev->features = features; + } return err < 0 ? 0 : 1; } -- cgit v1.2.3-55-g7522 From 2d2b13fcfff101daa5388c5edac0ae5e3bcc6878 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 21 Jul 2017 12:49:32 +0200 Subject: geneve/vxlan: add support for NETDEV_UDP_TUNNEL_DROP_INFO Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller --- drivers/net/geneve.c | 19 +++++++++++++------ drivers/net/vxlan.c | 25 +++++++++++++++++-------- 2 files changed, 30 insertions(+), 14 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 0436a4283059..74545ecb9431 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1016,16 +1016,22 @@ static struct device_type geneve_type = { * supply the listening GENEVE udp ports. Callers are expected * to implement the ndo_udp_tunnel_add. */ -static void geneve_push_rx_ports(struct net_device *dev) +static void geneve_offload_rx_ports(struct net_device *dev, bool push) { struct net *net = dev_net(dev); struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_sock *gs; rcu_read_lock(); - list_for_each_entry_rcu(gs, &gn->sock_list, list) - udp_tunnel_push_rx_port(dev, gs->sock, - UDP_TUNNEL_TYPE_GENEVE); + list_for_each_entry_rcu(gs, &gn->sock_list, list) { + if (push) { + udp_tunnel_push_rx_port(dev, gs->sock, + UDP_TUNNEL_TYPE_GENEVE); + } else { + udp_tunnel_drop_rx_port(dev, gs->sock, + UDP_TUNNEL_TYPE_GENEVE); + } + } rcu_read_unlock(); } @@ -1560,8 +1566,9 @@ static int geneve_netdevice_event(struct notifier_block *unused, { struct net_device *dev = netdev_notifier_info_to_dev(ptr); - if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) - geneve_push_rx_ports(dev); + if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || + event == NETDEV_UDP_TUNNEL_DROP_INFO) + geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); return NOTIFY_DONE; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 96aa7e6cf214..4642d5be2fa0 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2608,7 +2608,7 @@ static struct device_type vxlan_type = { * supply the listening VXLAN udp ports. Callers are expected * to implement the ndo_udp_tunnel_add. 
*/ -static void vxlan_push_rx_ports(struct net_device *dev) +static void vxlan_offload_rx_ports(struct net_device *dev, bool push) { struct vxlan_sock *vs; struct net *net = dev_net(dev); @@ -2617,11 +2617,19 @@ static void vxlan_push_rx_ports(struct net_device *dev) spin_lock(&vn->sock_lock); for (i = 0; i < PORT_HASH_SIZE; ++i) { - hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) - udp_tunnel_push_rx_port(dev, vs->sock, - (vs->flags & VXLAN_F_GPE) ? - UDP_TUNNEL_TYPE_VXLAN_GPE : - UDP_TUNNEL_TYPE_VXLAN); + hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) { + unsigned short type; + + if (vs->flags & VXLAN_F_GPE) + type = UDP_TUNNEL_TYPE_VXLAN_GPE; + else + type = UDP_TUNNEL_TYPE_VXLAN; + + if (push) + udp_tunnel_push_rx_port(dev, vs->sock, type); + else + udp_tunnel_drop_rx_port(dev, vs->sock, type); + } } spin_unlock(&vn->sock_lock); } @@ -3632,8 +3640,9 @@ static int vxlan_netdevice_event(struct notifier_block *unused, if (event == NETDEV_UNREGISTER) vxlan_handle_lowerdev_unregister(vn, dev); - else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO) - vxlan_push_rx_ports(dev); + else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || + event == NETDEV_UDP_TUNNEL_DROP_INFO) + vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); return NOTIFY_DONE; } -- cgit v1.2.3-55-g7522 From 04584957b5f9b036caa9603525e12d1840d42d58 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 21 Jul 2017 12:49:33 +0200 Subject: geneve/vxlan: offload ports on register/unregister events This improves consistency of handling when moving a netdev to another netns. Most drivers currently do a full reset when the device goes up, so that will flush the offload state anyway. Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller --- drivers/net/geneve.c | 7 ++++++- drivers/net/vxlan.c | 10 +++++++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 74545ecb9431..745d57ae95d7 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1567,8 +1567,13 @@ static int geneve_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || - event == NETDEV_UDP_TUNNEL_DROP_INFO) + event == NETDEV_UDP_TUNNEL_DROP_INFO) { geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); + } else if (event == NETDEV_UNREGISTER) { + geneve_offload_rx_ports(dev, false); + } else if (event == NETDEV_REGISTER) { + geneve_offload_rx_ports(dev, true); + } return NOTIFY_DONE; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 4642d5be2fa0..dbca067540d0 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -3638,11 +3638,15 @@ static int vxlan_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); - if (event == NETDEV_UNREGISTER) + if (event == NETDEV_UNREGISTER) { + vxlan_offload_rx_ports(dev, false); vxlan_handle_lowerdev_unregister(vn, dev); - else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || - event == NETDEV_UDP_TUNNEL_DROP_INFO) + } else if (event == NETDEV_REGISTER) { + vxlan_offload_rx_ports(dev, true); + } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO || + event == NETDEV_UDP_TUNNEL_DROP_INFO) { vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO); + } return NOTIFY_DONE; } -- cgit v1.2.3-55-g7522 From cb1844c47279fb59129f8a021a0b09bcf2011ad7 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 
09:34:26 +0800 Subject: sctp: remove the typedef sctp_initack_chunk_t This patch is to remove the typedef sctp_initack_chunk_t, and replace with struct sctp_initack_chunk in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 5 ++++- net/sctp/sm_statefuns.c | 13 ++++++------- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 913474dfc96c..05c2099be537 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -336,7 +336,10 @@ struct sctp_hmac_algo_param { * The INIT ACK chunk is used to acknowledge the initiation of an SCTP * association. */ -typedef struct sctp_init_chunk sctp_initack_chunk_t; +struct sctp_initack_chunk { + struct sctp_chunkhdr chunk_hdr; + struct sctp_inithdr init_hdr; +}; /* Section 3.3.3.1 State Cookie (7) */ typedef struct sctp_cookie_param { diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index ae4c48c4f657..6568fc395901 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -518,7 +518,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net, return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands); /* Make sure that the INIT-ACK chunk has a valid length */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_initack_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); /* Grab the INIT header. */ @@ -4453,11 +4453,10 @@ static sctp_disposition_t sctp_sf_abort_violation( /* Treat INIT-ACK as a special case during COOKIE-WAIT. */ if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK && !asoc->peer.i.init_tag) { - sctp_initack_chunk_t *initack; + struct sctp_initack_chunk *initack; - initack = (sctp_initack_chunk_t *)chunk->chunk_hdr; - if (!sctp_chunk_length_valid(chunk, - sizeof(sctp_initack_chunk_t))) + initack = (struct sctp_initack_chunk *)chunk->chunk_hdr; + if (!sctp_chunk_length_valid(chunk, sizeof(*initack))) abort->chunk_hdr->flags |= SCTP_CHUNK_FLAG_T; else { unsigned int inittag; @@ -6106,9 +6105,9 @@ static struct sctp_packet *sctp_ootb_pkt_new(struct net *net, switch (chunk->chunk_hdr->type) { case SCTP_CID_INIT_ACK: { - sctp_initack_chunk_t *initack; + struct sctp_initack_chunk *initack; - initack = (sctp_initack_chunk_t *)chunk->chunk_hdr; + initack = (struct sctp_initack_chunk *)chunk->chunk_hdr; vtag = ntohl(initack->init_hdr.init_tag); break; } -- cgit v1.2.3-55-g7522 From f48ef4c7f7979e8e658b7e038a82f096ab292d70 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 09:34:27 +0800 Subject: sctp: remove the typedef sctp_cookie_param_t This patch is to remove the typedef sctp_cookie_param_t, and replace with struct sctp_cookie_param in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 4 ++-- net/sctp/sm_make_chunk.c | 18 ++++++++++-------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 05c2099be537..9e77abda2111 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -342,10 +342,10 @@ struct sctp_initack_chunk { }; /* Section 3.3.3.1 State Cookie (7) */ -typedef struct sctp_cookie_param { +struct sctp_cookie_param { struct sctp_paramhdr p; __u8 body[0]; -} sctp_cookie_param_t; +}; /* Section 3.3.3.1 Unrecognized Parameters (8) */ typedef struct sctp_unrecognized_param { diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 0b36e96cb0df..163004e7047c 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -69,7 +69,8 @@ static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, __u8 type, __u8 flags, int paylen, gfp_t gfp); -static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, +static struct sctp_cookie_param *sctp_pack_cookie( + const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, int *cookie_len, @@ -387,7 +388,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, union sctp_params addrs; struct sctp_sock *sp; int addrs_len; - sctp_cookie_param_t *cookie; + struct sctp_cookie_param *cookie; int cookie_len; size_t chunksize; struct sctp_adaptation_ind_param aiparam; @@ -1595,14 +1596,15 @@ nodata: /* Build a cookie representing asoc. * This INCLUDES the param header needed to put the cookie in the INIT ACK. */ -static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const struct sctp_chunk *init_chunk, - int *cookie_len, - const __u8 *raw_addrs, int addrs_len) +static struct sctp_cookie_param *sctp_pack_cookie( + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const struct sctp_chunk *init_chunk, + int *cookie_len, + const __u8 *raw_addrs, int addrs_len) { - sctp_cookie_param_t *retval; struct sctp_signed_cookie *cookie; + struct sctp_cookie_param *retval; int headersize, bodysize; /* Header size is static data prior to the actual cookie, including -- cgit v1.2.3-55-g7522 From 62e6b7e4ee244f8043c169c049d3b2c2c798cd60 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 09:34:28 +0800 Subject: sctp: remove the typedef sctp_unrecognized_param_t This patch is to remove the typedef sctp_unrecognized_param_t, and replace with struct sctp_unrecognized_param in the places where it's using this typedef. It is also to fix some indents in sctp_sf_do_unexpected_init() and sctp_sf_do_5_1B_init(). Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 4 ++-- net/sctp/sm_statefuns.c | 18 +++++++----------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 9e77abda2111..c323b3e3ecdb 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -348,10 +348,10 @@ struct sctp_cookie_param { }; /* Section 3.3.3.1 Unrecognized Parameters (8) */ -typedef struct sctp_unrecognized_param { +struct sctp_unrecognized_param { struct sctp_paramhdr param_hdr; struct sctp_paramhdr unrecognized; -} sctp_unrecognized_param_t; +}; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 6568fc395901..7f852392f56a 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -306,12 +306,10 @@ sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net, void *arg, sctp_cmd_seq_t *commands) { - struct sctp_chunk *chunk = arg; - struct sctp_chunk *repl; + struct sctp_chunk *chunk = arg, *repl, *err_chunk; + struct sctp_unrecognized_param *unk_param; struct sctp_association *new_asoc; - struct sctp_chunk *err_chunk; struct sctp_packet *packet; - sctp_unrecognized_param_t *unk_param; int len; /* 6.10 Bundling @@ -435,7 +433,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net, * construct the parameters in INIT ACK by copying the * ERROR causes over. */ - unk_param = (sctp_unrecognized_param_t *) + unk_param = (struct sctp_unrecognized_param *) ((__u8 *)(err_chunk->chunk_hdr) + sizeof(struct sctp_chunkhdr)); /* Replace the cause code with the "Unrecognized parameter" @@ -1419,13 +1417,11 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( const sctp_subtype_t type, void *arg, sctp_cmd_seq_t *commands) { - sctp_disposition_t retval; - struct sctp_chunk *chunk = arg; - struct sctp_chunk *repl; + struct sctp_chunk *chunk = arg, *repl, *err_chunk; + struct sctp_unrecognized_param *unk_param; struct sctp_association *new_asoc; - struct sctp_chunk *err_chunk; struct sctp_packet *packet; - sctp_unrecognized_param_t *unk_param; + sctp_disposition_t retval; int len; /* 6.10 Bundling @@ -1555,7 +1551,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( * construct the parameters in INIT ACK by copying the * ERROR causes over. */ - unk_param = (sctp_unrecognized_param_t *) + unk_param = (struct sctp_unrecognized_param *) ((__u8 *)(err_chunk->chunk_hdr) + sizeof(struct sctp_chunkhdr)); /* Replace the cause code with the "Unrecognized parameter" -- cgit v1.2.3-55-g7522 From fe9a0fe7210d803adb3d5817da029fe39b8a4133 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 09:34:29 +0800 Subject: sctp: remove the typedef sctp_gap_ack_block_t This patch is to remove the typedef sctp_gap_ack_block_t, and replace with struct sctp_gap_ack_block in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index c323b3e3ecdb..b84b8e8340da 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -363,16 +363,16 @@ struct sctp_unrecognized_param { * subsequences of DATA chunks as represented by their TSNs. 
*/ -typedef struct sctp_gap_ack_block { +struct sctp_gap_ack_block { __be16 start; __be16 end; -} sctp_gap_ack_block_t; +}; typedef __be32 sctp_dup_tsn_t; typedef union { - sctp_gap_ack_block_t gab; - sctp_dup_tsn_t dup; + struct sctp_gap_ack_block gab; + sctp_dup_tsn_t dup; } sctp_sack_variable_t; typedef struct sctp_sackhdr { -- cgit v1.2.3-55-g7522 From 9b41515636563ae76e730dbcb97fd303b94ed7d9 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 09:34:30 +0800 Subject: sctp: remove the typedef sctp_dup_tsn_t This patch is to remove the typedef sctp_dup_tsn_t, and replace with __be32 in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index b84b8e8340da..8faf74eff63d 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -368,11 +368,9 @@ struct sctp_gap_ack_block { __be16 end; }; -typedef __be32 sctp_dup_tsn_t; - typedef union { struct sctp_gap_ack_block gab; - sctp_dup_tsn_t dup; + __be32 dup; } sctp_sack_variable_t; typedef struct sctp_sackhdr { -- cgit v1.2.3-55-g7522 From afd93b7be6e24731d82d9fd84b8a5ea73a68214b Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 09:34:31 +0800 Subject: sctp: remove the typedef sctp_sack_variable_t This patch is to remove the typedef sctp_sack_variable_t, and replace with union sctp_sack_variable in the places where it's using this typedef. It is also to fix some indents in sctp_acked(). Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 6 +++--- net/sctp/outqueue.c | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 8faf74eff63d..8df6ac53f05b 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -368,17 +368,17 @@ struct sctp_gap_ack_block { __be16 end; }; -typedef union { +union sctp_sack_variable { struct sctp_gap_ack_block gab; __be32 dup; -} sctp_sack_variable_t; +}; typedef struct sctp_sackhdr { __be32 cum_tsn_ack; __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; - sctp_sack_variable_t variable[0]; + union sctp_sack_variable variable[0]; } sctp_sackhdr_t; typedef struct sctp_sack_chunk { diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index e8762702a313..d2a8adfd4a6f 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -1197,7 +1197,7 @@ sctp_flush_out: static void sctp_sack_update_unack_data(struct sctp_association *assoc, struct sctp_sackhdr *sack) { - sctp_sack_variable_t *frags; + union sctp_sack_variable *frags; __u16 unack_data; int i; @@ -1224,7 +1224,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) struct sctp_transport *transport; struct sctp_chunk *tchunk = NULL; struct list_head *lchunk, *transport_list, *temp; - sctp_sack_variable_t *frags = sack->variable; + union sctp_sack_variable *frags = sack->variable; __u32 sack_ctsn, ctsn, tsn; __u32 highest_tsn, highest_new_tsn; __u32 sack_a_rwnd; @@ -1736,10 +1736,10 @@ static void sctp_mark_missing(struct sctp_outq *q, /* Is the given TSN acked by this packet? 
*/ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn) { - int i; - sctp_sack_variable_t *frags; - __u16 tsn_offset, blocks; __u32 ctsn = ntohl(sack->cum_tsn_ack); + union sctp_sack_variable *frags; + __u16 tsn_offset, blocks; + int i; if (TSN_lte(tsn, ctsn)) goto pass; -- cgit v1.2.3-55-g7522 From 787310859d8d1a72545db2343fb3ac8f765b0f35 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 09:34:32 +0800 Subject: sctp: remove the typedef sctp_sackhdr_t This patch is to remove the typedef sctp_sackhdr_t, and replace with struct sctp_sackhdr in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 6 +++--- include/net/sctp/command.h | 4 ++-- net/sctp/sm_statefuns.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 8df6ac53f05b..a2e43129d11a 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -373,17 +373,17 @@ union sctp_sack_variable { __be32 dup; }; -typedef struct sctp_sackhdr { +struct sctp_sackhdr { __be32 cum_tsn_ack; __be32 a_rwnd; __be16 num_gap_ack_blocks; __be16 num_dup_tsns; union sctp_sack_variable variable[0]; -} sctp_sackhdr_t; +}; typedef struct sctp_sack_chunk { struct sctp_chunkhdr chunk_hdr; - sctp_sackhdr_t sack_hdr; + struct sctp_sackhdr sack_hdr; } sctp_sack_chunk_t; diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index d4679e7a5ed5..1d5f6ff3f440 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -135,7 +135,7 @@ typedef union { struct sctp_init_chunk *init; struct sctp_ulpevent *ulpevent; struct sctp_packet *packet; - sctp_sackhdr_t *sackh; + struct sctp_sackhdr *sackh; struct sctp_datamsg *msg; } sctp_arg_t; @@ -176,7 +176,7 @@ SCTP_ARG_CONSTRUCTOR(BA, struct sctp_bind_addr *, bp) SCTP_ARG_CONSTRUCTOR(PEER_INIT, struct sctp_init_chunk *, init) SCTP_ARG_CONSTRUCTOR(ULPEVENT, struct sctp_ulpevent *, ulpevent) SCTP_ARG_CONSTRUCTOR(PACKET, struct sctp_packet *, packet) -SCTP_ARG_CONSTRUCTOR(SACKH, sctp_sackhdr_t *, sackh) +SCTP_ARG_CONSTRUCTOR(SACKH, struct sctp_sackhdr *, sackh) SCTP_ARG_CONSTRUCTOR(DATAMSG, struct sctp_datamsg *, msg) static inline sctp_arg_t SCTP_FORCE(void) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 7f852392f56a..c09dfe6ebac2 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3187,7 +3187,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - sctp_sackhdr_t *sackh; + struct sctp_sackhdr *sackh; __u32 ctsn; if (!sctp_vtag_verify(chunk, asoc)) -- cgit v1.2.3-55-g7522 From d4d6c61489e7e4a8944360312e572988889558a8 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 09:34:33 +0800 Subject: sctp: remove the typedef sctp_sack_chunk_t This patch is to remove the typedef sctp_sack_chunk_t, and replace with struct sctp_sack_chunk in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 4 ++-- net/sctp/chunk.c | 2 +- net/sctp/sm_statefuns.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index a2e43129d11a..48f6560dd880 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -381,10 +381,10 @@ struct sctp_sackhdr { union sctp_sack_variable variable[0]; }; -typedef struct sctp_sack_chunk { +struct sctp_sack_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_sackhdr sack_hdr; -} sctp_sack_chunk_t; +}; /* RFC 2960. Section 3.3.5 Heartbeat Request (HEARTBEAT) (4): diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 1323d41e68b8..681b181e7ae3 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -221,7 +221,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, asoc->outqueue.out_qlen == 0 && list_empty(&asoc->outqueue.retransmit) && msg_len > max_data) - first_len -= SCTP_PAD4(sizeof(sctp_sack_chunk_t)); + first_len -= SCTP_PAD4(sizeof(struct sctp_sack_chunk)); /* Encourage Cookie-ECHO bundling. */ if (asoc->state < SCTP_STATE_COOKIE_ECHOED) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index c09dfe6ebac2..08ebe8cd96c7 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3194,7 +3194,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the SACK chunk has a valid length. */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_sack_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); @@ -4515,7 +4515,7 @@ nomem: * Handle a protocol violation when the chunk length is invalid. * "Invalid" length is identified as smaller than the minimal length a * given chunk can be. For example, a SACK chunk has invalid length - * if its length is set to be smaller than the size of sctp_sack_chunk_t. + * if its length is set to be smaller than the size of struct sctp_sack_chunk. * * We inform the other end by sending an ABORT with a Protocol Violation * error code. -- cgit v1.2.3-55-g7522 From 4d2dcdf4e04c938b266f06f271000e4b0f3a288f Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 09:34:34 +0800 Subject: sctp: remove the typedef sctp_heartbeathdr_t This patch is to remove the typedef sctp_heartbeathdr_t, and replace with struct sctp_heartbeathdr in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 6 +++--- net/sctp/sm_statefuns.c | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 48f6560dd880..6e26b86770f1 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -394,13 +394,13 @@ struct sctp_sack_chunk { * the present association. */ -typedef struct sctp_heartbeathdr { +struct sctp_heartbeathdr { struct sctp_paramhdr info; -} sctp_heartbeathdr_t; +}; typedef struct sctp_heartbeat_chunk { struct sctp_chunkhdr chunk_hdr; - sctp_heartbeathdr_t hb_hdr; + struct sctp_heartbeathdr hb_hdr; } sctp_heartbeat_chunk_t; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 08ebe8cd96c7..32ac90b22654 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1096,7 +1096,7 @@ sctp_disposition_t sctp_sf_beat_8_3(struct net *net, * respond with a HEARTBEAT ACK that contains the Heartbeat * Information field copied from the received HEARTBEAT chunk. 
*/ - chunk->subh.hb_hdr = (sctp_heartbeathdr_t *)chunk->skb->data; + chunk->subh.hb_hdr = (struct sctp_heartbeathdr *)chunk->skb->data; param_hdr = (struct sctp_paramhdr *)chunk->subh.hb_hdr; paylen = ntohs(chunk->chunk_hdr->length) - sizeof(struct sctp_chunkhdr); -- cgit v1.2.3-55-g7522 From 38c00f7482281801d6f7fe410c7a4b61ae25218e Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 09:34:35 +0800 Subject: sctp: remove the typedef sctp_heartbeat_chunk_t This patch is to remove the typedef sctp_heartbeat_chunk_t, and replace with struct sctp_heartbeat_chunk in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 4 ++-- net/sctp/sm_statefuns.c | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 6e26b86770f1..bfda7c66960c 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -398,10 +398,10 @@ struct sctp_heartbeathdr { struct sctp_paramhdr info; }; -typedef struct sctp_heartbeat_chunk { +struct sctp_heartbeat_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_heartbeathdr hb_hdr; -} sctp_heartbeat_chunk_t; +}; /* For the abort and shutdown ACK we must carry the init tag in the diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 32ac90b22654..7bbee085b476 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1088,7 +1088,8 @@ sctp_disposition_t sctp_sf_beat_8_3(struct net *net, return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the HEARTBEAT chunk has a valid length. */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t))) + if (!sctp_chunk_length_valid(chunk, + sizeof(struct sctp_heartbeat_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); -- cgit v1.2.3-55-g7522 From 441ae65ae000dd325a609306bd0cd850df50cfc4 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 23 Jul 2017 09:34:36 +0800 Subject: sctp: remove the typedef sctp_abort_chunk_t This patch is to remove the typedef sctp_abort_chunk_t, and replace with struct sctp_abort_chunk in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 4 ++-- net/sctp/sm_statefuns.c | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index bfda7c66960c..ba007163acfd 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -408,9 +408,9 @@ struct sctp_heartbeat_chunk { * common header. Just the common header is all that is needed with a * chunk descriptor. */ -typedef struct sctp_abort_chunk { +struct sctp_abort_chunk { struct sctp_chunkhdr uh; -} sctp_abort_chunk_t; +}; /* For the graceful shutdown we must carry the tag (in common header) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 7bbee085b476..dc0c2c4188d8 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2164,7 +2164,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort( * as we do not know its true length. So, to be safe, discard the * packet. */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* ADD-IP: Special case for ABORT chunks @@ -2206,7 +2206,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net, * as we do not know its true length. So, to be safe, discard the * packet. 
*/ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* ADD-IP: Special case for ABORT chunks @@ -2470,7 +2470,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net, * as we do not know its true length. So, to be safe, discard the * packet. */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* ADD-IP: Special case for ABORT chunks @@ -2546,7 +2546,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net, * as we do not know its true length. So, to be safe, discard the * packet. */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_abort_chunk))) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* See if we have an error cause code in the chunk. */ -- cgit v1.2.3-55-g7522 From 4a3c67a6e7cd212fe799ab3d07782c7c8688b4cc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Fri, 21 Jul 2017 20:31:38 +0200 Subject: mlxsw: spectrum_router: Don't batch neighbour deletion Current firmware supported by the driver doesn't support batch deletion of IPv6 neighbours on a given router interface (RIF). Until a new version that supports this functionality is made available, delete neighbours one by one. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index e6d629f40f93..548552ccec3d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1419,25 +1419,16 @@ static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp) rhashtable_destroy(&mlxsw_sp->router->neigh_ht); } -static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_rif *rif) -{ - char rauht_pl[MLXSW_REG_RAUHT_LEN]; - - mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL, - rif->rif_index, rif->addr); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl); -} - static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_rif *rif) { struct mlxsw_sp_neigh_entry *neigh_entry, *tmp; - mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif); list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list, - rif_list_node) + rif_list_node) { + mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false); mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry); + } } struct mlxsw_sp_nexthop_key { -- cgit v1.2.3-55-g7522 From 1bb79284fcf9ebacff16ea0b81b3d5b86528cfd4 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Sat, 22 Jul 2017 02:54:43 +0800 Subject: netvsc: fix ptr_ret.cocci warnings drivers/net/hyperv/netvsc_drv.c:737:8-14: WARNING: PTR_ERR_OR_ZERO can be used Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR Generated by: scripts/coccinelle/api/ptr_ret.cocci Fixes: 9749fed5d43d ("netvsc: use ERR_PTR to avoid dereference issues") CC: stephen hemminger Signed-off-by: Fengguang Wu Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a164981c15f7..9a9e269a25ae 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -734,7 +734,7 @@ static int netvsc_set_queues(struct net_device *net, struct hv_device *dev, return ret; net_device = rndis_filter_device_add(dev, &device_info); - return IS_ERR(net_device) ? PTR_ERR(net_device) : 0; + return PTR_ERR_OR_ZERO(net_device); } static int netvsc_set_channels(struct net_device *net, -- cgit v1.2.3-55-g7522 From 241a974ba2c0d98e2104012cb80ed4494c0e66a7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 22 Jul 2017 10:40:04 +0300 Subject: bpf: dev_map_alloc() shouldn't return NULL We forgot to set the error code on two error paths which means that we return ERR_PTR(0) which is NULL. The caller, find_and_alloc_map(), is not expecting that and will have a NULL dereference. Fixes: 546ac1ffb70d ("bpf: add devmap, a map for storing net device references") Signed-off-by: Dan Carpenter Acked-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: David S. Miller --- kernel/bpf/devmap.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 899364d097f5..d439ee0eadb1 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -114,6 +114,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) if (err) goto free_dtab; + err = -ENOMEM; /* A per cpu bitfield with a bit per possible net device */ dtab->flush_needed = __alloc_percpu( BITS_TO_LONGS(attr->max_entries) * -- cgit v1.2.3-55-g7522 From ba3fb1022154d93fe71ee78e28e195207d511bc0 Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Sun, 23 Jul 2017 10:44:52 -0400 Subject: liquidio: fix implicit irq include causing build failures To fix In file included from drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c:24:0: drivers/net/ethernet/cavium/liquidio/octeon_device.h:216:2: error: expected specifier-qualifier-list before ‘irqreturn_t’ irqreturn_t (*process_interrupt_regs)(void *); ^ as seen on arm64 allmodconfig builds. Cc: Derek Chickles Cc: Satanand Burla Cc: Felix Manlunas Cc: Raghu Vatsavayi Signed-off-by: Paul Gortmaker Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/octeon_device.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index c90ed48ae8ab..ad464788c923 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -22,6 +22,8 @@ #ifndef _OCTEON_DEVICE_H_ #define _OCTEON_DEVICE_H_ +#include + /** PCI VendorId Device Id */ #define OCTEON_CN68XX_PCIID 0x91177d #define OCTEON_CN66XX_PCIID 0x92177d -- cgit v1.2.3-55-g7522 From a28b1b90de8322ecc45d58f1c08da12197dad17f Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 23 Jul 2017 19:54:47 +0200 Subject: skbuff: re-add check for NULL skb->head in kfree_skb path A null check is needed after all. netlink skbs can have skb->head be backed by vmalloc. The netlink destructor vfree()s head, then sets it to NULL. We then panic in skb_release_data with a NULL dereference. Re-add such a test. Alternative would be to switch to kvfree to free skb->head memory and remove the special handling in netlink destructor. 
Reported-by: kernel test robot Fixes: 06dc75ab06943 ("net: Revert "net: add function to allocate sk_buff head without data area") Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/core/skbuff.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 84bdfa229b0d..c27da51d14e4 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -638,7 +638,8 @@ void skb_release_head_state(struct sk_buff *skb) static void skb_release_all(struct sk_buff *skb) { skb_release_head_state(skb); - skb_release_data(skb); + if (likely(skb->head)) + skb_release_data(skb); } /** -- cgit v1.2.3-55-g7522 From e470e4f787b94ba0a08cd3b49948e823416f5a6d Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 23 Jul 2017 21:45:47 +0300 Subject: of_mdio: kill useless variable in of_phy_register_fixed_link() of_phy_register_fixed_link() declares the 'err' variable to hold the result of of_property_read_string() but only uses it once after that, while that function can be called directly from the *if* statement... Remove that variable and move/regroup 'link_gpio' and 'len' variables in order to sort the declarations in the reverse Xmas tree order -- to please DaveM. ;-) Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/of/of_mdio.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index e0dbd6e48a98..a0d27c04e22f 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -422,13 +422,11 @@ int of_phy_register_fixed_link(struct device_node *np) struct fixed_phy_status status = {}; struct device_node *fixed_link_node; const __be32 *fixed_link_prop; - int link_gpio; - int len, err; struct phy_device *phy; const char *managed; + int link_gpio, len; - err = of_property_read_string(np, "managed", &managed); - if (err == 0) { + if (of_property_read_string(np, "managed", &managed) == 0) { if (strcmp(managed, "in-band-status") == 0) { /* status is zeroed, namely its .link member */ phy = fixed_phy_register(PHY_POLL, &status, -1, np); -- cgit v1.2.3-55-g7522 From f3eebe88195478b1bfbe8510d58a8c226ea759a8 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Sun, 23 Jul 2017 23:02:53 -0400 Subject: mlx4_en: remove unnecessary returned value The function mlx4_en_arm_cq always returns zero. So change the return type of the function mlx4_en_arm_cq to void. CC: Joe Jin CC: Junxiao Bi Signed-off-by: Zhu Yanjun Reviewed-by: Leon Romanovsky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 4 +--- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 85fe17e4dcfb..87d1f4d2a77b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -208,12 +208,10 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) cq->moder_cnt, cq->moder_time); } -int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) +void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map, &priv->mdev->uar_lock); - - return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d350b2158104..fdb3ad0cbe54 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -685,7 +685,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, int cq_idx); void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); -int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); +void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, -- cgit v1.2.3-55-g7522 From b5f3e0d430122a551cbc2b88068dae8b2c2c0031 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 24 Jul 2017 09:56:00 +0200 Subject: mlxsw: spectrum_router: Fix build when IPv6 isn't enabled When IPv6 isn't enabled the following error is generated: ERROR: "nd_tbl" [drivers/net/ethernet/mellanox/mlxsw/mlxsw_spectrum.ko] undefined! Fix it by replacing 'arp_tbl' and 'nd_tbl' with 'tbl->family' wherever possible and reference 'nd_tbl' only when IPV6 is enabled. Fixes: d5eb89cf68d6 ("mlxsw: spectrum_router: Reflect IPv6 neighbours to the device") Signed-off-by: Ido Schimmel Reported-by: kbuild test robot Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 548552ccec3d..8bf076d22fb6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -949,9 +950,13 @@ mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp) { unsigned long interval; +#if IS_ENABLED(CONFIG_IPV6) interval = min_t(unsigned long, NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME), NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME)); +#else + interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME); +#endif mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval); } @@ -986,6 +991,7 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, neigh_release(n); } +#if IS_ENABLED(IPV6) static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, char *rauhtd_pl, int rec_index) @@ -1015,6 +1021,13 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, neigh_event_send(n, NULL); neigh_release(n); } +#else +static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, + char *rauhtd_pl, + int rec_index) +{ +} +#endif static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp, char *rauhtd_pl, @@ -1260,10 +1273,10 @@ mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp, if (!adding && !neigh_entry->connected) return; neigh_entry->connected = adding; - if (neigh_entry->key.n->tbl == &arp_tbl) { + if (neigh_entry->key.n->tbl->family == AF_INET) { mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry, mlxsw_sp_rauht_op(adding)); - } else if (neigh_entry->key.n->tbl == &nd_tbl) { + } else if (neigh_entry->key.n->tbl->family == AF_INET6) { if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry->key.n)) return; mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry, @@ -1339,7 +1352,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, p = ptr; /* We don't care about changes in the default table. */ - if (!p->dev || (p->tbl != &arp_tbl && p->tbl != &nd_tbl)) + if (!p->dev || (p->tbl->family != AF_INET && + p->tbl->family != AF_INET6)) return NOTIFY_DONE; /* We are in atomic context and can't take RTNL mutex, @@ -1358,7 +1372,7 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, case NETEVENT_NEIGH_UPDATE: n = ptr; - if (n->tbl != &arp_tbl && n->tbl != &nd_tbl) + if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6) return NOTIFY_DONE; mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev); -- cgit v1.2.3-55-g7522 From f575a02ee76cda1eff5e6949aff7cf5c386e3977 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Mon, 24 Jul 2017 04:22:44 -0400 Subject: mlx4_en: remove unnecessary error check The function mlx4_en_get_profile always return zero. So it is not necessary to check its return value. CC: Joe Jin CC: Junxiao Bi Signed-off-by: Zhu Yanjun Reviewed-by: Tariq Toukan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_main.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index 2b0cbca4beb5..686e18de9a97 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -147,7 +147,7 @@ void mlx4_en_update_loopback_state(struct net_device *dev, mutex_unlock(&priv->mdev->state_lock); } -static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) +static void mlx4_en_get_profile(struct mlx4_en_dev *mdev) { struct mlx4_en_profile *params = &mdev->profile; int i; @@ -176,8 +176,6 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) params->prof[i].rss_rings = 0; params->prof[i].inline_thold = inline_thold; } - - return 0; } static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port) @@ -309,10 +307,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev) } /* Build device profile according to supplied module parameters */ - if (mlx4_en_get_profile(mdev)) { - mlx4_err(mdev, "Bad module parameters, aborting\n"); - goto err_mr; - } + mlx4_en_get_profile(mdev); /* Configure which ports to start according to module parameters */ mdev->port_cnt = 0; -- cgit v1.2.3-55-g7522 From e42e24c3cc072088756d84ef07b492ac2a3ae2e5 Mon Sep 17 00:00:00 2001 From: Matvejchikov Ilya Date: Mon, 24 Jul 2017 16:02:12 +0400 Subject: tcp: remove redundant argument from tcp_rcv_established() The last (4th) argument of tcp_rcv_established() is redundant as it always equals to skb->len and the skb itself is always passed as 2th agrument. There is no reason to have it. Signed-off-by: Ilya V. Matveychikov Signed-off-by: David S. Miller --- include/net/tcp.h | 2 +- net/ipv4/tcp_input.c | 3 ++- net/ipv4/tcp_ipv4.c | 2 +- net/ipv4/tcp_probe.c | 5 +++-- net/ipv6/tcp_ipv6.c | 2 +- 5 files changed, 8 insertions(+), 6 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 4f056ea79df2..12d68335acd4 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -362,7 +362,7 @@ void tcp_delack_timer_handler(struct sock *sk); int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb); void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct tcphdr *th, unsigned int len); + const struct tcphdr *th); void tcp_rcv_space_adjust(struct sock *sk); int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); void tcp_twsk_destructor(struct sock *sk); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2920e0cb09f8..adc3f3e9468c 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5358,8 +5358,9 @@ discard: * tcp_data_queue when everything is OK. 
*/ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct tcphdr *th, unsigned int len) + const struct tcphdr *th) { + unsigned int len = skb->len; struct tcp_sock *tp = tcp_sk(sk); tcp_mstamp_refresh(tp); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a20e7f03d5f7..3a19ea28339f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1458,7 +1458,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) sk->sk_rx_dst = NULL; } } - tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); + tcp_rcv_established(sk, skb, tcp_hdr(skb)); return 0; } diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index f6c50af24a64..697f4c67b2e3 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -105,8 +105,9 @@ static inline int tcp_probe_avail(void) * Note: arguments must match tcp_rcv_established()! */ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, - const struct tcphdr *th, unsigned int len) + const struct tcphdr *th) { + unsigned int len = skb->len; const struct tcp_sock *tp = tcp_sk(sk); const struct inet_sock *inet = inet_sk(sk); @@ -145,7 +146,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, BUG(); } - p->length = skb->len; + p->length = len; p->snd_nxt = tp->snd_nxt; p->snd_una = tp->snd_una; p->snd_cwnd = tp->snd_cwnd; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 2521690d62d6..90a32576c3d0 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1296,7 +1296,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) } } - tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); + tcp_rcv_established(sk, skb, tcp_hdr(skb)); if (opt_skb) goto ipv6_pktoptions; return 0; -- cgit v1.2.3-55-g7522 From acb2005463612930b07723e852b2483d669ff856 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 24 Jul 2017 12:34:20 -0400 Subject: bnxt_en: Update firmware interface spec to 1.8.0. VF representors and PTP are added features in the new firmware spec. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 8 +- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 322 +++++++++++++++++++++--- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 9 +- 4 files changed, 308 insertions(+), 39 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index e7c8539cbddf..2103f1412359 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5646,7 +5646,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) if (rc) goto hwrm_phy_qcaps_exit; - if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) { + if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) { struct ethtool_eee *eee = &bp->eee; u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode); @@ -5686,13 +5686,15 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp)); link_info->phy_link_status = resp->link; - link_info->duplex = resp->duplex; + link_info->duplex = resp->duplex_cfg; + if (bp->hwrm_spec_code >= 0x10800) + link_info->duplex = resp->duplex_state; link_info->pause = resp->pause; link_info->auto_mode = resp->auto_mode; link_info->auto_pause_setting = resp->auto_pause; link_info->lp_pause = resp->link_partner_adv_pause; link_info->force_pause_setting = resp->force_pause; - link_info->duplex_setting = resp->duplex; + link_info->duplex_setting = resp->duplex_cfg; if (link_info->phy_link_status == BNXT_LINK_LINK) link_info->link_speed = le16_to_cpu(resp->link_speed); else diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index f34691f85602..505691a33a7c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -12,10 +12,10 @@ #define BNXT_H #define DRV_MODULE_NAME "bnxt_en" -#define DRV_MODULE_VERSION "1.7.0" +#define DRV_MODULE_VERSION "1.8.0" #define DRV_VER_MAJ 1 -#define DRV_VER_MIN 7 +#define DRV_VER_MIN 8 #define DRV_VER_UPD 0 #include @@ -825,8 +825,8 @@ struct bnxt_link_info { u8 loop_back; u8 link_up; u8 duplex; -#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF -#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL +#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF +#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL u8 pause; #define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX #define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 7dc71bb95837..3ba22e8ee914 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -11,14 +11,14 @@ #ifndef BNXT_HSI_H #define BNXT_HSI_H -/* HSI and HWRM Specification 1.7.6 */ +/* HSI and HWRM Specification 1.8.0 */ #define HWRM_VERSION_MAJOR 1 -#define HWRM_VERSION_MINOR 7 -#define HWRM_VERSION_UPDATE 6 +#define HWRM_VERSION_MINOR 8 +#define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 2 /* non-zero means beta version */ +#define HWRM_VERSION_RSVD 0 /* non-zero means beta version */ -#define HWRM_VERSION_STR "1.7.6.2" +#define HWRM_VERSION_STR "1.8.0.0" /* * Following is the signature for HWRM message field that indicates not * applicable (All F's). Need to cast it the size of the field if needed. 
@@ -813,7 +813,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL - #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL + #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -835,9 +835,8 @@ struct hwrm_func_qcfg_output { u8 port_pf_cnt; #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL __le16 dflt_vnic_id; - u8 host_cnt; - #define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL 0x0UL u8 unused_0; + u8 unused_1; __le32 min_bw; #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0 @@ -874,12 +873,56 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL - u8 unused_1; + u8 unused_2; __le16 alloc_vfs; __le32 alloc_mcast_filters; __le32 alloc_hw_ring_grps; __le16 alloc_sp_tx_rings; + u8 unused_3; + u8 valid; +}; + +/* hwrm_func_vlan_cfg */ +/* Input (48 bytes) */ +struct hwrm_func_vlan_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 fid; + u8 unused_0; + u8 unused_1; + __le32 enables; + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL + #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL + #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL + __le16 stag_vid; + u8 stag_pcp; u8 unused_2; + __be16 stag_tpid; + __le16 ctag_vid; + u8 ctag_pcp; + u8 unused_3; + __be16 ctag_tpid; + __le32 rsvd1; + __le32 rsvd2; + __le32 unused_4; +}; + +/* Output (16 bytes) */ +struct hwrm_func_vlan_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; u8 valid; }; @@ -902,6 +945,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL + #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -1456,9 +1500,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL - u8 duplex; - #define PORT_PHY_QCFG_RESP_DUPLEX_HALF 0x0UL - #define PORT_PHY_QCFG_RESP_DUPLEX_FULL 0x1UL + u8 duplex_cfg; + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL u8 pause; #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL @@ -1573,6 +1617,9 @@ struct hwrm_port_phy_qcfg_output { #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL + #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL u8 media_type; #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL @@ -1651,14 +1698,16 @@ struct hwrm_port_phy_qcfg_output { #define 
PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL + u8 duplex_state; + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL + #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL u8 unused_1; - u8 unused_2; char phy_vendor_name[16]; char phy_vendor_partnumber[16]; - __le32 unused_3; + __le32 unused_2; + u8 unused_3; u8 unused_4; u8 unused_5; - u8 unused_6; u8 valid; }; @@ -1744,6 +1793,51 @@ struct hwrm_port_mac_cfg_output { u8 valid; }; +/* hwrm_port_mac_ptp_qcfg */ +/* Input (24 bytes) */ +struct hwrm_port_mac_ptp_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 port_id; + __le16 unused_0[3]; +}; + +/* Output (80 bytes) */ +struct hwrm_port_mac_ptp_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL + #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL + u8 unused_0; + __le16 unused_1; + __le32 rx_ts_reg_off_lower; + __le32 rx_ts_reg_off_upper; + __le32 rx_ts_reg_off_seq_id; + __le32 rx_ts_reg_off_src_id_0; + __le32 rx_ts_reg_off_src_id_1; + __le32 rx_ts_reg_off_src_id_2; + __le32 rx_ts_reg_off_domain_id; + __le32 rx_ts_reg_off_fifo; + __le32 rx_ts_reg_off_fifo_adv; + __le32 rx_ts_reg_off_granularity; + __le32 tx_ts_reg_off_lower; + __le32 tx_ts_reg_off_upper; + __le32 tx_ts_reg_off_seq_id; + __le32 tx_ts_reg_off_fifo; + __le32 tx_ts_reg_off_granularity; + __le32 unused_2; + u8 unused_3; + u8 unused_4; + u8 unused_5; + u8 valid; +}; + /* hwrm_port_qstats */ /* Input (40 bytes) */ struct hwrm_port_qstats_input { @@ -1874,10 +1968,10 @@ struct hwrm_port_phy_qcaps_output { __le16 req_type; __le16 seq_id; __le16 resp_len; - u8 eee_supported; - #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL - #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL - #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1 + u8 flags; + #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL + #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1 u8 unused_0; __le16 supported_speeds_force_mode; #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL @@ -3152,6 +3246,95 @@ struct hwrm_queue_cos2bw_cfg_output { u8 valid; }; +/* hwrm_queue_dscp_qcaps */ +/* Input (24 bytes) */ +struct hwrm_queue_dscp_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 port_id; + u8 unused_0[7]; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 num_dscp_bits; + u8 unused_0; + __le16 max_entries; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_qcfg */ +/* Input (32 bytes) */ +struct hwrm_queue_dscp2pri_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 dest_data_addr; + u8 port_id; + u8 unused_0; + __le16 dest_data_buffer_size; + __le32 unused_1; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp2pri_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 entry_cnt; + u8 default_pri; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + +/* hwrm_queue_dscp2pri_cfg */ +/* Input (40 bytes) */ +struct hwrm_queue_dscp2pri_cfg_input { + __le16 req_type; + __le16 
cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le64 src_data_addr; + __le32 flags; + #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL + __le32 enables; + #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL + u8 port_id; + u8 default_pri; + __le16 entry_cnt; + __le32 unused_0; +}; + +/* Output (16 bytes) */ +struct hwrm_queue_dscp2pri_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_vnic_alloc */ /* Input (24 bytes) */ struct hwrm_vnic_alloc_input { @@ -4038,7 +4221,7 @@ struct hwrm_cfa_encap_record_alloc_input { #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL u8 unused_0; __le16 unused_1; - __le32 encap_data[16]; + __le32 encap_data[20]; }; /* Output (16 bytes) */ @@ -4120,8 +4303,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input { #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL u8 ip_protocol; #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x6UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x11UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL __le16 dst_id; __le16 mirror_vnic_id; u8 tunnel_type; @@ -4224,6 +4407,58 @@ struct hwrm_cfa_ntuple_filter_cfg_output { u8 valid; }; +/* hwrm_cfa_vfr_alloc */ +/* Input (32 bytes) */ +struct hwrm_cfa_vfr_alloc_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 vf_id; + __le16 reserved; + __le32 unused_0; + char vfr_name[32]; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_vfr_alloc_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 rx_cfa_code; + __le16 tx_cfa_action; + u8 unused_0; + u8 unused_1; + u8 unused_2; + u8 valid; +}; + +/* hwrm_cfa_vfr_free */ +/* Input (24 bytes) */ +struct hwrm_cfa_vfr_free_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + char vfr_name[32]; +}; + +/* Output (16 bytes) */ +struct hwrm_cfa_vfr_free_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 unused_0; + u8 unused_1; + u8 unused_2; + u8 unused_3; + u8 valid; +}; + /* hwrm_tunnel_dst_port_query */ /* Input (24 bytes) */ struct hwrm_tunnel_dst_port_query_input { @@ -4448,12 +4683,13 @@ struct hwrm_fw_reset_input { #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL + #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL u8 selfrst_status; #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL - __le16 unused_0[3]; + u8 host_idx; + u8 unused_0[5]; }; /* Output (16 bytes) */ @@ -4487,7 +4723,7 @@ struct hwrm_fw_qstatus_input { #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL - #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL + #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL u8 unused_0[7]; }; @@ -4572,6 +4808,16 @@ struct hwrm_fw_set_structured_data_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct 
hwrm_fw_set_structured_data_cmd_err { + u8 code; + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL + #define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + u8 unused_0[7]; +}; + /* hwrm_fw_get_structured_data */ /* Input (32 bytes) */ struct hwrm_fw_get_structured_data_input { @@ -4611,6 +4857,14 @@ struct hwrm_fw_get_structured_data_output { u8 valid; }; +/* Command specific Error Codes (8 bytes) */ +struct hwrm_fw_get_structured_data_cmd_err { + u8 code; + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL + #define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL + u8 unused_0[7]; +}; + /* hwrm_exec_fwd_resp */ /* Input (128 bytes) */ struct hwrm_exec_fwd_resp_input { @@ -5411,7 +5665,7 @@ struct cmd_nums { #define HWRM_PORT_LPBK_CLR_STATS (0x26UL) #define HWRM_PORT_PHY_QCFG (0x27UL) #define HWRM_PORT_MAC_QCFG (0x28UL) - #define RESERVED7 (0x29UL) + #define HWRM_PORT_MAC_PTP_QCFG (0x29UL) #define HWRM_PORT_PHY_QCAPS (0x2aUL) #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL) #define HWRM_PORT_PHY_I2C_READ (0x2cUL) @@ -5421,14 +5675,17 @@ struct cmd_nums { #define HWRM_QUEUE_QPORTCFG (0x30UL) #define HWRM_QUEUE_QCFG (0x31UL) #define HWRM_QUEUE_CFG (0x32UL) - #define RESERVED2 (0x33UL) - #define RESERVED3 (0x34UL) + #define HWRM_FUNC_VLAN_CFG (0x33UL) + #define HWRM_FUNC_VLAN_QCFG (0x34UL) #define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL) #define HWRM_QUEUE_PFCENABLE_CFG (0x36UL) #define HWRM_QUEUE_PRI2COS_QCFG (0x37UL) #define HWRM_QUEUE_PRI2COS_CFG (0x38UL) #define HWRM_QUEUE_COS2BW_QCFG (0x39UL) #define HWRM_QUEUE_COS2BW_CFG (0x3aUL) + #define HWRM_QUEUE_DSCP_QCAPS (0x3bUL) + #define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL) + #define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL) #define HWRM_VNIC_ALLOC (0x40UL) #define HWRM_VNIC_FREE (0x41UL) #define HWRM_VNIC_CFG (0x42UL) @@ -5455,7 +5712,7 @@ struct cmd_nums { #define HWRM_CFA_L2_FILTER_FREE (0x91UL) #define HWRM_CFA_L2_FILTER_CFG (0x92UL) #define HWRM_CFA_L2_SET_RX_MASK (0x93UL) - #define RESERVED4 (0x94UL) + #define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL) #define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL) #define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL) #define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL) @@ -5494,6 +5751,8 @@ struct cmd_nums { #define HWRM_CFA_METER_PROFILE_CFG (0xf7UL) #define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL) #define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL) + #define HWRM_CFA_VFR_ALLOC (0xfdUL) + #define HWRM_CFA_VFR_FREE (0xfeUL) #define HWRM_CFA_VF_PAIR_ALLOC (0x100UL) #define HWRM_CFA_VF_PAIR_FREE (0x101UL) #define HWRM_CFA_VF_PAIR_INFO (0x102UL) @@ -5502,6 +5761,9 @@ struct cmd_nums { #define HWRM_CFA_FLOW_FLUSH (0x105UL) #define HWRM_CFA_FLOW_STATS (0x106UL) #define HWRM_CFA_FLOW_INFO (0x107UL) + #define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL) + #define HWRM_CFA_DECAP_FILTER_FREE (0x109UL) + #define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL) #define HWRM_SELFTEST_QLIST (0x200UL) #define HWRM_SELFTEST_EXEC (0x201UL) #define HWRM_SELFTEST_IRQ (0x202UL) @@ -5510,6 +5772,8 @@ struct cmd_nums { #define HWRM_DBG_WRITE_DIRECT (0xff12UL) #define HWRM_DBG_WRITE_INDIRECT (0xff13UL) #define HWRM_DBG_DUMP (0xff14UL) + #define HWRM_DBG_ERASE_NVM (0xff15UL) + #define HWRM_DBG_CFG (0xff16UL) #define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL) #define HWRM_NVM_VALIDATE_OPTION (0xffefUL) #define HWRM_NVM_FLUSH (0xfff0UL) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 
b8e7248294d9..fde7256cad1b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -794,8 +794,10 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) PORT_PHY_QCFG_RESP_LINK_LINK; phy_qcfg_resp.link_speed = cpu_to_le16( PORT_PHY_QCFG_RESP_LINK_SPEED_10GB); - phy_qcfg_resp.duplex = - PORT_PHY_QCFG_RESP_DUPLEX_FULL; + phy_qcfg_resp.duplex_cfg = + PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL; + phy_qcfg_resp.duplex_state = + PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL; phy_qcfg_resp.pause = (PORT_PHY_QCFG_RESP_PAUSE_TX | PORT_PHY_QCFG_RESP_PAUSE_RX); @@ -804,7 +806,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) /* force link down */ phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK; phy_qcfg_resp.link_speed = 0; - phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF; + phy_qcfg_resp.duplex_state = + PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF; phy_qcfg_resp.pause = 0; } rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp, -- cgit v1.2.3-55-g7522 From 32e8239c9138a050bc1feeea7cf41f27d79e6664 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 24 Jul 2017 12:34:21 -0400 Subject: bnxt_en: Retrieve the hardware bridge mode from the firmware. Retrieve and store the hardware bridge mode, so that we can implement ndo_bridge_{get|set)link methods in the next patch. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 ++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 9 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 2103f1412359..ec8a1958ba12 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -4610,6 +4611,13 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) bp->port_partition_type = resp->port_partition_type; break; } + if (bp->hwrm_spec_code < 0x10707 || + resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB) + bp->br_mode = BRIDGE_MODE_VEB; + else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA) + bp->br_mode = BRIDGE_MODE_VEPA; + else + bp->br_mode = BRIDGE_MODE_UNDEF; func_qcfg_exit: mutex_unlock(&bp->hwrm_cmd_lock); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 505691a33a7c..15d5a10122ed 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1164,6 +1164,7 @@ struct bnxt { u8 nge_port_cnt; __le16 nge_fw_dst_port_id; u8 port_partition_type; + u16 br_mode; u16 rx_coal_ticks; u16 rx_coal_ticks_irq; -- cgit v1.2.3-55-g7522 From 39d8ba2e71fbdde686d7e31ad141a01994dc0793 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 24 Jul 2017 12:34:22 -0400 Subject: bnxt_en: Implement ndo_bridge_{get|set}link methods. To allow users to set the hardware bridging mode to VEB or VEPA. Only single function PF can change the bridging mode. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 67 +++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ec8a1958ba12..4acaeaf87f18 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4919,6 +4919,26 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, } } +static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) +{ + struct hwrm_func_cfg_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + req.fid = cpu_to_le16(0xffff); + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); + if (br_mode == BRIDGE_MODE_VEB) + req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; + else if (br_mode == BRIDGE_MODE_VEPA) + req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; + else + return -EINVAL; + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + rc = -EIO; + return rc; +} + static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; @@ -7432,6 +7452,51 @@ static void bnxt_udp_tunnel_del(struct net_device *dev, schedule_work(&bp->sp_task); } +static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, + int nlflags) +{ + struct bnxt *bp = netdev_priv(dev); + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0, + nlflags, filter_mask, NULL); +} + +static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 flags) +{ + struct bnxt *bp = netdev_priv(dev); + struct nlattr *attr, *br_spec; + int rem, rc = 0; + + if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp)) + return -EOPNOTSUPP; + + br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; + + nla_for_each_nested(attr, br_spec, rem) { + u16 mode; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + if (nla_len(attr) < sizeof(mode)) + return -EINVAL; + + mode = nla_get_u16(attr); + if (mode == bp->br_mode) + break; + + rc = bnxt_hwrm_set_br_mode(bp, mode); + if (!rc) + bp->br_mode = mode; + break; + } + return rc; +} + static const struct net_device_ops bnxt_netdev_ops = { .ndo_open = bnxt_open, .ndo_start_xmit = bnxt_start_xmit, @@ -7463,6 +7528,8 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_udp_tunnel_add = bnxt_udp_tunnel_add, .ndo_udp_tunnel_del = bnxt_udp_tunnel_del, .ndo_xdp = bnxt_xdp, + .ndo_bridge_getlink = bnxt_bridge_getlink, + .ndo_bridge_setlink = bnxt_bridge_setlink, }; static void bnxt_remove_one(struct pci_dev *pdev) -- cgit v1.2.3-55-g7522 From 5c8227d0d3b1eb1ad8f98d0b6dc619d70f2cfa04 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 24 Jul 2017 12:34:23 -0400 Subject: bnxt_en: Add bnxt_get_num_stats() to centrally get the number of ethtool stats. Instead of duplicating the logic multiple times. Also, it is unnecessary to zero the buffer in .get_ethtool_stats() because it is already zeroed by the caller. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 25 ++++++++++++----------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index be6acadcb202..4661b1185790 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -198,19 +198,23 @@ static const struct { #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) +static int bnxt_get_num_stats(struct bnxt *bp) +{ + int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + + if (bp->flags & BNXT_FLAG_PORT_STATS) + num_stats += BNXT_NUM_PORT_STATS; + + return num_stats; +} + static int bnxt_get_sset_count(struct net_device *dev, int sset) { struct bnxt *bp = netdev_priv(dev); switch (sset) { - case ETH_SS_STATS: { - int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; - - if (bp->flags & BNXT_FLAG_PORT_STATS) - num_stats += BNXT_NUM_PORT_STATS; - - return num_stats; - } + case ETH_SS_STATS: + return bnxt_get_num_stats(bp); case ETH_SS_TEST: if (!bp->num_tests) return -EOPNOTSUPP; @@ -225,11 +229,8 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, { u32 i, j = 0; struct bnxt *bp = netdev_priv(dev); - u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings; u32 stat_fields = sizeof(struct ctx_hw_stats) / 8; - memset(buf, 0, buf_size); - if (!bp->bnapi) return; @@ -835,7 +836,7 @@ static void bnxt_get_drvinfo(struct net_device *dev, strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); - info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + info->n_stats = bnxt_get_num_stats(bp); info->testinfo_len = bp->num_tests; /* TODO CHIMP_FW: eeprom dump details */ info->eedump_len = 0; -- cgit v1.2.3-55-g7522 From adcc331e42e639ea44ac3c746db6c7207c3f69c0 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 24 Jul 2017 12:34:24 -0400 Subject: bnxt_en: Allow the user to set ethtool stats-block-usecs to 0. For debugging purpose, it is sometimes useful to disable periodic port statistics updates, so that the firmware logs will not be filled with statistics update messages. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 ++- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 8 +++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4acaeaf87f18..5df967037d10 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6843,7 +6843,8 @@ static void bnxt_timer(unsigned long data) if (atomic_read(&bp->intr_sem) != 0) goto bnxt_restart_timer; - if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) { + if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && + bp->stats_coal_ticks) { set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); schedule_work(&bp->sp_task); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 4661b1185790..140e76904af9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -86,9 +86,11 @@ static int bnxt_set_coalesce(struct net_device *dev, if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) { u32 stats_ticks = coal->stats_block_coalesce_usecs; - stats_ticks = clamp_t(u32, stats_ticks, - BNXT_MIN_STATS_COAL_TICKS, - BNXT_MAX_STATS_COAL_TICKS); + /* Allow 0, which means disable. */ + if (stats_ticks) + stats_ticks = clamp_t(u32, stats_ticks, + BNXT_MIN_STATS_COAL_TICKS, + BNXT_MAX_STATS_COAL_TICKS); stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS); bp->stats_coal_ticks = stats_ticks; update_stats = true; -- cgit v1.2.3-55-g7522 From 9315edca9b1d0daf41f81e1f5d4fb995d3cbc634 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 24 Jul 2017 12:34:25 -0400 Subject: bnxt_en: Report firmware DCBX agent. Report DCB_CAP_DCBX_LLD_MANAGED only if the firmware DCBX agent is enabled and running for PF or VF. Otherwise, if both LLDP and DCBX agents are disabled in firmware, we report DCB_CAP_DCBX_LLD_HOST and allow host IEEE DCB settings. This patch refines the current logic in the driver. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 19 ++++++++++--------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 11 ++++++++--- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5df967037d10..95fea2622d64 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4578,6 +4578,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) { struct hwrm_func_qcfg_input req = {0}; struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + u16 flags; int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); @@ -4594,15 +4595,15 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK; } #endif - if (BNXT_PF(bp)) { - u16 flags = le16_to_cpu(resp->flags); - - if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | - FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) - bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; - if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST) - bp->flags |= BNXT_FLAG_MULTI_HOST; - } + flags = le16_to_cpu(resp->flags); + if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED | + FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) { + bp->flags |= BNXT_FLAG_FW_LLDP_AGENT; + if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) + bp->flags |= BNXT_FLAG_FW_DCBX_AGENT; + } + if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) + bp->flags |= BNXT_FLAG_MULTI_HOST; switch (resp->port_partition_type) { case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0: diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 15d5a10122ed..6b781be66722 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1027,6 +1027,7 @@ struct bnxt { #define BNXT_FLAG_MULTI_HOST 0x100000 #define BNXT_FLAG_SHORT_CMD 0x200000 #define BNXT_FLAG_DOUBLE_DB 0x400000 + #define BNXT_FLAG_FW_DCBX_AGENT 0x800000 #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 5c6dd0ce209f..c0145898f64c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -549,13 +549,18 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode) { struct bnxt *bp = netdev_priv(dev); - /* only support IEEE */ - if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE)) + /* All firmware DCBX settings are set in NVRAM */ + if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) return 1; if (mode & DCB_CAP_DCBX_HOST) { if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) return 1; + + /* only support IEEE */ + if ((mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_IEEE)) + return 1; } if (mode == bp->dcbx_cap) @@ -584,7 +589,7 @@ void bnxt_dcb_init(struct bnxt *bp) bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE; if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT)) bp->dcbx_cap |= DCB_CAP_DCBX_HOST; - else + else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT) bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED; bp->dev->dcbnl_ops = &dcbnl_ops; } -- cgit v1.2.3-55-g7522 From 70098a47bbf131b65c64ca935c2480e64c9c7c51 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 24 Jul 2017 12:34:26 -0400 Subject: bnxt_en: Set ETS min_bw parameter for older firmware. 
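The hunk below folds the ETS weight into the min_bw field as well: the weight is scaled to 1/100th-of-a-percent units and tagged with a unit flag in the upper bits so that older firmware honours it. A hedged sketch of that encoding, using the BW_VALUE_UNIT_PERCENT1_100 value added to bnxt_dcb.h in this patch (the helper name is illustrative):

#define BW_VALUE_UNIT_PERCENT1_100      (0x1UL << 29)   /* as added to bnxt_dcb.h */

/* Encode an ETS weight (0-100 %) as min_bw in 1/100th-percent units,
 * tagged with the percent unit so older firmware applies the same
 * share it already uses for bw_weight.
 */
static __le32 bnxt_ets_weight_to_min_bw(u8 tc_tx_bw_percent)
{
        u32 val = (u32)tc_tx_bw_percent * 100;  /* e.g. 25 % -> 2500 */

        return cpu_to_le32(val | BW_VALUE_UNIT_PERCENT1_100);
}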
In addition to the ETS weight, older firmware also requires the min_bw parameter to be set for it to work properly. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 6 ++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h | 1 + 2 files changed, 7 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index c0145898f64c..aa1f3a2c7a78 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -93,6 +93,12 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cos2bw.tsa = QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS; cos2bw.bw_weight = ets->tc_tx_bw[i]; + /* older firmware requires min_bw to be set to the + * same weight value in percent. + */ + cos2bw.min_bw = + cpu_to_le32((ets->tc_tx_bw[i] * 100) | + BW_VALUE_UNIT_PERCENT1_100); } memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); if (i == 0) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h index ecd0a5e46a49..d2e0af960bf5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h @@ -26,6 +26,7 @@ struct bnxt_cos2bw_cfg { u8 queue_id; __le32 min_bw; __le32 max_bw; +#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29) u8 tsa; u8 pri_lvl; u8 bw_weight; -- cgit v1.2.3-55-g7522 From 4ab0c6a8ffd7d25475dd9eb06614eec1ae53a443 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Mon, 24 Jul 2017 12:34:27 -0400 Subject: bnxt_en: add support to enable VF-representors This patch is a part of a patch-set that introduces support for VF-reps in the bnxt_en driver. The driver registers eswitch mode get/set methods with the devlink interface that allow a user to enable SRIOV switchdev mode. When enabled, the driver registers a VF-rep netdev object for each VF with the stack. This can essentially bring the VFs unders the management perview of the hypervisor and applications such as OVS. The next patch in the series, adds the RX/TX routines and a slim netdev implementation for the VF-reps. Signed-off-by: Sathya Perla Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/Kconfig | 1 + drivers/net/ethernet/broadcom/bnxt/Makefile | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9 +- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 32 ++++ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 6 + drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 244 ++++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 38 ++++ 7 files changed, 330 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c create mode 100644 drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 96413808c726..285f8bc25682 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -193,6 +193,7 @@ config SYSTEMPORT config BNXT tristate "Broadcom NetXtreme-C/E support" depends on PCI + depends on MAY_USE_DEVLINK select FW_LOADER select LIBCRC32C ---help--- diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index a7ca45b251cb..d141a22ac50b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,3 +1,3 @@ obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o +bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 95fea2622d64..ebdeeb4a5756 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -57,6 +57,7 @@ #include "bnxt_ethtool.h" #include "bnxt_dcb.h" #include "bnxt_xdp.h" +#include "bnxt_vfr.h" #define BNXT_TX_TIMEOUT (5 * HZ) @@ -7539,8 +7540,10 @@ static void bnxt_remove_one(struct pci_dev *pdev) struct net_device *dev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(dev); - if (BNXT_PF(bp)) + if (BNXT_PF(bp)) { bnxt_sriov_disable(bp); + bnxt_dl_unregister(bp); + } pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); @@ -7843,6 +7846,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) #ifdef CONFIG_BNXT_SRIOV init_waitqueue_head(&bp->sriov_cfg_wait); + mutex_init(&bp->sriov_lock); #endif bp->gro_func = bnxt_gro_func_5730x; if (BNXT_CHIP_P4_PLUS(bp)) @@ -7934,6 +7938,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_clr_int; + if (BNXT_PF(bp)) + bnxt_dl_register(bp); + netdev_info(dev, "%s found at mem %lx, node addr %pM\n", board_info[ent->driver_data].name, (long)pci_resource_start(pdev, 0), dev->dev_addr); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 6b781be66722..a7d5f42fb6a3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -19,6 +19,7 @@ #define DRV_VER_UPD 0 #include +#include struct tx_bd { __le32 tx_bd_len_flags_type; @@ -618,6 +619,8 @@ struct bnxt_tpa_info { #define BNXT_TPA_OUTER_L3_OFF(hdr_info) \ ((hdr_info) & 0x1ff) + + u16 cfa_code; /* cfa_code in TPA start compl */ }; struct bnxt_rx_ring_info { @@ -928,6 +931,23 @@ struct bnxt_test_info { #define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 #define BNXT_CAG_REG_BASE 0x300000 +struct bnxt_vf_rep_stats { + u64 packets; + u64 bytes; + u64 dropped; +}; + +struct bnxt_vf_rep { + struct bnxt *bp; + struct net_device *dev; + u16 vf_idx; + u16 
tx_cfa_action; + u16 rx_cfa_code; + + struct bnxt_vf_rep_stats rx_stats; + struct bnxt_vf_rep_stats tx_stats; +}; + struct bnxt { void __iomem *bar0; void __iomem *bar1; @@ -1208,6 +1228,12 @@ struct bnxt { wait_queue_head_t sriov_cfg_wait; bool sriov_cfg; #define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000) + + /* lock to protect VF-rep creation/cleanup via + * multiple paths such as ->sriov_configure() and + * devlink ->eswitch_mode_set() + */ + struct mutex sriov_lock; #endif #define BNXT_NTP_FLTR_MAX_FLTR 4096 @@ -1234,6 +1260,12 @@ struct bnxt { struct bnxt_led_info leds[BNXT_MAX_LED]; struct bpf_prog *xdp_prog; + + /* devlink interface and vf-rep structs */ + struct devlink *dl; + enum devlink_eswitch_mode eswitch_mode; + struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */ + u16 *cfa_code_map; /* cfa_code -> vf_idx map */ }; #define BNXT_RX_STATS_OFFSET(counter) \ diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index fde7256cad1b..d37925a8a65b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -18,6 +18,7 @@ #include "bnxt.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" +#include "bnxt_vfr.h" #include "bnxt_ethtool.h" #ifdef CONFIG_BNXT_SRIOV @@ -587,6 +588,10 @@ void bnxt_sriov_disable(struct bnxt *bp) if (!num_vfs) return; + /* synchronize VF and VF-rep create and destroy */ + mutex_lock(&bp->sriov_lock); + bnxt_vf_reps_destroy(bp); + if (pci_vfs_assigned(bp->pdev)) { bnxt_hwrm_fwd_async_event_cmpl( bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); @@ -597,6 +602,7 @@ void bnxt_sriov_disable(struct bnxt *bp) /* Free the HW resources reserved for various VF's */ bnxt_hwrm_func_vf_resource_free(bp, num_vfs); } + mutex_unlock(&bp->sriov_lock); bnxt_free_vf_resources(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c new file mode 100644 index 000000000000..eab358c2ac97 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -0,0 +1,244 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ +#include +#include +#include +#include +#include + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_vfr.h" + +#define CFA_HANDLE_INVALID 0xffff + +static void __bnxt_vf_reps_destroy(struct bnxt *bp) +{ + u16 num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + int i; + + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + if (vf_rep) { + if (vf_rep->dev) { + /* if register_netdev failed, then netdev_ops + * would have been set to NULL + */ + if (vf_rep->dev->netdev_ops) + unregister_netdev(vf_rep->dev); + free_netdev(vf_rep->dev); + } + } + } + + kfree(bp->vf_reps); + bp->vf_reps = NULL; +} + +void bnxt_vf_reps_destroy(struct bnxt *bp) +{ + bool closed = false; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + if (!bp->vf_reps) + return; + + /* Ensure that parent PF's and VF-reps' RX/TX has been quiesced + * before proceeding with VF-rep cleanup. 
+ */ + rtnl_lock(); + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + closed = true; + } + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + + if (closed) + bnxt_open_nic(bp, false, false); + rtnl_unlock(); + + /* Need to call vf_reps_destroy() outside of rntl_lock + * as unregister_netdev takes rtnl_lock + */ + __bnxt_vf_reps_destroy(bp); +} + +/* Use the OUI of the PF's perm addr and report the same mac addr + * for the same VF-rep each time + */ +static void bnxt_vf_rep_eth_addr_gen(u8 *src_mac, u16 vf_idx, u8 *mac) +{ + u32 addr; + + ether_addr_copy(mac, src_mac); + + addr = jhash(src_mac, ETH_ALEN, 0) + vf_idx; + mac[3] = (u8)(addr & 0xFF); + mac[4] = (u8)((addr >> 8) & 0xFF); + mac[5] = (u8)((addr >> 16) & 0xFF); +} + +static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, + struct net_device *dev) +{ + struct net_device *pf_dev = bp->dev; + + /* Just inherit all the featues of the parent PF as the VF-R + * uses the RX/TX rings of the parent PF + */ + dev->hw_features = pf_dev->hw_features; + dev->gso_partial_features = pf_dev->gso_partial_features; + dev->vlan_features = pf_dev->vlan_features; + dev->hw_enc_features = pf_dev->hw_enc_features; + dev->features |= pf_dev->features; + bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx, + dev->perm_addr); + ether_addr_copy(dev->dev_addr, dev->perm_addr); +} + +static int bnxt_vf_reps_create(struct bnxt *bp) +{ + u16 num_vfs = pci_num_vf(bp->pdev); + struct bnxt_vf_rep *vf_rep; + struct net_device *dev; + int rc, i; + + bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL); + if (!bp->vf_reps) + return -ENOMEM; + + for (i = 0; i < num_vfs; i++) { + dev = alloc_etherdev(sizeof(*vf_rep)); + if (!dev) { + rc = -ENOMEM; + goto err; + } + + vf_rep = netdev_priv(dev); + bp->vf_reps[i] = vf_rep; + vf_rep->dev = dev; + vf_rep->bp = bp; + vf_rep->vf_idx = i; + vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; + + bnxt_vf_rep_netdev_init(bp, vf_rep, dev); + rc = register_netdev(dev); + if (rc) { + /* no need for unregister_netdev in cleanup */ + dev->netdev_ops = NULL; + goto err; + } + } + + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + return 0; + +err: + netdev_info(bp->dev, "%s error=%d", __func__, rc); + __bnxt_vf_reps_destroy(bp); + return rc; +} + +/* Devlink related routines */ +static int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(devlink); + + *mode = bp->eswitch_mode; + return 0; +} + +static int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct bnxt *bp = bnxt_get_bp_from_dl(devlink); + int rc = 0; + + mutex_lock(&bp->sriov_lock); + if (bp->eswitch_mode == mode) { + netdev_info(bp->dev, "already in %s eswitch mode", + mode == DEVLINK_ESWITCH_MODE_LEGACY ? 
+ "legacy" : "switchdev"); + rc = -EINVAL; + goto done; + } + + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + bnxt_vf_reps_destroy(bp); + break; + + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (pci_num_vf(bp->pdev) == 0) { + netdev_info(bp->dev, + "Enable VFs before setting swtichdev mode"); + rc = -EPERM; + goto done; + } + rc = bnxt_vf_reps_create(bp); + break; + + default: + rc = -EINVAL; + goto done; + } +done: + mutex_unlock(&bp->sriov_lock); + return rc; +} + +static const struct devlink_ops bnxt_dl_ops = { + .eswitch_mode_set = bnxt_dl_eswitch_mode_set, + .eswitch_mode_get = bnxt_dl_eswitch_mode_get +}; + +int bnxt_dl_register(struct bnxt *bp) +{ + struct devlink *dl; + int rc; + + if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV)) + return 0; + + if (bp->hwrm_spec_code < 0x10800) { + netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n"); + return -ENOTSUPP; + } + + dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + if (!dl) { + netdev_warn(bp->dev, "devlink_alloc failed"); + return -ENOMEM; + } + + bnxt_link_bp_to_dl(dl, bp); + bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + rc = devlink_register(dl, &bp->pdev->dev); + if (rc) { + bnxt_link_bp_to_dl(dl, NULL); + devlink_free(dl); + netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc); + return rc; + } + + return 0; +} + +void bnxt_dl_unregister(struct bnxt *bp) +{ + struct devlink *dl = bp->dl; + + if (!dl) + return; + + devlink_unregister(dl); + devlink_free(dl); +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h new file mode 100644 index 000000000000..310c9c567152 --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -0,0 +1,38 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2016-2017 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_VFR_H +#define BNXT_VFR_H + +#define MAX_CFA_CODE 65536 + +/* Struct to hold housekeeping info needed by devlink interface */ +struct bnxt_dl { + struct bnxt *bp; /* back ptr to the controlling dev */ +}; + +static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl) +{ + return ((struct bnxt_dl *)devlink_priv(dl))->bp; +} + +static inline void bnxt_link_bp_to_dl(struct devlink *dl, struct bnxt *bp) +{ + struct bnxt_dl *bp_dl = devlink_priv(dl); + + bp_dl->bp = bp; + if (bp) + bp->dl = dl; +} + +int bnxt_dl_register(struct bnxt *bp); +void bnxt_dl_unregister(struct bnxt *bp); +void bnxt_vf_reps_destroy(struct bnxt *bp); + +#endif /* BNXT_VFR_H */ -- cgit v1.2.3-55-g7522 From ee5c7fb3404724b9e25fe24c81fbcda60f3f2659 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Mon, 24 Jul 2017 12:34:28 -0400 Subject: bnxt_en: add vf-rep RX/TX and netdev implementation This patch introduces the RX/TX and a simple netdev implementation for VF-reps. The VF-reps use the RX/TX rings of the PF. For each VF-rep the PF driver issues a VFR_ALLOC FW cmd that returns "cfa_code" and "cfa_action" values. The FW sets up the filter tables in such a way that VF traffic by default (in absence of other rules) gets punted to the parent PF. The cfa_code value in the RX-compl informs the driver of the source VF. For traffic being transmitted from the VF-rep, the TX BD is tagged with a cfa_action value that informs the HW to punt it to the corresponding VF. 
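Concretely, on receive the driver resolves the completion's cfa_code to a netdev: a zero or unmapped code stays with the parent PF, while a mapped code selects the corresponding VF-rep. A minimal sketch modeled on the bnxt_get_vf_rep()/bnxt_get_pkt_dev() helpers added below in this patch (the combined helper name here is illustrative; VF_IDX_INVALID is 0xffff per bnxt_vfr.c):

/* Map a received packet to the netdev it should be delivered on:
 * cfa_code 0, or a code with no entry in cfa_code_map, means the
 * parent PF; anything else indexes the per-VF representor created
 * in switchdev mode.
 */
static struct net_device *bnxt_rx_pick_dev(struct bnxt *bp, u16 cfa_code)
{
        if (cfa_code && bp->cfa_code_map) {
                u16 vf_idx = bp->cfa_code_map[cfa_code];

                if (vf_idx != VF_IDX_INVALID)
                        return bp->vf_reps[vf_idx]->dev;  /* VF-rep netdev */
        }
        return bp->dev;                                   /* default: PF */
}

On transmit the direction is reversed: each VF-rep carries a metadata_dst holding its tx_cfa_action, and bnxt_start_xmit() copies that value into the TX BD so the hardware punts the frame to the matching VF.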
Signed-off-by: Sathya Perla Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 68 ++++++-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 10 ++ drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 218 +++++++++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 4 + 4 files changed, 288 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ebdeeb4a5756..f262fe6092d7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -245,6 +245,16 @@ const u16 bnxt_lhint_arr[] = { TX_BD_FLAGS_LHINT_2048_AND_LARGER, }; +static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb) +{ + struct metadata_dst *md_dst = skb_metadata_dst(skb); + + if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX) + return 0; + + return md_dst->u.port_info.port_id; +} + static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); @@ -289,7 +299,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_buf->nr_frags = last_frag; vlan_tag_flags = 0; - cfa_action = 0; + cfa_action = bnxt_xmit_get_cfa_action(skb); if (skb_vlan_tag_present(skb)) { vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN | skb_vlan_tag_get(skb); @@ -324,7 +334,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_push1->tx_bd_hsize_lflags = 0; tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); - tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); + tx_push1->tx_bd_cfa_action = + cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); end = pdata + length; end = PTR_ALIGN(end, 8) - 1; @@ -429,7 +440,8 @@ normal_tx: txbd->tx_bd_len_flags_type = cpu_to_le32(flags); txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); - txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action); + txbd1->tx_bd_cfa_action = + cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT); for (i = 0; i < last_frag; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -1034,7 +1046,10 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, bnxt_sched_reset(bp, rxr); return; } - + /* Store cfa_code in tpa_info to use in tpa_end + * completion processing. + */ + tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1); prod_rx_buf->data = tpa_info->data; prod_rx_buf->data_ptr = tpa_info->data_ptr; @@ -1269,6 +1284,17 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp, return skb; } +/* Given the cfa_code of a received packet determine which + * netdev (vf-rep or PF) the packet is destined to. + */ +static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code) +{ + struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code); + + /* if vf-rep dev is NULL, the must belongs to the PF */ + return dev ? 
dev : bp->dev; +} + static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, @@ -1362,7 +1388,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return NULL; } } - skb->protocol = eth_type_trans(skb, bp->dev); + + skb->protocol = + eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code)); if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); @@ -1389,6 +1417,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, return skb; } +static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi, + struct sk_buff *skb) +{ + if (skb->dev != bp->dev) { + /* this packet belongs to a vf-rep */ + bnxt_vf_rep_rx(bp, skb); + return; + } + skb_record_rx_queue(skb, bnapi->index); + napi_gro_receive(&bnapi->napi, skb); +} + /* returns the following: * 1 - 1 packet successfully received * 0 - successful TPA_START, packet not completed yet @@ -1405,7 +1445,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, struct rx_cmp *rxcmp; struct rx_cmp_ext *rxcmp1; u32 tmp_raw_cons = *raw_cons; - u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons); + u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons); struct bnxt_sw_rx_bd *rx_buf; unsigned int len; u8 *data_ptr, agg_bufs, cmp_type; @@ -1447,8 +1487,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, rc = -ENOMEM; if (likely(skb)) { - skb_record_rx_queue(skb, bnapi->index); - napi_gro_receive(&bnapi->napi, skb); + bnxt_deliver_skb(bp, bnapi, skb); rc = 1; } *event |= BNXT_RX_EVENT; @@ -1537,7 +1576,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type); } - skb->protocol = eth_type_trans(skb, dev); + cfa_code = RX_CMP_CFA_CODE(rxcmp1); + skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code)); if ((rxcmp1->rx_cmp_flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && @@ -1562,8 +1602,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, } } - skb_record_rx_queue(skb, bnapi->index); - napi_gro_receive(&bnapi->napi, skb); + bnxt_deliver_skb(bp, bnapi, skb); rc = 1; next_rx: @@ -6246,6 +6285,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* Poll link status and check for SFP+ module status */ bnxt_get_port_module_status(bp); + /* VF-reps may need to be re-opened after the PF is re-opened */ + if (BNXT_PF(bp)) + bnxt_vf_reps_open(bp); return 0; open_err: @@ -6334,6 +6376,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) if (rc) netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n"); } + + /* Close the VF-reps before closing PF */ + if (BNXT_PF(bp)) + bnxt_vf_reps_close(bp); #endif /* Change device state to avoid TX queue wake up's */ bnxt_tx_disable(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a7d5f42fb6a3..63756f0389d7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -20,6 +20,7 @@ #include #include +#include struct tx_bd { __le32 tx_bd_len_flags_type; @@ -243,6 +244,10 @@ struct rx_cmp_ext { ((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \ RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3) +#define RX_CMP_CFA_CODE(rxcmpl1) \ + ((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \ + RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT) + struct 
rx_agg_cmp { __le32 rx_agg_cmp_len_flags_type; #define RX_AGG_CMP_TYPE (0x3f << 0) @@ -312,6 +317,10 @@ struct rx_tpa_start_cmp_ext { __le32 rx_tpa_start_cmp_hdr_info; }; +#define TPA_START_CFA_CODE(rx_tpa_start) \ + ((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \ + RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT) + struct rx_tpa_end_cmp { __le32 rx_tpa_end_cmp_len_flags_type; #define RX_TPA_END_CMP_TYPE (0x3f << 0) @@ -940,6 +949,7 @@ struct bnxt_vf_rep_stats { struct bnxt_vf_rep { struct bnxt *bp; struct net_device *dev; + struct metadata_dst *dst; u16 vf_idx; u16 tx_cfa_action; u16 rx_cfa_code; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index eab358c2ac97..60bdb181358e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -17,6 +17,178 @@ #include "bnxt_vfr.h" #define CFA_HANDLE_INVALID 0xffff +#define VF_IDX_INVALID 0xffff + +static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx, + u16 *tx_cfa_action, u16 *rx_cfa_code) +{ + struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_vfr_alloc_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1); + req.vf_id = cpu_to_le16(vf_idx); + sprintf(req.vfr_name, "vfr%d", vf_idx); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) { + *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); + *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); + netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", + *tx_cfa_action, *rx_cfa_code); + } else { + netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + } + + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + +static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) +{ + struct hwrm_cfa_vfr_free_input req = { 0 }; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1); + sprintf(req.vfr_name, "vfr%d", vf_idx); + + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc) + netdev_info(bp->dev, "%s error rc=%d", __func__, rc); + return rc; +} + +static int bnxt_vf_rep_open(struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct bnxt *bp = vf_rep->bp; + + /* Enable link and TX only if the parent PF is open. 
*/ + if (netif_running(bp->dev)) { + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + } + return 0; +} + +static int bnxt_vf_rep_close(struct net_device *dev) +{ + netif_carrier_off(dev); + netif_tx_disable(dev); + + return 0; +} + +static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + int rc, len = skb->len; + + skb_dst_drop(skb); + dst_hold((struct dst_entry *)vf_rep->dst); + skb_dst_set(skb, (struct dst_entry *)vf_rep->dst); + skb->dev = vf_rep->dst->u.port_info.lower_dev; + + rc = dev_queue_xmit(skb); + if (!rc) { + vf_rep->tx_stats.packets++; + vf_rep->tx_stats.bytes += len; + } + return rc; +} + +static void +bnxt_vf_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + stats->rx_packets = vf_rep->rx_stats.packets; + stats->rx_bytes = vf_rep->rx_stats.bytes; + stats->tx_packets = vf_rep->tx_stats.packets; + stats->tx_bytes = vf_rep->tx_stats.bytes; +} + +struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + u16 vf_idx; + + if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) { + vf_idx = bp->cfa_code_map[cfa_code]; + if (vf_idx != VF_IDX_INVALID) + return bp->vf_reps[vf_idx]->dev; + } + return NULL; +} + +void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev); + struct bnxt_vf_rep_stats *rx_stats; + + rx_stats = &vf_rep->rx_stats; + vf_rep->rx_stats.bytes += skb->len; + vf_rep->rx_stats.packets++; + + netif_receive_skb(skb); +} + +static void bnxt_vf_rep_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = { + .get_drvinfo = bnxt_vf_rep_get_drvinfo +}; + +static const struct net_device_ops bnxt_vf_rep_netdev_ops = { + .ndo_open = bnxt_vf_rep_open, + .ndo_stop = bnxt_vf_rep_close, + .ndo_start_xmit = bnxt_vf_rep_xmit, + .ndo_get_stats64 = bnxt_vf_rep_get_stats64 +}; + +/* Called when the parent PF interface is closed: + * As the mode transition from SWITCHDEV to LEGACY + * happens under the rtnl_lock() this routine is safe + * under the rtnl_lock() + */ +void bnxt_vf_reps_close(struct bnxt *bp) +{ + struct bnxt_vf_rep *vf_rep; + u16 num_vfs, i; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + num_vfs = pci_num_vf(bp->pdev); + for (i = 0; i < num_vfs; i++) { + vf_rep = bp->vf_reps[i]; + if (netif_running(vf_rep->dev)) + bnxt_vf_rep_close(vf_rep->dev); + } +} + +/* Called when the parent PF interface is opened (re-opened): + * As the mode transition from SWITCHDEV to LEGACY + * happen under the rtnl_lock() this routine is safe + * under the rtnl_lock() + */ +void bnxt_vf_reps_open(struct bnxt *bp) +{ + int i; + + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + for (i = 0; i < pci_num_vf(bp->pdev); i++) + bnxt_vf_rep_open(bp->vf_reps[i]->dev); +} static void __bnxt_vf_reps_destroy(struct bnxt *bp) { @@ -27,6 +199,11 @@ static void __bnxt_vf_reps_destroy(struct bnxt *bp) for (i = 0; i < num_vfs; i++) { vf_rep = bp->vf_reps[i]; if (vf_rep) { + dst_release((struct dst_entry *)vf_rep->dst); + + if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID) + hwrm_cfa_vfr_free(bp, vf_rep->vf_idx); + if (vf_rep->dev) { /* if register_netdev failed, then netdev_ops * would have been set to NULL @@ -60,6 +237,9 @@ 
void bnxt_vf_reps_destroy(struct bnxt *bp) bnxt_close_nic(bp, false, false); closed = true; } + /* un-publish cfa_code_map so that RX path can't see it anymore */ + kfree(bp->cfa_code_map); + bp->cfa_code_map = NULL; bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; if (closed) @@ -92,6 +272,8 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, { struct net_device *pf_dev = bp->dev; + dev->netdev_ops = &bnxt_vf_rep_netdev_ops; + dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; /* Just inherit all the featues of the parent PF as the VF-R * uses the RX/TX rings of the parent PF */ @@ -107,7 +289,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, static int bnxt_vf_reps_create(struct bnxt *bp) { - u16 num_vfs = pci_num_vf(bp->pdev); + u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev); struct bnxt_vf_rep *vf_rep; struct net_device *dev; int rc, i; @@ -116,6 +298,16 @@ static int bnxt_vf_reps_create(struct bnxt *bp) if (!bp->vf_reps) return -ENOMEM; + /* storage for cfa_code to vf-idx mapping */ + cfa_code_map = kmalloc(sizeof(*bp->cfa_code_map) * MAX_CFA_CODE, + GFP_KERNEL); + if (!cfa_code_map) { + rc = -ENOMEM; + goto err; + } + for (i = 0; i < MAX_CFA_CODE; i++) + cfa_code_map[i] = VF_IDX_INVALID; + for (i = 0; i < num_vfs; i++) { dev = alloc_etherdev(sizeof(*vf_rep)); if (!dev) { @@ -130,6 +322,26 @@ static int bnxt_vf_reps_create(struct bnxt *bp) vf_rep->vf_idx = i; vf_rep->tx_cfa_action = CFA_HANDLE_INVALID; + /* get cfa handles from FW */ + rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx, + &vf_rep->tx_cfa_action, + &vf_rep->rx_cfa_code); + if (rc) { + rc = -ENOLINK; + goto err; + } + cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx; + + vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, + GFP_KERNEL); + if (!vf_rep->dst) { + rc = -ENOMEM; + goto err; + } + /* only cfa_action is needed to mux a packet while TXing */ + vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action; + vf_rep->dst->u.port_info.lower_dev = bp->dev; + bnxt_vf_rep_netdev_init(bp, vf_rep, dev); rc = register_netdev(dev); if (rc) { @@ -139,11 +351,15 @@ static int bnxt_vf_reps_create(struct bnxt *bp) } } + /* publish cfa_code_map only after all VF-reps have been initialized */ + bp->cfa_code_map = cfa_code_map; bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV; + netif_keep_dst(bp->dev); return 0; err: netdev_info(bp->dev, "%s error=%d", __func__, rc); + kfree(cfa_code_map); __bnxt_vf_reps_destroy(bp); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h index 310c9c567152..c6cd55afbb89 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -34,5 +34,9 @@ static inline void bnxt_link_bp_to_dl(struct devlink *dl, struct bnxt *bp) int bnxt_dl_register(struct bnxt *bp); void bnxt_dl_unregister(struct bnxt *bp); void bnxt_vf_reps_destroy(struct bnxt *bp); +void bnxt_vf_reps_close(struct bnxt *bp); +void bnxt_vf_reps_open(struct bnxt *bp); +void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb); +struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code); #endif /* BNXT_VFR_H */ -- cgit v1.2.3-55-g7522 From c124a62ff2dde9eaa9e8083de8206a142535c04e Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Mon, 24 Jul 2017 12:34:29 -0400 Subject: bnxt_en: add support for port_attr_get and and get_phys_port_name This patch adds support for the switchdev_port_attr_get() and ndo_get_phys_port_name() methods for the PF and the 
VF-reps. Using this support a user application can deduce that the PF (when in the ESWITCH_SWDEV mode) and it's VF-reps form a switch. Signed-off-by: Sathya Perla Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 57 +++++++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 + drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 31 ++++++++++++++- 3 files changed, 89 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f262fe6092d7..82cbe1804821 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7546,6 +7546,61 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, return rc; } +static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, + size_t len) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + + /* The PF and it's VF-reps only support the switchdev framework */ + if (!BNXT_PF(bp)) + return -EOPNOTSUPP; + + /* The switch-id that the pf belongs to is exported by + * the switchdev ndo. This name is just to distinguish from the + * vf-rep ports. + */ + rc = snprintf(buf, len, "pf%d", bp->pf.port_id); + + if (rc >= len) + return -EOPNOTSUPP; + return 0; +} + +int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr) +{ + if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return -EOPNOTSUPP; + + /* The PF and it's VF-reps only support the switchdev framework */ + if (!BNXT_PF(bp)) + return -EOPNOTSUPP; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + /* In SRIOV each PF-pool (PF + child VFs) serves as a + * switching domain, the PF's perm mac-addr can be used + * as the unique parent-id + */ + attr->u.ppid.id_len = ETH_ALEN; + ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr); + break; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int bnxt_swdev_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + return bnxt_port_attr_get(netdev_priv(dev), attr); +} + +static const struct switchdev_ops bnxt_switchdev_ops = { + .switchdev_port_attr_get = bnxt_swdev_port_attr_get +}; + static const struct net_device_ops bnxt_netdev_ops = { .ndo_open = bnxt_open, .ndo_start_xmit = bnxt_start_xmit, @@ -7579,6 +7634,7 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_xdp = bnxt_xdp, .ndo_bridge_getlink = bnxt_bridge_getlink, .ndo_bridge_setlink = bnxt_bridge_setlink, + .ndo_get_phys_port_name = bnxt_get_phys_port_name }; static void bnxt_remove_one(struct pci_dev *pdev) @@ -7838,6 +7894,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; + dev->switchdev_ops = &bnxt_switchdev_ops; pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 63756f0389d7..2d84d5719b70 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -21,6 +21,7 @@ #include #include #include +#include struct tx_bd { __le32 tx_bd_len_flags_type; @@ -1350,4 +1351,5 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); void bnxt_restore_pf_fw_resources(struct bnxt *bp); +int 
bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 60bdb181358e..83478e912ee5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -135,6 +135,18 @@ void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) netif_receive_skb(skb); } +static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf, + size_t len) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + int rc; + + rc = snprintf(buf, len, "vfr%d", vf_rep->vf_idx); + if (rc >= len) + return -EOPNOTSUPP; + return 0; +} + static void bnxt_vf_rep_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -142,6 +154,21 @@ static void bnxt_vf_rep_get_drvinfo(struct net_device *dev, strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); } +static int bnxt_vf_rep_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + + /* as only PORT_PARENT_ID is supported currently use common code + * between PF and VF-rep for now. + */ + return bnxt_port_attr_get(vf_rep->bp, attr); +} + +static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = { + .switchdev_port_attr_get = bnxt_vf_rep_port_attr_get +}; + static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = { .get_drvinfo = bnxt_vf_rep_get_drvinfo }; @@ -150,7 +177,8 @@ static const struct net_device_ops bnxt_vf_rep_netdev_ops = { .ndo_open = bnxt_vf_rep_open, .ndo_stop = bnxt_vf_rep_close, .ndo_start_xmit = bnxt_vf_rep_xmit, - .ndo_get_stats64 = bnxt_vf_rep_get_stats64 + .ndo_get_stats64 = bnxt_vf_rep_get_stats64, + .ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name }; /* Called when the parent PF interface is closed: @@ -274,6 +302,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, dev->netdev_ops = &bnxt_vf_rep_netdev_ops; dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; + dev->switchdev_ops = &bnxt_vf_rep_switchdev_ops; /* Just inherit all the featues of the parent PF as the VF-R * uses the RX/TX rings of the parent PF */ -- cgit v1.2.3-55-g7522 From fd763ad96ae5a4aa76ce00b4090b6044c63c18ed Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:57:25 -0700 Subject: netvsc: remove bogus rtnl_unlock Remove accidental rtnl_unlock from earlier testing. Fixes: 3962981f4822 ("netvsc: add rtnl annotations in rndis") Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 9a9e269a25ae..70b7cc37103c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1548,7 +1548,6 @@ static int netvsc_probe(struct hv_device *dev, netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); - rtnl_unlock(); netdev_lockdep_set_classes(net); -- cgit v1.2.3-55-g7522 From 37b9dfa0d833227bc65353eec9dd0b00e1545a00 Mon Sep 17 00:00:00 2001 From: Mohammed Gamal Date: Mon, 24 Jul 2017 10:57:26 -0700 Subject: netvsc: Remove redundant use of ipv6_hdr() This condition already uses an object of type ipv6hdr in the line above. Use the object directly instead of calling ipv6_hdr Signed-off-by: Mohammed Gamal Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 70b7cc37103c..c266d427f934 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -339,7 +339,7 @@ static u32 net_checksum_info(struct sk_buff *skb) if (ip6->nexthdr == IPPROTO_TCP) return TRANSPORT_INFO_IPV6_TCP; - else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP) + else if (ip6->nexthdr == IPPROTO_UDP) return TRANSPORT_INFO_IPV6_UDP; } -- cgit v1.2.3-55-g7522 From 43bf99ce009de710b68473480a611f3b5ddd11d5 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:57:27 -0700 Subject: netvsc: prefetch the first incoming ring element In interrupt handler, prefetch the first incoming ring element so that it is in cache by the time NAPI poll gets to it. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 0a9d9feedc3f..06f39a99da7c 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -30,6 +30,7 @@ #include #include #include +#include #include @@ -1265,10 +1266,15 @@ int netvsc_poll(struct napi_struct *napi, int budget) void netvsc_channel_cb(void *context) { struct netvsc_channel *nvchan = context; + struct vmbus_channel *channel = nvchan->channel; + struct hv_ring_buffer_info *rbi = &channel->inbound; + + /* preload first vmpacket descriptor */ + prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); if (napi_schedule_prep(&nvchan->napi)) { /* disable interupts from host */ - hv_begin_read(&nvchan->channel->inbound); + hv_begin_read(rbi); __napi_schedule(&nvchan->napi); } -- cgit v1.2.3-55-g7522 From 7ca4593338e19595cad48fa8dcd1da28c81352d6 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:57:28 -0700 Subject: netvsc: fix netvsc_set_channels The number of channels returned by rndis_filter_device_add maybe less than the number requested. Therefore set correct real number of queues. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 52 +++++++++++++++-------------------------- 1 file changed, 19 insertions(+), 33 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c266d427f934..e212cbeb6333 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -713,39 +713,16 @@ static void netvsc_get_channels(struct net_device *net, } } -static int netvsc_set_queues(struct net_device *net, struct hv_device *dev, - u32 num_chn) -{ - struct netvsc_device_info device_info; - struct netvsc_device *net_device; - int ret; - - memset(&device_info, 0, sizeof(device_info)); - device_info.num_chn = num_chn; - device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = num_chn; - - ret = netif_set_real_num_tx_queues(net, num_chn); - if (ret) - return ret; - - ret = netif_set_real_num_rx_queues(net, num_chn); - if (ret) - return ret; - - net_device = rndis_filter_device_add(dev, &device_info); - return PTR_ERR_OR_ZERO(net_device); -} - static int netvsc_set_channels(struct net_device *net, struct ethtool_channels *channels) { struct net_device_context *net_device_ctx = netdev_priv(net); struct hv_device *dev = net_device_ctx->device_ctx; struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); - unsigned int count = channels->combined_count; + unsigned int orig, count = channels->combined_count; + struct netvsc_device_info device_info; bool was_opened; - int ret; + int ret = 0; /* We do not support separate count for rx, tx, or other */ if (count == 0 || @@ -764,19 +741,29 @@ static int netvsc_set_channels(struct net_device *net, if (count > nvdev->max_chn) return -EINVAL; + orig = nvdev->num_chn; was_opened = rndis_filter_opened(nvdev); if (was_opened) rndis_filter_close(nvdev); rndis_filter_device_remove(dev, nvdev); - ret = netvsc_set_queues(net, dev, count); - if (ret == 0) - nvdev->num_chn = count; - else - netvsc_set_queues(net, dev, nvdev->num_chn); + memset(&device_info, 0, sizeof(device_info)); + device_info.num_chn = count; + device_info.ring_size = ring_size; + device_info.max_num_vrss_chns = count; + + nvdev = rndis_filter_device_add(dev, &device_info); + if (!IS_ERR(nvdev)) { + netif_set_real_num_tx_queues(net, nvdev->num_chn); + netif_set_real_num_rx_queues(net, nvdev->num_chn); + ret = PTR_ERR(nvdev); + } else { + device_info.num_chn = orig; + device_info.max_num_vrss_chns = count; + rndis_filter_device_add(dev, &device_info); + } - nvdev = rtnl_dereference(net_device_ctx->nvdev); if (was_opened) rndis_filter_open(nvdev); @@ -863,7 +850,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; - device_info.max_num_vrss_chns = nvdev->num_chn; rndis_filter_device_remove(hdev, nvdev); -- cgit v1.2.3-55-g7522 From 27f5aa92ccafbe1bbc695307e3dee41a0e924c28 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:57:29 -0700 Subject: netvsc: include rtnetlink.h Since these files use rtnl_derefernce make sure and include rtnetlink.h Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 2 ++ drivers/net/hyperv/rndis_filter.c | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index e212cbeb6333..a94fd545a650 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -33,6 +33,8 @@ #include #include #include +#include + #include #include #include diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index e439886f72c1..eaa3f0d5682a 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "hyperv_net.h" -- cgit v1.2.3-55-g7522 From 658677f17c5cbe84ec24fd7be69b4da1ed580596 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:57:30 -0700 Subject: netvsc: remove no longer used max_num_rss queues This value has been calculated in rndis_device_attach since 4.11. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 1 - drivers/net/hyperv/netvsc_drv.c | 2 -- 2 files changed, 3 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index afb65f753574..4e7ff348327e 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -147,7 +147,6 @@ struct hv_netvsc_packet { struct netvsc_device_info { unsigned char mac_adr[ETH_ALEN]; int ring_size; - u32 max_num_vrss_chns; u32 num_chn; }; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a94fd545a650..262486ce8e2a 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -753,7 +753,6 @@ static int netvsc_set_channels(struct net_device *net, memset(&device_info, 0, sizeof(device_info)); device_info.num_chn = count; device_info.ring_size = ring_size; - device_info.max_num_vrss_chns = count; nvdev = rndis_filter_device_add(dev, &device_info); if (!IS_ERR(nvdev)) { @@ -762,7 +761,6 @@ static int netvsc_set_channels(struct net_device *net, ret = PTR_ERR(nvdev); } else { device_info.num_chn = orig; - device_info.max_num_vrss_chns = count; rndis_filter_device_add(dev, &device_info); } -- cgit v1.2.3-55-g7522 From a248878d7a1d35ea3bb874891997144ad16d7c27 Mon Sep 17 00:00:00 2001 From: John Allen Date: Mon, 24 Jul 2017 13:26:06 -0500 Subject: ibmvnic: Check for transport event on driver resume On resume, the ibmvnic driver will fail to resume normal operation. The main crq gets closed on suspend by the vnic server and doesn't get reopened again as the interrupt for the transport event that would reset the main crq comes in after the driver has been suspended. This patch resolves the issue by removing the calls to kick the receive interrupts handlers and instead directly invoking the main crq interrupt handler. This will ensure that we see the transport event necessary to properly resume the driver. Signed-off-by: John Allen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a3e694679635..9d8af464dc44 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3851,10 +3851,7 @@ static int ibmvnic_resume(struct device *dev) if (adapter->state != VNIC_OPEN) return 0; - /* kick the interrupt handlers just in case we lost an interrupt */ - for (i = 0; i < adapter->req_rx_queues; i++) - ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq, - adapter->rx_scrq[i]); + tasklet_schedule(&adapter->tasklet); return 0; } -- cgit v1.2.3-55-g7522 From bc88055ab72c0eaa080926c888628b77d2055513 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Mon, 24 Jul 2017 21:20:16 -0700 Subject: bnxt_en: Use SWITCHDEV_SET_OPS(). Suggested by Jakub Kicinski. Fixes: c124a62ff2dd ("bnxt_en: add support for port_attr_get and and get_phys_port_name") Reported-by: kbuild test robot Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 82cbe1804821..badbc3550338 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7894,7 +7894,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->netdev_ops = &bnxt_netdev_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; - dev->switchdev_ops = &bnxt_switchdev_ops; + SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops); pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); -- cgit v1.2.3-55-g7522 From 98b5798499b6614963e693dff5262de8d33b2cd0 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 8 Jul 2017 10:40:13 +0200 Subject: mrf24j40: Fix en error handling path in 'mrf24j40_probe()' If this check fails, we must release some resources as done everywhere else in this function before returning an error code. Signed-off-by: Christophe JAILLET Acked-by: Alan Ott Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/mrf24j40.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 7d334963dc08..ee7084b2d52d 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -1330,7 +1330,8 @@ static int mrf24j40_probe(struct spi_device *spi) if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) { dev_warn(&spi->dev, "spi clock above possible maximum: %d", MAX_SPI_SPEED_HZ); - return -EINVAL; + ret = -EINVAL; + goto err_register_device; } ret = mrf24j40_hw_init(devrec); -- cgit v1.2.3-55-g7522 From 81fc9b5ccf84674ad70b1cdf29abd36397097d66 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Tue, 25 Jul 2017 00:00:26 -0700 Subject: drivers/net: Fix ptr_ret.cocci warnings. we can use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR 1. drivers/net/appletalk/ipddp.c 2. drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c Generated by: scripts/coccinelle/api/ptr_ret.cocci Signed-off-by: Tonghao Zhang Signed-off-by: David S. 
Miller --- drivers/net/appletalk/ipddp.c | 4 +--- drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c | 5 +---- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c index a306de4318d7..9375cef22420 100644 --- a/drivers/net/appletalk/ipddp.c +++ b/drivers/net/appletalk/ipddp.c @@ -311,9 +311,7 @@ module_param(ipddp_mode, int, 0); static int __init ipddp_init_module(void) { dev_ipddp = ipddp_init(); - if (IS_ERR(dev_ipddp)) - return PTR_ERR(dev_ipddp); - return 0; + return PTR_ERR_OR_ZERO(dev_ipddp); } static void __exit ipddp_cleanup_module(void) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c index 1447a8352383..2d3e5e263a32 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c @@ -78,10 +78,7 @@ int brcmf_debug_attach(struct brcmf_pub *drvr) return -ENODEV; drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder); - if (IS_ERR(drvr->dbgfs_dir)) - return PTR_ERR(drvr->dbgfs_dir); - - return 0; + return PTR_ERR_OR_ZERO(drvr->dbgfs_dir); } void brcmf_debug_detach(struct brcmf_pub *drvr) -- cgit v1.2.3-55-g7522 From 447e9ebfc19950559ebb9aa08302a7f11ab1508c Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Tue, 25 Jul 2017 00:51:08 -0700 Subject: nfp: set config bit (ifup/ifdown) on netdev open/close When a netdev (PF netdev or representor) is opened or closed, set the physical port config bit appropriately - which powers UP/DOWN the PHY module for the physical interface. The PHY is powered first in the HW/FW configuration step when opening the netdev and again last in the HW/FW configuration step when closing the netdev. This is only applicable when there is a physical port associated with the netdev and if the NSP support this. Otherwise we silently ignore this step. The 'nfp_eth_set_configured' can actually return positive values - updated the function documentation appropriately. Signed-off-by: Dirk van der Merwe Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- .../net/ethernet/netronome/nfp/nfp_net_common.c | 10 +++++++- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 23 ++++++++++++++++-- drivers/net/ethernet/netronome/nfp/nfp_port.c | 27 ++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/nfp_port.h | 1 + .../ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c | 18 +++++++++++++-- 5 files changed, 74 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 18750ff0ede6..ea471604450e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2658,6 +2658,7 @@ static int nfp_net_netdev_close(struct net_device *netdev) /* Step 2: Tell NFP */ nfp_net_clear_config_and_disable(nn); + nfp_port_configure(netdev, false); /* Step 3: Free resources */ @@ -2775,16 +2776,21 @@ static int nfp_net_netdev_open(struct net_device *netdev) goto err_free_all; /* Step 2: Configure the NFP + * - Ifup the physical interface if it exists * - Enable rings from 0 to tx_rings/rx_rings - 1. 
* - Write MAC address (in case it changed) * - Set the MTU * - Set the Freelist buffer size * - Enable the FW */ - err = nfp_net_set_config_and_enable(nn); + err = nfp_port_configure(netdev, true); if (err) goto err_free_all; + err = nfp_net_set_config_and_enable(nn); + if (err) + goto err_port_disable; + /* Step 3: Enable for kernel * - put some freelist descriptors on each RX ring * - enable NAPI on each ring @@ -2795,6 +2801,8 @@ static int nfp_net_netdev_open(struct net_device *netdev) return 0; +err_port_disable: + nfp_port_configure(netdev, false); err_free_all: nfp_net_close_free_all(nn); return err; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 8ec5474f4b18..47daad30756c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -239,15 +239,34 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) static int nfp_repr_stop(struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); + int err; + + err = nfp_app_repr_stop(repr->app, repr); + if (err) + return err; - return nfp_app_repr_stop(repr->app, repr); + nfp_port_configure(netdev, false); + return 0; } static int nfp_repr_open(struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); + int err; + + err = nfp_port_configure(netdev, true); + if (err) + return err; + + err = nfp_app_repr_open(repr->app, repr); + if (err) + goto err_port_disable; - return nfp_app_repr_open(repr->app, repr); + return 0; + +err_port_disable: + nfp_port_configure(netdev, false); + return err; } const struct net_device_ops nfp_repr_netdev_ops = { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index e42644dbb865..d16a7b78ba9b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -181,6 +181,33 @@ nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len) return 0; } +/** + * nfp_port_configure() - helper to set the interface configured bit + * @netdev: net_device instance + * @configed: Desired state + * + * Helper to set the ifup/ifdown state on the PHY only if there is a physical + * interface associated with the netdev. + * + * Return: + * 0 - configuration successful (or no change); + * -ERRNO - configuration failed. + */ +int nfp_port_configure(struct net_device *netdev, bool configed) +{ + struct nfp_eth_table_port *eth_port; + struct nfp_port *port; + int err; + + port = nfp_port_from_netdev(netdev); + eth_port = __nfp_port_get_eth_port(port); + if (!eth_port) + return 0; + + err = nfp_eth_set_configured(port->app->cpp, eth_port->index, configed); + return err < 0 && err != -EOPNOTSUPP ? 
err : 0; +} + int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, struct nfp_port *port, unsigned int id) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index a33d22e18f94..56c76926c82a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -120,6 +120,7 @@ struct nfp_eth_table_port *nfp_port_get_eth_port(struct nfp_port *port); int nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len); +int nfp_port_configure(struct net_device *netdev, bool configed); struct nfp_port * nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type, diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c index c2bc36e8649f..f6f7c085f8e0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c @@ -391,7 +391,10 @@ int nfp_eth_config_commit_end(struct nfp_nsp *nsp) * Enable or disable PHY module (this usually means setting the TX lanes * disable bits). * - * Return: 0 or -ERRNO. + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. */ int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) { @@ -427,7 +430,10 @@ int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) * * Set the ifup/ifdown state on the PHY. * - * Return: 0 or -ERRNO. + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. */ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed) { @@ -439,6 +445,14 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed) if (IS_ERR(nsp)) return PTR_ERR(nsp); + /* Older ABI versions did support this feature, however this has only + * been reliable since ABI 20. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 20) { + nfp_eth_config_cleanup_end(nsp); + return -EOPNOTSUPP; + } + entries = nfp_nsp_config_entries(nsp); /* Check if we are already in requested state */ -- cgit v1.2.3-55-g7522 From b721cfaf03bcaac0a3abf702c4240326eed9e4b1 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:25:18 -0700 Subject: bnxt: fix unsigned comparsion with 0 Fixes warning because location is u32 and can never be netative warning: comparison of unsigned expression < 0 is always false [-Wtype-limits] Signed-off-by: Stephen Hemminger Acked-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 140e76904af9..08b870d7d466 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -523,7 +523,7 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd) struct flow_keys *fkeys; int i, rc = -EINVAL; - if (fs->location < 0 || fs->location >= BNXT_NTP_FLTR_MAX_FLTR) + if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR) return rc; for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) { -- cgit v1.2.3-55-g7522 From 351bac30613378c4684d4673aac0c7917980a652 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:25:19 -0700 Subject: bnxt: fix unused variable warnings Fix a couple of warnings where variable ‘txq’ set but not used Signed-off-by: Stephen Hemminger Acked-by: Michael Chan v, i); Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index badbc3550338..9835ddf1685b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5628,12 +5628,10 @@ void bnxt_tx_disable(struct bnxt *bp) { int i; struct bnxt_tx_ring_info *txr; - struct netdev_queue *txq; if (bp->tx_ring) { for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; - txq = netdev_get_tx_queue(bp->dev, i); txr->dev_state = BNXT_DEV_STATE_CLOSING; } } @@ -5646,11 +5644,9 @@ void bnxt_tx_enable(struct bnxt *bp) { int i; struct bnxt_tx_ring_info *txr; - struct netdev_queue *txq; for (i = 0; i < bp->tx_nr_rings; i++) { txr = &bp->tx_ring[i]; - txq = netdev_get_tx_queue(bp->dev, i); txr->dev_state = 0; } netif_tx_wake_all_queues(bp->dev); -- cgit v1.2.3-55-g7522 From 8916829366c17a12c996a10da43c7f9d2e5e7039 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:25:20 -0700 Subject: benet: fix set but not used warning warning: variable ‘netdev’ set but not used Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_roce.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c index 2b62841c4c63..05989aafaf32 100644 --- a/drivers/net/ethernet/emulex/benet/be_roce.c +++ b/drivers/net/ethernet/emulex/benet/be_roce.c @@ -139,10 +139,7 @@ int be_roce_register_driver(struct ocrdma_driver *drv) } ocrdma_drv = drv; list_for_each_entry(dev, &be_adapter_list, entry) { - struct net_device *netdev; - _be_roce_dev_add(dev); - netdev = dev->netdev; } mutex_unlock(&be_adapter_list_lock); return 0; -- cgit v1.2.3-55-g7522 From 3754b87a4e2d8cad644cea9713fed7842504991a Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:25:21 -0700 Subject: netfilter: remove unused variable warning: ‘recent_old_fops’ defined but not used Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/netfilter/xt_recent.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 3f6c4fa78bdb..245fa350a7a8 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -106,7 +106,7 @@ static DEFINE_SPINLOCK(recent_lock); static DEFINE_MUTEX(recent_mutex); #ifdef CONFIG_PROC_FS -static const struct file_operations recent_old_fops, recent_mt_fops; +static const struct file_operations recent_mt_fops; #endif static u_int32_t hash_rnd __read_mostly; -- cgit v1.2.3-55-g7522 From 614d79c09e03d4a421f6d7eab1804600db33e04e Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:25:22 -0700 Subject: socket: fix set not used warning The variable owned_by_user is always set, but only used when kernel is configured with LOCKDEP enabled. Get rid of the warning by moving the code to put the call to owned_by_user into the the rcu_protected call. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/socket.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/socket.c b/net/socket.c index bf2122691fba..79d9bb964cd8 100644 --- a/net/socket.c +++ b/net/socket.c @@ -3404,7 +3404,6 @@ u32 kernel_sock_ip_overhead(struct sock *sk) struct inet_sock *inet; struct ip_options_rcu *opt; u32 overhead = 0; - bool owned_by_user; #if IS_ENABLED(CONFIG_IPV6) struct ipv6_pinfo *np; struct ipv6_txoptions *optv6 = NULL; @@ -3413,13 +3412,12 @@ u32 kernel_sock_ip_overhead(struct sock *sk) if (!sk) return overhead; - owned_by_user = sock_owned_by_user(sk); switch (sk->sk_family) { case AF_INET: inet = inet_sk(sk); overhead += sizeof(struct iphdr); opt = rcu_dereference_protected(inet->inet_opt, - owned_by_user); + sock_owned_by_user(sk)); if (opt) overhead += opt->opt.optlen; return overhead; @@ -3429,7 +3427,7 @@ u32 kernel_sock_ip_overhead(struct sock *sk) overhead += sizeof(struct ipv6hdr); if (np) optv6 = rcu_dereference_protected(np->opt, - owned_by_user); + sock_owned_by_user(sk)); if (optv6) overhead += (optv6->opt_flen + optv6->opt_nflen); return overhead; -- cgit v1.2.3-55-g7522 From ce7426ca70de80b4a8c9ada8f14ce6d651cccf5e Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 24 Jul 2017 10:25:23 -0700 Subject: 6lowpan: fix set not used warning Signed-off-by: Stephen Hemminger Acked-by: Luiz Augusto von Dentz Signed-off-by: David S. Miller --- net/bluetooth/6lowpan.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 2af4f1cc0ab4..4e2576fc0c59 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -273,9 +273,6 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev, struct lowpan_peer *peer) { const u8 *saddr; - struct lowpan_btle_dev *dev; - - dev = lowpan_btle_dev(netdev); saddr = peer->lladdr; -- cgit v1.2.3-55-g7522 From d3e3becedc43adc8b8fb12e7507dd4e5aae4d17d Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 25 Jul 2017 13:28:39 -0400 Subject: bnxt_en: include bnxt_vfr.c code under CONFIG_BNXT_SRIOV switch And define empty functions in bnxt_vfr.h when CONFIG_BNXT_SRIOV is not defined. This fixes build error when CONFIG_BNXT_SRIOV is switched off: >> drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c:165:16: error: 'struct >> bnxt' has no member named 'sriov_lock' Reported-by: kbuild test robot Fixes: 4ab0c6a8ffd7 ("bnxt_en: add support to enable VF-representors") Signed-off-by: Sathya Perla Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 4 ++++ drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h | 30 +++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index 83478e912ee5..a52e292be052 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -16,6 +16,8 @@ #include "bnxt.h" #include "bnxt_vfr.h" +#ifdef CONFIG_BNXT_SRIOV + #define CFA_HANDLE_INVALID 0xffff #define VF_IDX_INVALID 0xffff @@ -487,3 +489,5 @@ void bnxt_dl_unregister(struct bnxt *bp) devlink_unregister(dl); devlink_free(dl); } + +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h index c6cd55afbb89..e55a3b693e20 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h @@ -10,6 +10,8 @@ #ifndef BNXT_VFR_H #define BNXT_VFR_H +#ifdef CONFIG_BNXT_SRIOV + #define MAX_CFA_CODE 65536 /* Struct to hold housekeeping info needed by devlink interface */ @@ -39,4 +41,32 @@ void bnxt_vf_reps_open(struct bnxt *bp); void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb); struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code); +#else + +static inline int bnxt_dl_register(struct bnxt *bp) +{ + return 0; +} + +static inline void bnxt_dl_unregister(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_close(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_reps_open(struct bnxt *bp) +{ +} + +static inline void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb) +{ +} + +static inline struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code) +{ + return NULL; +} +#endif /* CONFIG_BNXT_SRIOV */ #endif /* BNXT_VFR_H */ -- cgit v1.2.3-55-g7522 From e408ebdc41aa53f0aa552132384daaa5f5c6301d Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 25 Jul 2017 13:28:40 -0400 Subject: bnxt_en: use SWITCHDEV_SET_OPS() for setting vf_rep_switchdev_ops This fixes the build error: ‘struct net_device’ has no member named ‘switchdev_ops’ Reported-by: kbuild test robot Fixes: c124a62ff2dd ("bnxt_en: add support for port_attr_get and and get_phys_port_name") Signed-off-by: Sathya Perla Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index a52e292be052..c00352a4c1f0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -304,7 +304,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, dev->netdev_ops = &bnxt_vf_rep_netdev_ops; dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops; - dev->switchdev_ops = &bnxt_vf_rep_switchdev_ops; + SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops); /* Just inherit all the featues of the parent PF as the VF-R * uses the RX/TX rings of the parent PF */ -- cgit v1.2.3-55-g7522 From 53f70b8b5aa06db53eb06f092342e6073891729a Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Tue, 25 Jul 2017 13:28:41 -0400 Subject: bnxt_en: fix switchdev port naming for external-port-rep and vf-reps Fix the phys_port_name for the external physical port to be in "pA" format and that of VF-rep to be in "pCvfD" format as suggested by Jakub Kicinski. 
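A minimal sketch of the naming convention the hunks below adopt (the helper name and parameters here are illustrative, not the driver's actual code): the PF uplink is reported as "p<port>" and a VF representor as "pf<pf>vf<vf>", and the snprintf() result is checked against the buffer length so a truncated name is reported as -EOPNOTSUPP instead of being returned silently.

#include <linux/errno.h>
#include <linux/kernel.h>

/* Illustrative only: bnxt_en derives these ids from bp->pf.port_id,
 * PCI_FUNC(pf_pdev->devfn) and the VF index; here they are plain
 * parameters. Pass vf_id < 0 for the PF uplink port.
 */
static int example_get_phys_port_name(char *buf, size_t len,
				       int pf_id, int vf_id)
{
	int n;

	if (vf_id < 0)
		n = snprintf(buf, len, "p%d", pf_id);
	else
		n = snprintf(buf, len, "pf%dvf%d", pf_id, vf_id);

	if (n < 0 || n >= (int)len)
		return -EOPNOTSUPP;
	return 0;
}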
Fixes: c124a62ff2dd ("bnxt_en: add support for port_attr_get and get_phys_port_name") Signed-off-by: Sathya Perla Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 +----- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 4 +++- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9835ddf1685b..156fb374522b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7552,11 +7552,7 @@ static int bnxt_get_phys_port_name(struct net_device *dev, char *buf, if (!BNXT_PF(bp)) return -EOPNOTSUPP; - /* The switch-id that the pf belongs to is exported by - * the switchdev ndo. This name is just to distinguish from the - * vf-rep ports. - */ - rc = snprintf(buf, len, "pf%d", bp->pf.port_id); + rc = snprintf(buf, len, "p%d", bp->pf.port_id); if (rc >= len) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index c00352a4c1f0..b05c5d0ee3f9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -141,9 +141,11 @@ static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf, size_t len) { struct bnxt_vf_rep *vf_rep = netdev_priv(dev); + struct pci_dev *pf_pdev = vf_rep->bp->pdev; int rc; - rc = snprintf(buf, len, "vfr%d", vf_rep->vf_idx); + rc = snprintf(buf, len, "pf%dvf%d", PCI_FUNC(pf_pdev->devfn), + vf_rep->vf_idx); if (rc >= len) return -EOPNOTSUPP; return 0; -- cgit v1.2.3-55-g7522 From 0e1ff3061cb529a70f03f63988a48f9fda8ed419 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 19 Jul 2017 15:00:26 -0700 Subject: ixgbe: Ensure MAC filter was added before setting MACVLAN This patch adds a check to ensure that adding the MAC filter was successful before setting the MACVLAN. If it was unsuccessful, propagate the error. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 0760bd7eeb01..112d24c6c9ce 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -679,8 +679,9 @@ update_vlvfb: static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, int vf, int index, unsigned char *mac_addr) { - struct list_head *pos; struct vf_macvlans *entry; + struct list_head *pos; + int retval = 0; if (index <= 1) { list_for_each(pos, &adapter->vf_mvs.l) { @@ -721,13 +722,15 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, if (!entry || !entry->free) return -ENOSPC; + retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); + if (retval < 0) + return retval; + entry->free = false; entry->is_macvlan = true; entry->vf = vf; memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); - ixgbe_add_mac_filter(adapter, mac_addr, vf); - return 0; } -- cgit v1.2.3-55-g7522 From 72f740b1013783c81da928cfe2ac82dd767c74f0 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 7 Jun 2017 14:36:18 -0700 Subject: ixgbe: Enable LASI interrupts for X552 devices Enable LASI interrupts on X552 devices in order to receive notifications of link configurations of the external PHY and support the configuration of the internal iXFI link since iXFI does not support auto-negotiation. 
This is not required for X553 devices; add a check to avoid enabling LASI interrupts for X553 devices. Signed-off-by: Tony Nguyen Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 31 +++++++++++++++++++-------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 72d84a065e34..aa34e0b131bb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2404,17 +2404,30 @@ static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); /* Enable link status change alarm */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - MDIO_MMD_AN, ®); - if (status) - return status; - reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; + /* Enable the LASI interrupts on X552 devices to receive notifications + * of the link configurations of the external PHY and correspondingly + * support the configuration of the internal iXFI link, since iXFI does + * not support auto-negotiation. This is not required for X553 devices + * having KR support, which performs auto-negotiations and which is used + * as the internal link to the external PHY. Hence adding a check here + * to avoid enabling LASI interrupts for X553 devices. + */ + if (hw->mac.type != ixgbe_mac_x550em_a) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + MDIO_MMD_AN, ®); + if (status) + return status; + + reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; - status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, - MDIO_MMD_AN, reg); - if (status) - return status; + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + MDIO_MMD_AN, reg); + if (status) + return status; + } /* Enable high temperature failure and global fault alarms */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, -- cgit v1.2.3-55-g7522 From 48301cf22fa7d70db3ae777e374edfd4119fc826 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 7 Jun 2017 14:36:19 -0700 Subject: ixgbe: Update NW_MNG_IF_SEL support for X553 The MAC register NW_MNG_IF_SEL fields have been redefined for X553. These changes impact the iXFI driver code flow. Since iXFI is only supported in X552, add MAC checks for iXFI flows. 
Signed-off-by: Tony Nguyen Signed-off-by: Paul Greenwalt Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 4 ++-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 14 +++++++++++--- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0f867dcda65f..96606e3eb965 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -386,7 +386,7 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) if (ixgbe_removed(reg_addr)) return IXGBE_FAILED_READ_REG; if (unlikely(hw->phy.nw_mng_if_sel & - IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) { + IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { struct ixgbe_adapter *adapter; int i; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 9c2460c5ef1b..ffa0ee5cd0f5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3778,8 +3778,8 @@ struct ixgbe_info { #define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G BIT(19) #define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G BIT(20) #define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G BIT(21) -#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23) -#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) +#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE BIT(25) +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) /* X552 only */ #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 #define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index aa34e0b131bb..95adbda36235 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1555,9 +1555,14 @@ static s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) **/ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) { + struct ixgbe_mac_info *mac = &hw->mac; s32 status; u32 reg_val; + /* iXFI is only supported with X552 */ + if (mac->type != ixgbe_mac_X550EM_x) + return IXGBE_ERR_LINK_SETUP; + /* Disable AN and force speed to 10G Serial. */ status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), @@ -1874,8 +1879,10 @@ static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, else force_speed = IXGBE_LINK_SPEED_1GB_FULL; - /* If internal link mode is XFI, then setup XFI internal link. */ - if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + /* If X552 and internal link mode is XFI, then setup XFI internal link. 
+ */ + if (hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { status = ixgbe_setup_ixfi_x550em(hw, &force_speed); if (status) @@ -2628,7 +2635,8 @@ static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) return IXGBE_ERR_CONFIG; - if (hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { + if (!(hw->mac.type == ixgbe_mac_X550EM_x && + !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE))) { speed = IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; return ixgbe_setup_kr_speed_x550em(hw, speed); -- cgit v1.2.3-55-g7522 From ae84dbf7ff485b3b59740c6ea69df0613f6cd4f7 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 7 Jun 2017 14:36:20 -0700 Subject: ixgbe: Do not support flow control autonegotiation for X553 Flow control autonegotiation is not supported for fiber on X553. Add device ID checks in ixgbe_device_supports_autoneg_fc() to return the appropriate value. Signed-off-by: Tony Nguyen Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 4e35e7017f3d..40ae7db468ea 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -79,13 +79,22 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) switch (hw->phy.media_type) { case ixgbe_media_type_fiber: - hw->mac.ops.check_link(hw, &speed, &link_up, false); - /* if link is down, assume supported */ - if (link_up) - supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? + /* flow control autoneg black list */ + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + supported = false; + break; + default: + hw->mac.ops.check_link(hw, &speed, &link_up, false); + /* if link is down, assume supported */ + if (link_up) + supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? true : false; - else - supported = true; + else + supported = true; + } + break; case ixgbe_media_type_backplane: supported = true; @@ -111,6 +120,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) break; } + if (!supported) + hw_dbg(hw, "Device %x does not support flow control autoneg\n", + hw->device_id); + return supported; } -- cgit v1.2.3-55-g7522 From 7adbccbbb5beabe14f3a02ee41abdaa1801395b8 Mon Sep 17 00:00:00 2001 From: Tony Nguyen Date: Wed, 7 Jun 2017 14:36:21 -0700 Subject: ixgbe: Disable flow control for XFI Flow control autonegotiation is not supported for XFI. Make sure that ixgbe_device_supports_autoneg_fc() returns false and hw->fc.disable_fc_autoneg is set to true to avoid running the fc_autoneg function for that device. 
Signed-off-by: Tony Nguyen Signed-off-by: Emil Tantilov Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 5 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 57 ++++++++++++++----------- 2 files changed, 35 insertions(+), 27 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 40ae7db468ea..2c19070d2a0b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -97,7 +97,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) break; case ixgbe_media_type_backplane: - supported = true; + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) + supported = false; + else + supported = true; break; case ixgbe_media_type_copper: /* only some copper devices support flow control autoneg */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 95adbda36235..19fbb2f28ea4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -2843,7 +2843,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) { bool pause, asm_dir; u32 reg_val; - s32 rc; + s32 rc = 0; /* Validate the requested mode */ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { @@ -2886,32 +2886,37 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw) return IXGBE_ERR_CONFIG; } - if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR && - hw->device_id != IXGBE_DEV_ID_X550EM_A_KR && - hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L) - return 0; - - rc = hw->mac.ops.read_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, - ®_val); - if (rc) - return rc; - - reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); - if (pause) - reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; - if (asm_dir) - reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; - rc = hw->mac.ops.write_iosf_sb_reg(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, - reg_val); - - /* This device does not fully support AN. */ - hw->fc.disable_fc_autoneg = true; + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + rc = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + ®_val); + if (rc) + return rc; + reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + if (pause) + reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + if (asm_dir) + reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + rc = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, + reg_val); + + /* This device does not fully support AN. 
*/ + hw->fc.disable_fc_autoneg = true; + break; + case IXGBE_DEV_ID_X550EM_X_XFI: + hw->fc.disable_fc_autoneg = true; + break; + default: + break; + } return rc; } -- cgit v1.2.3-55-g7522 From 67a75194bce07669bf11e14a24a95f64ebde8b47 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 25 Jul 2017 17:35:50 +0200 Subject: virtio-net: mark PM functions as __maybe_unused After removing the reset function, the freeze and restore functions are now unused when CONFIG_PM_SLEEP is disabled: drivers/net/virtio_net.c:1881:12: error: 'virtnet_restore_up' defined but not used [-Werror=unused-function] static int virtnet_restore_up(struct virtio_device *vdev) drivers/net/virtio_net.c:1859:13: error: 'virtnet_freeze_down' defined but not used [-Werror=unused-function] static void virtnet_freeze_down(struct virtio_device *vdev) A more robust way to do this is to remove the #ifdef around the callers and instead mark them as __maybe_unused. The compiler will now just silently drop the unused code. Fixes: 4941d472bf95 ("virtio-net: do not reset during XDP set") Signed-off-by: Arnd Bergmann Acked-by: Jason Wang Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d4751ce23b4f..1902701e15a9 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2702,8 +2702,7 @@ static void virtnet_remove(struct virtio_device *vdev) free_netdev(vi->dev); } -#ifdef CONFIG_PM_SLEEP -static int virtnet_freeze(struct virtio_device *vdev) +static __maybe_unused int virtnet_freeze(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; @@ -2714,7 +2713,7 @@ static int virtnet_freeze(struct virtio_device *vdev) return 0; } -static int virtnet_restore(struct virtio_device *vdev) +static __maybe_unused int virtnet_restore(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; int err; @@ -2730,7 +2729,6 @@ static int virtnet_restore(struct virtio_device *vdev) return 0; } -#endif static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, -- cgit v1.2.3-55-g7522 From db1a8f8e833037a8ed0f333243d0c90d18acb62f Mon Sep 17 00:00:00 2001 From: Gustavo A R Silva Date: Wed, 14 Jun 2017 21:38:26 -0500 Subject: i40e: fix incorrect variable assignment Fix incorrect variable assignment. Based on line 1511: aq_ret = I40_ERR_PARAM; the correct variable to be used in this instance is aq_ret instead of ret. Also, variable ret is updated at line 1602 just before return, so assigning a value to this variable in this code block is useless. 
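The pattern being fixed is easier to see in isolation; a self-contained sketch with hypothetical helper names (this is not the i40e code itself): the early error assignment must go to the variable the reply path actually consumes, because the other status variable is unconditionally overwritten just before return.

#include <linux/errno.h>

static int check_capability(void) { return -1; }	/* stand-in for the failing check */
static int send_reply(int aq_ret) { return aq_ret; }	/* stand-in for the reply path */

static int example_handler(void)
{
	int ret, aq_ret = 0;

	if (check_capability()) {
		aq_ret = -EINVAL;	/* reported to the peer via send_reply() */
		/* ret = -EINVAL;	 * dead store: 'ret' is reassigned below */
		goto err;
	}
	/* ... build the successful reply ... */
err:
	ret = send_reply(aq_ret);	/* any earlier value of 'ret' is lost here */
	return ret;
}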
Addresses-Coverity-ID: 1397693 Signed-off-by: Gustavo A R Silva Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index ecbe40ea8ffe..ba327e90f32a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1567,7 +1567,7 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) dev_err(&pf->pdev->dev, "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n", vf->vf_id); - ret = I40E_ERR_PARAM; + aq_ret = I40E_ERR_PARAM; goto err; } vfres->vf_offload_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING; -- cgit v1.2.3-55-g7522 From 7c9ae7f053e9e896c24fd23595ba369a5fe322e1 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Tue, 20 Jun 2017 15:16:53 -0700 Subject: i40e: Fix for trace found with S4 state This patch fixes a problem found in systems when entering S4 state. This patch fixes the problem by ensuring that the misc vector's IRQ is disabled as well. Without this patch a stack trace can be seen upon entering S4 state. Signed-off-by: Carolyn Wyborny Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2db93d3f6d23..933b8e357ee4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12089,7 +12089,10 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); i40e_stop_misc_vector(pf); - + if (pf->msix_entries) { + synchronize_irq(pf->msix_entries[0].vector); + free_irq(pf->msix_entries[0].vector, pf); + } retval = pci_save_state(pdev); if (retval) return retval; @@ -12129,6 +12132,15 @@ static int i40e_resume(struct pci_dev *pdev) /* handling the reset will rebuild the device state */ if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { clear_bit(__I40E_DOWN, pf->state); + if (pf->msix_entries) { + err = request_irq(pf->msix_entries[0].vector, + i40e_intr, 0, pf->int_name, pf); + if (err) { + dev_err(&pf->pdev->dev, + "request_irq for %s failed: %d\n", + pf->int_name, err); + } + } i40e_reset_and_rebuild(pf, false, false); } -- cgit v1.2.3-55-g7522 From 4d5957cbdecdbb77d24c1465caadd801c07afa4a Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 20 Jun 2017 15:16:54 -0700 Subject: i40e: remove WQ_UNBOUND and the task limit of our workqueue During certain events such as a CORER, multiple devices will run a work task to handle some cleanup. This can cause issues due to a single-threaded workqueue which can mean that a device doesn't cleanup in time. Prevent this by removing the single-threaded restriction on the module workqueue. This avoids the need to add more complex yielding logic in our service task routine. This is also similar to what other drivers such as fm10k do. 
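The reasoning above comes down to the alloc_workqueue() arguments; a short sketch of the two variants (queue and function names are placeholders, not i40e code): WQ_UNBOUND with max_active = 1 gives the "single threaded" behaviour described above, max_active = 0 keeps the default concurrency limit, and WQ_MEM_RECLAIM in both cases reserves a rescuer thread so the queue can make forward progress under memory pressure.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/* old: effectively one work item at a time across every adapter
	 * example_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
	 *			        "example");
	 */

	/* new: default task limit, rescuer thread still guaranteed */
	example_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, "example");
	if (!example_wq)
		return -ENOMEM;
	return 0;
}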
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 933b8e357ee4..22e60841cb22 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -12180,12 +12180,14 @@ static int __init i40e_init_module(void) i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); - /* we will see if single thread per module is enough for now, - * it can't be any worse than using the system workqueue which - * was already single threaded + /* There is no need to throttle the number of active tasks because + * each device limits its own task using a state bit for scheduling + * the service task, and the device tasks do not interfere with each + * other, so we don't set a max task limit. We must set WQ_MEM_RECLAIM + * since we need to be able to guarantee forward progress even under + * memory pressure. */ - i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, - i40e_driver_name); + i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name); if (!i40e_wq) { pr_err("%s: Failed to create workqueue\n", i40e_driver_name); return -ENOMEM; -- cgit v1.2.3-55-g7522 From 981e25c32bc22bcaa429420c92bfec860008a1eb Mon Sep 17 00:00:00 2001 From: Paul M Stillwell Jr Date: Tue, 20 Jun 2017 15:16:55 -0700 Subject: i40e: Handle admin Q timeout when releasing NVM There are some rare cases where the release resource call will return an admin Q timeout. In these cases the code needs to try to release the resource again until it succeeds or it times out. Signed-off-by: Paul M Stillwell Jr Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 800bd55d0159..6fdecd70dcbc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -134,8 +134,25 @@ i40e_i40e_acquire_nvm_exit: **/ void i40e_release_nvm(struct i40e_hw *hw) { - if (!hw->nvm.blank_nvm_mode) - i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + i40e_status ret_code = I40E_SUCCESS; + u32 total_delay = 0; + + if (hw->nvm.blank_nvm_mode) + return; + + ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); + + /* there are some rare cases when trying to release the resource + * results in an admin Q timeout, so handle them correctly + */ + while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) && + (total_delay < hw->aq.asq_cmd_timeout)) { + usleep_range(1000, 2000); + ret_code = i40e_aq_release_resource(hw, + I40E_NVM_RESOURCE_ID, + 0, NULL); + total_delay++; + } } /** -- cgit v1.2.3-55-g7522 From 0ac30ce433232944e702876c1288c0d50eee3151 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 20 Jun 2017 15:16:56 -0700 Subject: i40e: fix up 32 bit timespec references As it turns out there was only a small set of errors on 32 bit, and we just needed to be using the right calls for dealing with timespec64 variables. 
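A standalone illustration of the helper the patch switches to (example_bump_time is a made-up name, not driver code):

#include <linux/time64.h>

/* timespec64_add_ns() folds a nanosecond offset into a timespec64 and
 * handles the carry into tv_sec, so there is no need to open-code the
 * conversion with ns_to_timespec64() and timespec64_add().
 */
static void example_bump_time(struct timespec64 *ts, u64 ns)
{
	timespec64_add_ns(ts, ns);
}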
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 1a0be835fa06..0129ed3b78ec 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -158,13 +158,12 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now, then; + struct timespec64 now; - then = ns_to_timespec64(delta); mutex_lock(&pf->tmreg_lock); i40e_ptp_read(pf, &now); - now = timespec64_add(now, then); + timespec64_add_ns(&now, delta); i40e_ptp_write(pf, (const struct timespec64 *)&now); mutex_unlock(&pf->tmreg_lock); -- cgit v1.2.3-55-g7522 From 4d433084dd3b8b9ce656c35505e7dc5bd1d929b5 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 20 Jun 2017 15:16:57 -0700 Subject: i40e: fix odd formatting and indent The compiler warned on an oddly indented bit of code, and when investigating that, noted that the functions themselves had an odd flow. The if condition was checked, and would exclude a call to AQ, but then the aq_ret would be checked unconditionally which just looks really weird, and is likely to cause objections. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index ba327e90f32a..2e261bb59d10 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1741,16 +1741,14 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, NULL); } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) { hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { - aq_ret = 0; - if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) { - aq_ret = - i40e_aq_set_vsi_uc_promisc_on_vlan(hw, - vsi->seid, - alluni, - f->vlan, - NULL); - aq_err = pf->hw.aq.asq_last_status; - } + if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID) + continue; + aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, + vsi->seid, + alluni, + f->vlan, + NULL); + aq_err = pf->hw.aq.asq_last_status; if (aq_ret) dev_err(&pf->pdev->dev, "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n", -- cgit v1.2.3-55-g7522 From 601a2e7ac5acd4a1681ba7ca6cefe5f9897a3c28 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 20 Jun 2017 15:16:58 -0700 Subject: i40e/i40evf: make IPv6 ATR code clearer This just reorders some local vars and makes the code flow clearer. 
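The reordered logic is easier to follow in isolation; a rough sketch (placeholder function name, assumes a linear skb) of how ipv6_find_hdr() is used to size the network headers:

#include <net/ipv6.h>
#include <linux/skbuff.h>
#include <linux/in.h>

static int example_ipv6_l4_offset(struct sk_buff *skb, unsigned int nhoff,
				  unsigned int *net_hlen)
{
	unsigned int offset = nhoff;	/* start of the IPv6 header */
	int l4_proto;

	/* ipv6_find_hdr() walks the extension-header chain looking for the
	 * requested protocol and, on success, advances "offset" to the start
	 * of that header; it returns the header type or a negative error.
	 */
	l4_proto = ipv6_find_hdr(skb, &offset, IPPROTO_TCP, NULL, NULL);
	if (l4_proto < 0)
		return l4_proto;

	*net_hlen = offset - nhoff;	/* IPv6 header plus extension headers */
	return l4_proto;		/* IPPROTO_TCP when a TCP header was found */
}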
Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index b936febc315a..c9a149678926 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2451,9 +2451,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, hlen = (hdr.network[0] & 0x0F) << 2; l4_proto = hdr.ipv4->protocol; } else { - hlen = hdr.network - skb->data; - l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); - hlen -= hdr.network - skb->data; + /* find the start of the innermost ipv6 header */ + unsigned int inner_hlen = hdr.network - skb->data; + unsigned int h_offset = inner_hlen; + + /* this function updates h_offset to the end of the header */ + l4_proto = + ipv6_find_hdr(skb, &h_offset, IPPROTO_TCP, NULL, NULL); + /* hlen will contain our best estimate of the tcp header */ + hlen = h_offset - inner_hlen; } if (l4_proto != IPPROTO_TCP) -- cgit v1.2.3-55-g7522 From b85c94b617c0004d1f2bd6ca32baa9132a6c2fe5 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 20 Jun 2017 15:16:59 -0700 Subject: i40e/i40evf: remove mismatched type warnings Compiler reported several places where driver compared signed and unsigned types. Cast or change the types to remove the warnings. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 6 +++--- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6 +++--- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 6 +++--- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 4 ++-- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9692a5294fa3..1d29152256fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1091,7 +1091,7 @@ static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs, struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; u32 *reg_buf = p; - int i, j, ri; + unsigned int i, j, ri; u32 reg; /* Tell ethtool which driver-version-specific regs output we have. 
@@ -1550,9 +1550,9 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, struct i40e_ring *tx_ring, *rx_ring; struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; + unsigned int j; int i = 0; char *p; - int j; struct rtnl_link_stats64 *net_stats = i40e_get_vsi_stats_struct(vsi); unsigned int start; @@ -1637,7 +1637,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; char *p = (char *)data; - int i; + unsigned int i; switch (stringset) { case ETH_SS_TEST: diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 22e60841cb22..a2d665161def 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4773,7 +4773,7 @@ static void i40e_detect_recover_hung(struct i40e_pf *pf) { struct net_device *netdev; struct i40e_vsi *vsi; - int i; + unsigned int i; /* Only for LAN VSI */ vsi = pf->vsi[pf->lan_vsi]; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index c9a149678926..d464fceb300f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -860,7 +860,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this @@ -2063,7 +2063,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false, xdp_xmit = false; - while (likely(total_rx_packets < budget)) { + while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; struct xdp_buff xdp; @@ -2196,7 +2196,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_bytes += total_rx_bytes; /* guarantee a trip back through this routine if there was a failure */ - return failure ? budget : total_rx_packets; + return failure ? 
budget : (int)total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 12b02e530503..d91676ccf125 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -275,7 +275,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); -#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) +#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2)) if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { /* Make sure that anybody stopping the queue after this @@ -1299,7 +1299,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); bool failure = false; - while (likely(total_rx_packets < budget)) { + while (likely(total_rx_packets < (unsigned int)budget)) { struct i40e_rx_buffer *rx_buffer; union i40e_rx_desc *rx_desc; unsigned int size; @@ -1406,7 +1406,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_bytes += total_rx_bytes; /* guarantee a trip back through this routine if there was a failure */ - return failure ? budget : total_rx_packets; + return failure ? budget : (int)total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 9bb2cc7dd4e4..76fd89c1dbb2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -165,7 +165,7 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct i40evf_adapter *adapter = netdev_priv(netdev); - int i, j; + unsigned int i, j; char *p; for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { @@ -197,7 +197,7 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) int i; if (sset == ETH_SS_STATS) { - for (i = 0; i < I40EVF_GLOBAL_STATS_LEN; i++) { + for (i = 0; i < (int)I40EVF_GLOBAL_STATS_LEN; i++) { memcpy(p, i40evf_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; -- cgit v1.2.3-55-g7522 From d8b2c700a3a07c7108d9031dc88a6298c101e05d Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Tue, 20 Jun 2017 15:17:00 -0700 Subject: i40e: display correct UDP tunnel type name The i40e driver attempts to display the UDP tunnel name by doing a check against the type, where for non-zero types we use "vxlan" and for zero type we use "geneve". This is not future proof, because if new tunnel types get added, we'll incorrectly label them. It also depends on the value of UDP_TUNNEL_TYPE_GENEVE == 0, which is brittle. Instead, replace this with a function that can return a constant string depending on the type. For now we'll use "unknown" for types we don't know about, and we can expand this in the future if new types get added. 
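Outside the driver, the shape of the change is just an explicit switch instead of a two-way truth test on the type (function names here are illustrative):

#include <net/udp_tunnel.h>

/* Brittle: a bare truth test can only distinguish two cases and will
 * mislabel any tunnel type added later.
 */
static const char *tunnel_name_old(u8 type)
{
	return type ? "vxlan" : "geneve";
}

/* Robust: name the known types explicitly and fall back to "unknown". */
static const char *tunnel_name_new(u8 type)
{
	switch (type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	default:
		return "unknown";
	}
}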
Signed-off-by: Jacob Keller Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a2d665161def..2b115b0c5296 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7520,6 +7520,18 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_flush(hw); } +static const char *i40e_tunnel_name(struct i40e_udp_port_config *port) +{ + switch (port->type) { + case UDP_TUNNEL_TYPE_VXLAN: + return "vxlan"; + case UDP_TUNNEL_TYPE_GENEVE: + return "geneve"; + default: + return "unknown"; + } +} + /** * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters * @pf: board private structure @@ -7565,14 +7577,14 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ret = i40e_aq_del_udp_tunnel(hw, i, NULL); if (ret) { - dev_dbg(&pf->pdev->dev, - "%s %s port %d, index %d failed, err %s aq_err %s\n", - pf->udp_ports[i].type ? "vxlan" : "geneve", - port ? "add" : "delete", - port, i, - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, - pf->hw.aq.asq_last_status)); + dev_info(&pf->pdev->dev, + "%s %s port %d, index %d failed, err %s aq_err %s\n", + i40e_tunnel_name(&pf->udp_ports[i]), + port ? "add" : "delete", + port, i, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); pf->udp_ports[i].port = 0; } } -- cgit v1.2.3-55-g7522 From 8d9ee66ac0e5212be37204949c8c86eabbadb24d Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 20 Jun 2017 15:17:01 -0700 Subject: i40evf: add some missing includes These includes were all being used in the driver, but weren't being directly included. Since the current advised method is to directly include anything that you need, this implements that. Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 6cc92089fecb..7901cc85cbe5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -39,6 +39,17 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include -- cgit v1.2.3-55-g7522 From eb23039f6c22a6b240aef7d7ebccceeb52a8452a Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 24 Jun 2017 21:13:52 +0200 Subject: i40e: report BPF prog id during XDP_QUERY_PROG Fill the XDP prog_id with the id just like we do in other XDP enabled drivers such as ixgbe. This is needed so that on dump we can retrieve the attached program based on the id, and dump BPF insns, opcodes, etc back to user space. Only XDP driver missing this is currently i40e. 
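A minimal sketch of the query path in a generic driver (the helper name is made up; the struct netdev_xdp fields match the ndo_xdp interface used in the diff below):

#include <linux/netdevice.h>
#include <linux/filter.h>

/* Report both the attachment state and the BPF program id so userspace can
 * look up the attached program by id and dump its instructions.
 */
static int example_xdp_query(struct bpf_prog *attached, struct netdev_xdp *xdp)
{
	xdp->prog_attached = !!attached;
	xdp->prog_id = attached ? attached->aux->id : 0;
	return 0;
}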
Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Acked-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2b115b0c5296..4104944ea367 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9601,6 +9601,7 @@ static int i40e_xdp(struct net_device *dev, return i40e_xdp_setup(vsi, xdp->prog); case XDP_QUERY_PROG: xdp->prog_attached = i40e_enabled_xdp_vsi(vsi); + xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0; return 0; default: return -EINVAL; -- cgit v1.2.3-55-g7522 From c969ef4ed9cbf3662fc8709e2581427c191504fb Mon Sep 17 00:00:00 2001 From: Tushar Dave Date: Thu, 22 Jun 2017 09:44:32 -0700 Subject: i40evf: Use le32_to_cpu before evaluating HW desc fields i40e hardware descriptor fields are in little-endian format. Driver must use le32_to_cpu while evaluating these fields otherwise on big-endian arch we end up evaluating incorrect values, cause errors like: i40evf 0000:03:0a.0: Expected response 24 from PF, received 402653184 i40evf 0000:03:0a.1: Expected response 7 from PF, received 117440512 Signed-off-by: Tushar Dave Reviewed-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 7c213a347909..93536b9fc629 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1957,8 +1957,8 @@ static void i40evf_adminq_task(struct work_struct *work) container_of(work, struct i40evf_adapter, adminq_task); struct i40e_hw *hw = &adapter->hw; struct i40e_arq_event_info event; - struct virtchnl_msg *v_msg; - i40e_status ret; + enum virtchnl_ops v_op; + i40e_status ret, v_ret; u32 val, oldval; u16 pending; @@ -1970,15 +1970,15 @@ static void i40evf_adminq_task(struct work_struct *work) if (!event.msg_buf) goto out; - v_msg = (struct virtchnl_msg *)&event.desc; do { ret = i40evf_clean_arq_element(hw, &event, &pending); - if (ret || !v_msg->v_opcode) + v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); + v_ret = (i40e_status)le32_to_cpu(event.desc.cookie_low); + + if (ret || !v_op) break; /* No event to process or error cleaning ARQ */ - i40evf_virtchnl_completion(adapter, v_msg->v_opcode, - (i40e_status)v_msg->v_retval, - event.msg_buf, + i40evf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf, event.msg_len); if (pending != 0) memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); -- cgit v1.2.3-55-g7522 From 07c357f34834a94b8cb4274f69b75905034dc30e Mon Sep 17 00:00:00 2001 From: Tushar Dave Date: Thu, 22 Jun 2017 10:12:11 -0700 Subject: i40evf: remove unnecessary __packed This is similar to 'commit 9588397d24eec ("i40e: remove unnecessary __packed")' to avoid unaligned access. 
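The rule being applied ("directly include anything that you need") in miniature, with a made-up header rather than the real i40evf.h:

/* example.h -- illustrative only */
#ifndef EXAMPLE_H
#define EXAMPLE_H

#include <linux/types.h>	/* u32 */
#include <linux/spinlock.h>	/* spinlock_t */
#include <linux/skbuff.h>	/* struct sk_buff */

/* Every type used below is pulled in explicitly above, so this header keeps
 * compiling even if some other header stops providing these indirectly.
 */
struct example_ring {
	spinlock_t	lock;
	u32		count;
	struct sk_buff	*head;
};

#endif /* EXAMPLE_H */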
Signed-off-by: Tushar Dave Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h index 5e314fd3c016..a90737786c34 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h @@ -54,7 +54,7 @@ struct i40e_dma_mem { void *va; dma_addr_t pa; u32 size; -} __packed; +}; #define i40e_allocate_dma_mem(h, m, unused, s, a) \ i40evf_allocate_dma_mem_d(h, m, s, a) @@ -63,7 +63,7 @@ struct i40e_dma_mem { struct i40e_virt_mem { void *va; u32 size; -} __packed; +}; #define i40e_allocate_virt_mem(h, m, s) i40evf_allocate_virt_mem_d(h, m, s) #define i40e_free_virt_mem(h, m) i40evf_free_virt_mem_d(h, m) -- cgit v1.2.3-55-g7522 From 2f1d86e44c9dac948a79ee7543426e00230564ab Mon Sep 17 00:00:00 2001 From: Stefan Assmann Date: Fri, 23 Jun 2017 09:46:24 +0200 Subject: i40e: handle setting administratively set MAC address back to zero When an administratively set MAC was previously set and should now be switched back to 00:00:00:00:00:00 the pf_set_mac flag did not get toggled back to false. As a result VFs were still treated as if an administratively set MAC was present. Signed-off-by: Stefan Assmann Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 2e261bb59d10..979110d59f67 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2762,7 +2762,6 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) spin_unlock_bh(&vsi->mac_filter_hash_lock); - dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); /* program mac filter */ if (i40e_sync_vsi_filters(vsi)) { dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); @@ -2770,7 +2769,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } ether_addr_copy(vf->default_lan_addr.addr, mac); - vf->pf_set_mac = true; + + if (is_zero_ether_addr(mac)) { + vf->pf_set_mac = false; + dev_info(&pf->pdev->dev, "Removing MAC on VF %d\n", vf_id); + } else { + vf->pf_set_mac = true; + dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", + mac, vf_id); + } + /* Force the VF driver stop so it has to reload with new MAC address */ i40e_vc_disable_vf(pf, vf); dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); -- cgit v1.2.3-55-g7522 From 9877e1058aaf166b578ed44c8e0fc78fe6e67152 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 26 Jul 2017 09:55:33 +0200 Subject: hamradio: dmascc: avoid -Wformat-overflow warning gcc warns that the device name might overflow: drivers/net/hamradio/dmascc.c: In function 'dmascc_init': drivers/net/hamradio/dmascc.c:584:22: error: 'sprintf' may write a terminating nul past the end of the destination [-Werror=format-overflow=] sprintf(dev->name, "dmascc%i", 2 * n + i); drivers/net/hamradio/dmascc.c:584:3: note: 'sprintf' output between 8 and 17 bytes into a destination of size 16 sprintf(dev->name, "dmascc%i", 2 * n + i); >From the static data in this file, I can tell that the index is strictly limited to 16, so it won't overflow. 
This simply changes the sprintf() to snprintf(), which is a good idea in general, and shuts up this warning. Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/hamradio/dmascc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index dec6b76bc0fb..cde41200f40a 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -581,7 +581,7 @@ static int __init setup_adapter(int card_base, int type, int n) priv->param.dma = -1; INIT_WORK(&priv->rx_work, rx_bh); dev->ml_priv = priv; - sprintf(dev->name, "dmascc%i", 2 * n + i); + snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i); dev->base_addr = card_base; dev->irq = irq; dev->netdev_ops = &scc_netdev_ops; -- cgit v1.2.3-55-g7522 From eb54e522a000b8e625e03282fd434fb725a530c0 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 25 Jul 2017 11:17:11 -0700 Subject: bpf: install libbpf headers on 'make install' Add a new target to install the bpf.h header to $(prefix)/include/bpf/ directory. This is necessary to build standalone applications using libbpf, without the need to clone the kernel sources and point to them. Signed-off-by: Jakub Kicinski Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- tools/lib/bpf/Makefile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile index 1f5300e56b44..445289555487 100644 --- a/tools/lib/bpf/Makefile +++ b/tools/lib/bpf/Makefile @@ -189,6 +189,10 @@ install_lib: all_cmd $(call QUIET_INSTALL, $(LIB_FILE)) \ $(call do_install,$(LIB_FILE),$(libdir_SQ)) +install_headers: + $(call QUIET_INSTALL, headers) \ + $(call do_install,bpf.h,$(prefix)/include/bpf,644) + install: install_lib ### Cleaning rules -- cgit v1.2.3-55-g7522 From ec9b8dbd825dd3a0667003b5ab56386214f9c648 Mon Sep 17 00:00:00 2001 From: Chopra, Manish Date: Wed, 26 Jul 2017 06:07:09 -0700 Subject: qede: Add getter APIs support for RX flow classification This patch adds support for ethtool getter APIs to query RX flow classification rules. Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 10 +- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 22 ++++- drivers/net/ethernet/qlogic/qede/qede_filter.c | 120 +++++++++++++++++++++++- 3 files changed, 144 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 4dfb238221f9..0a2475b65978 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -160,6 +160,8 @@ struct qede_rdma_dev { struct qede_ptp; +#define QEDE_RFS_MAX_FLTR 256 + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -241,9 +243,7 @@ struct qede_dev { u16 vxlan_dst_port; u16 geneve_dst_port; -#ifdef CONFIG_RFS_ACCEL struct qede_arfs *arfs; -#endif bool wol_enabled; struct qede_rdma_dev rdma_info; @@ -455,9 +455,13 @@ int qede_alloc_arfs(struct qede_dev *edev); #define QEDE_SP_ARFS_CONFIG 4 #define QEDE_SP_TASK_POLL_DELAY (5 * HZ) -#define QEDE_RFS_MAX_FLTR 256 #endif +int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd); +int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, + u32 *rule_locs); +int qede_get_arfs_filter_count(struct qede_dev *edev); + struct qede_reload_args { void (*func)(struct qede_dev *edev, struct qede_reload_args *args); union { diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 6a03d3e66cff..dd39dec62650 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1045,20 +1045,34 @@ static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) } static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, - u32 *rules __always_unused) + u32 *rule_locs) { struct qede_dev *edev = netdev_priv(dev); + int rc = 0; switch (info->cmd) { case ETHTOOL_GRXRINGS: info->data = QEDE_RSS_COUNT(edev); - return 0; + break; case ETHTOOL_GRXFH: - return qede_get_rss_flags(edev, info); + rc = qede_get_rss_flags(edev, info); + break; + case ETHTOOL_GRXCLSRLCNT: + info->rule_cnt = qede_get_arfs_filter_count(edev); + info->data = QEDE_RFS_MAX_FLTR; + break; + case ETHTOOL_GRXCLSRULE: + rc = qede_get_cls_rule_entry(edev, info); + break; + case ETHTOOL_GRXCLSRLALL: + rc = qede_get_cls_rule_all(edev, info, rule_locs); + break; default: DP_ERR(edev, "Command parameters not supported\n"); - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; } + + return rc; } static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index f939db5bac5f..a5e5d328e730 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -38,7 +38,6 @@ #include #include "qede.h" -#ifdef CONFIG_RFS_ACCEL struct qede_arfs_tuple { union { __be32 src_ipv4; @@ -80,6 +79,7 @@ struct qede_arfs_fltr_node { }; struct qede_arfs { +#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx]) #define QEDE_ARFS_POLL_COUNT 100 #define QEDE_RFS_FLW_BITSHIFT (4) #define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1) @@ -92,6 +92,7 @@ struct qede_arfs { bool enable; }; +#ifdef CONFIG_RFS_ACCEL static void qede_configure_arfs_fltr(struct qede_dev *edev, struct qede_arfs_fltr_node *n, u16 rxq_id, bool add_fltr) @@ -1263,3 +1264,120 @@ void qede_config_rx_mode(struct net_device *ndev) out: kfree(uc_macs); } + +static struct 
qede_arfs_fltr_node * +qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location) +{ + struct qede_arfs_fltr_node *fltr; + + hlist_for_each_entry(fltr, head, node) + if (location == fltr->sw_id) + return fltr; + + return NULL; +} + +int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, + u32 *rule_locs) +{ + struct qede_arfs_fltr_node *fltr; + struct hlist_head *head; + int cnt = 0, rc = 0; + + info->data = QEDE_RFS_MAX_FLTR; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + head = QEDE_ARFS_BUCKET_HEAD(edev, 0); + + hlist_for_each_entry(fltr, head, node) { + if (cnt == info->rule_cnt) { + rc = -EMSGSIZE; + goto unlock; + } + + rule_locs[cnt] = fltr->sw_id; + cnt++; + } + + info->rule_cnt = cnt; + +unlock: + __qede_unlock(edev); + return rc; +} + +int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct qede_arfs_fltr_node *fltr = NULL; + int rc = 0; + + cmd->data = QEDE_RFS_MAX_FLTR; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0), + fsp->location); + if (!fltr) { + DP_NOTICE(edev, "Rule not found - location=0x%x\n", + fsp->location); + rc = -EINVAL; + goto unlock; + } + + if (fltr->tuple.eth_proto == htons(ETH_P_IP)) { + if (fltr->tuple.ip_proto == IPPROTO_TCP) + fsp->flow_type = TCP_V4_FLOW; + else + fsp->flow_type = UDP_V4_FLOW; + + fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port; + fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4; + fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4; + } else { + if (fltr->tuple.ip_proto == IPPROTO_TCP) + fsp->flow_type = TCP_V6_FLOW; + else + fsp->flow_type = UDP_V6_FLOW; + fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port; + fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port; + memcpy(&fsp->h_u.tcp_ip6_spec.ip6src, + &fltr->tuple.src_ipv6, sizeof(struct in6_addr)); + memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst, + &fltr->tuple.dst_ipv6, sizeof(struct in6_addr)); + } + + fsp->ring_cookie = fltr->rxq_id; + +unlock: + __qede_unlock(edev); + return rc; +} + +int qede_get_arfs_filter_count(struct qede_dev *edev) +{ + int count = 0; + + __qede_lock(edev); + + if (!edev->arfs) + goto unlock; + + count = edev->arfs->filter_count; + +unlock: + __qede_unlock(edev); + return count; +} -- cgit v1.2.3-55-g7522 From 3f2a2b8b7aaadd731e688a23cbd23f7eb085c7fb Mon Sep 17 00:00:00 2001 From: Chopra, Manish Date: Wed, 26 Jul 2017 06:07:10 -0700 Subject: qed/qede: Add setter APIs support for RX flow classification This patch adds support for adding and deleting rx flow classification rules. Using this user can classify RX flow constituting of TCP/UDP 4-tuples [src_ip/dst_ip and src_port/dst_port] to be steered on a given RX queue Signed-off-by: Manish Chopra Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 8 - drivers/net/ethernet/qlogic/qede/qede.h | 11 +- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 14 +- drivers/net/ethernet/qlogic/qede/qede_filter.c | 365 +++++++++++++++++++++--- drivers/net/ethernet/qlogic/qede/qede_main.c | 9 +- 5 files changed, 347 insertions(+), 60 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index b11399606990..1bddf9372fc9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -954,9 +954,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, struct qed_tunnel_info tunn_info; const u8 *data = NULL; struct qed_hwfn *hwfn; -#ifdef CONFIG_RFS_ACCEL struct qed_ptt *p_ptt; -#endif int rc = -EINVAL; if (qed_iov_wq_start(cdev)) @@ -972,7 +970,6 @@ static int qed_slowpath_start(struct qed_dev *cdev, goto err; } -#ifdef CONFIG_RFS_ACCEL if (cdev->num_hwfns == 1) { p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); if (p_ptt) { @@ -983,7 +980,6 @@ static int qed_slowpath_start(struct qed_dev *cdev, goto err; } } -#endif } cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; @@ -1091,12 +1087,10 @@ err: if (IS_PF(cdev)) release_firmware(cdev->firmware); -#ifdef CONFIG_RFS_ACCEL if (IS_PF(cdev) && (cdev->num_hwfns == 1) && QED_LEADING_HWFN(cdev)->p_arfs_ptt) qed_ptt_release(QED_LEADING_HWFN(cdev), QED_LEADING_HWFN(cdev)->p_arfs_ptt); -#endif qed_iov_wq_stop(cdev, false); @@ -1111,11 +1105,9 @@ static int qed_slowpath_stop(struct qed_dev *cdev) qed_ll2_dealloc_if(cdev); if (IS_PF(cdev)) { -#ifdef CONFIG_RFS_ACCEL if (cdev->num_hwfns == 1) qed_ptt_release(QED_LEADING_HWFN(cdev), QED_LEADING_HWFN(cdev)->p_arfs_ptt); -#endif qed_free_stream_mem(cdev); if (IS_QED_ETH_IF(cdev)) qed_sriov_disable(cdev, true); diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 0a2475b65978..adb700512baa 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -447,16 +447,17 @@ struct qede_fastpath { #ifdef CONFIG_RFS_ACCEL int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id); +#define QEDE_SP_ARFS_CONFIG 4 +#define QEDE_SP_TASK_POLL_DELAY (5 * HZ) +#endif + void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr); void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev); void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc); void qede_free_arfs(struct qede_dev *edev); int qede_alloc_arfs(struct qede_dev *edev); - -#define QEDE_SP_ARFS_CONFIG 4 -#define QEDE_SP_TASK_POLL_DELAY (5 * HZ) -#endif - +int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info); +int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info); int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd); int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, u32 *rule_locs); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index dd39dec62650..e31266df8fdd 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1182,14 +1182,24 @@ static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info) static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) { struct qede_dev *edev = netdev_priv(dev); + int rc; switch (info->cmd) { case ETHTOOL_SRXFH: - return 
qede_set_rss_flags(edev, info); + rc = qede_set_rss_flags(edev, info); + break; + case ETHTOOL_SRXCLSRLINS: + rc = qede_add_cls_rule(edev, info); + break; + case ETHTOOL_SRXCLSRLDEL: + rc = qede_del_cls_rule(edev, info); + break; default: DP_INFO(edev, "Command parameters not supported\n"); - return -EOPNOTSUPP; + rc = -EOPNOTSUPP; } + + return rc; } static u32 qede_get_rxfh_indir_size(struct net_device *dev) diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index a5e5d328e730..f79e36e4060a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -75,6 +75,7 @@ struct qede_arfs_fltr_node { u16 next_rxq_id; bool filter_op; bool used; + u8 fw_rc; struct hlist_node node; }; @@ -92,7 +93,6 @@ struct qede_arfs { bool enable; }; -#ifdef CONFIG_RFS_ACCEL static void qede_configure_arfs_fltr(struct qede_dev *edev, struct qede_arfs_fltr_node *n, u16 rxq_id, bool add_fltr) @@ -122,11 +122,56 @@ qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr) kfree(fltr); } +static int +qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr, + u16 bucket_idx) +{ + fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data, + fltr->buf_len, DMA_TO_DEVICE); + if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) { + DP_NOTICE(edev, "Failed to map DMA memory for rule\n"); + qede_free_arfs_filter(edev, fltr); + return -ENOMEM; + } + + INIT_HLIST_NODE(&fltr->node); + hlist_add_head(&fltr->node, + QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx)); + edev->arfs->filter_count++; + + if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { + edev->ops->configure_arfs_searcher(edev->cdev, true); + edev->arfs->enable = true; + } + + return 0; +} + +static void +qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr) +{ + hlist_del(&fltr->node); + dma_unmap_single(&edev->pdev->dev, fltr->mapping, + fltr->buf_len, DMA_TO_DEVICE); + + qede_free_arfs_filter(edev, fltr); + edev->arfs->filter_count--; + + if (!edev->arfs->filter_count && edev->arfs->enable) { + edev->arfs->enable = false; + edev->ops->configure_arfs_searcher(edev->cdev, false); + } +} + void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc) { struct qede_arfs_fltr_node *fltr = filter; struct qede_dev *edev = dev; + fltr->fw_rc = fw_rc; + if (fw_rc) { DP_NOTICE(edev, "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n", @@ -186,18 +231,17 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) && !fltr->used) || free_fltr) { - hlist_del(&fltr->node); - dma_unmap_single(&edev->pdev->dev, - fltr->mapping, - fltr->buf_len, DMA_TO_DEVICE); - qede_free_arfs_filter(edev, fltr); - edev->arfs->filter_count--; + qede_dequeue_fltr_and_config_searcher(edev, + fltr); } else { - if ((rps_may_expire_flow(edev->ndev, - fltr->rxq_id, - fltr->flow_id, - fltr->sw_id) || del) && - !free_fltr) + bool flow_exp = false; +#ifdef CONFIG_RFS_ACCEL + flow_exp = rps_may_expire_flow(edev->ndev, + fltr->rxq_id, + fltr->flow_id, + fltr->sw_id); +#endif + if ((flow_exp || del) && !free_fltr) qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false); @@ -214,10 +258,12 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr) edev->arfs->enable = false; edev->ops->configure_arfs_searcher(edev->cdev, false); } +#ifdef CONFIG_RFS_ACCEL } else { 
set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, QEDE_SP_TASK_POLL_DELAY); +#endif } spin_unlock_bh(&edev->arfs->arfs_list_lock); @@ -259,25 +305,26 @@ int qede_alloc_arfs(struct qede_dev *edev) spin_lock_init(&edev->arfs->arfs_list_lock); for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) - INIT_HLIST_HEAD(&edev->arfs->arfs_hl_head[i]); + INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i)); - edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev)); - if (!edev->ndev->rx_cpu_rmap) { + edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) * + sizeof(long)); + if (!edev->arfs->arfs_fltr_bmap) { vfree(edev->arfs); edev->arfs = NULL; return -ENOMEM; } - edev->arfs->arfs_fltr_bmap = vzalloc(BITS_TO_LONGS(QEDE_RFS_MAX_FLTR) * - sizeof(long)); - if (!edev->arfs->arfs_fltr_bmap) { - free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); - edev->ndev->rx_cpu_rmap = NULL; +#ifdef CONFIG_RFS_ACCEL + edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev)); + if (!edev->ndev->rx_cpu_rmap) { + vfree(edev->arfs->arfs_fltr_bmap); + edev->arfs->arfs_fltr_bmap = NULL; vfree(edev->arfs); edev->arfs = NULL; return -ENOMEM; } - +#endif return 0; } @@ -286,16 +333,19 @@ void qede_free_arfs(struct qede_dev *edev) if (!edev->arfs) return; +#ifdef CONFIG_RFS_ACCEL if (edev->ndev->rx_cpu_rmap) free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); edev->ndev->rx_cpu_rmap = NULL; +#endif vfree(edev->arfs->arfs_fltr_bmap); edev->arfs->arfs_fltr_bmap = NULL; vfree(edev->arfs); edev->arfs = NULL; } +#ifdef CONFIG_RFS_ACCEL static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos, const struct sk_buff *skb) { @@ -395,9 +445,8 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, spin_lock_bh(&edev->arfs->arfs_list_lock); - n = qede_arfs_htbl_key_search(&edev->arfs->arfs_hl_head[tbl_idx], + n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx), skb, ports[0], ports[1], ip_proto); - if (n) { /* Filter match */ n->next_rxq_id = rxq_index; @@ -449,23 +498,9 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, n->tuple.ip_proto = ip_proto; memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb)); - n->mapping = dma_map_single(&edev->pdev->dev, n->data, - n->buf_len, DMA_TO_DEVICE); - if (dma_mapping_error(&edev->pdev->dev, n->mapping)) { - DP_NOTICE(edev, "Failed to map DMA memory for arfs\n"); - qede_free_arfs_filter(edev, n); - rc = -ENOMEM; + rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx); + if (rc) goto ret_unlock; - } - - INIT_HLIST_NODE(&n->node); - hlist_add_head(&n->node, &edev->arfs->arfs_hl_head[tbl_idx]); - edev->arfs->filter_count++; - - if (edev->arfs->filter_count == 1 && !edev->arfs->enable) { - edev->ops->configure_arfs_searcher(edev->cdev, true); - edev->arfs->enable = true; - } qede_configure_arfs_fltr(edev, n, n->rxq_id, true); @@ -473,6 +508,7 @@ int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags); schedule_delayed_work(&edev->sp_task, 0); + return n->sw_id; ret_unlock: @@ -1277,6 +1313,38 @@ qede_get_arfs_fltr_by_loc(struct hlist_head *head, u32 location) return NULL; } +static bool +qede_compare_user_flow_ips(struct qede_arfs_fltr_node *tpos, + struct ethtool_rx_flow_spec *fsp, + __be16 proto) +{ + if (proto == htons(ETH_P_IP)) { + struct ethtool_tcpip4_spec *ip; + + ip = &fsp->h_u.tcp_ip4_spec; + + if (tpos->tuple.src_ipv4 == ip->ip4src && + tpos->tuple.dst_ipv4 == ip->ip4dst) + return true; + else + return false; + } 
else { + struct ethtool_tcpip6_spec *ip6; + struct in6_addr *src; + + ip6 = &fsp->h_u.tcp_ip6_spec; + src = &tpos->tuple.src_ipv6; + + if (!memcmp(src, &ip6->ip6src, sizeof(struct in6_addr)) && + !memcmp(&tpos->tuple.dst_ipv6, &ip6->ip6dst, + sizeof(struct in6_addr))) + return true; + else + return false; + } + return false; +} + int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info, u32 *rule_locs) { @@ -1366,6 +1434,225 @@ unlock: return rc; } +static int +qede_validate_and_check_flow_exist(struct qede_dev *edev, + struct ethtool_rx_flow_spec *fsp, + int *min_hlen) +{ + __be16 src_port = 0x0, dst_port = 0x0; + struct qede_arfs_fltr_node *fltr; + struct hlist_node *temp; + struct hlist_head *head; + __be16 eth_proto; + u8 ip_proto; + + if (fsp->location >= QEDE_RFS_MAX_FLTR || + fsp->ring_cookie >= QEDE_RSS_COUNT(edev)) + return -EINVAL; + + if (fsp->flow_type == TCP_V4_FLOW) { + *min_hlen += sizeof(struct iphdr) + + sizeof(struct tcphdr); + eth_proto = htons(ETH_P_IP); + ip_proto = IPPROTO_TCP; + } else if (fsp->flow_type == UDP_V4_FLOW) { + *min_hlen += sizeof(struct iphdr) + + sizeof(struct udphdr); + eth_proto = htons(ETH_P_IP); + ip_proto = IPPROTO_UDP; + } else if (fsp->flow_type == TCP_V6_FLOW) { + *min_hlen += sizeof(struct ipv6hdr) + + sizeof(struct tcphdr); + eth_proto = htons(ETH_P_IPV6); + ip_proto = IPPROTO_TCP; + } else if (fsp->flow_type == UDP_V6_FLOW) { + *min_hlen += sizeof(struct ipv6hdr) + + sizeof(struct udphdr); + eth_proto = htons(ETH_P_IPV6); + ip_proto = IPPROTO_UDP; + } else { + DP_NOTICE(edev, "Unsupported flow type = 0x%x\n", + fsp->flow_type); + return -EPROTONOSUPPORT; + } + + if (eth_proto == htons(ETH_P_IP)) { + src_port = fsp->h_u.tcp_ip4_spec.psrc; + dst_port = fsp->h_u.tcp_ip4_spec.pdst; + } else { + src_port = fsp->h_u.tcp_ip6_spec.psrc; + dst_port = fsp->h_u.tcp_ip6_spec.pdst; + } + + head = QEDE_ARFS_BUCKET_HEAD(edev, 0); + hlist_for_each_entry_safe(fltr, temp, head, node) { + if ((fltr->tuple.ip_proto == ip_proto && + fltr->tuple.eth_proto == eth_proto && + qede_compare_user_flow_ips(fltr, fsp, eth_proto) && + fltr->tuple.src_port == src_port && + fltr->tuple.dst_port == dst_port) || + fltr->sw_id == fsp->location) + return -EEXIST; + } + + return 0; +} + +static int +qede_poll_arfs_filter_config(struct qede_dev *edev, + struct qede_arfs_fltr_node *fltr) +{ + int count = QEDE_ARFS_POLL_COUNT; + + while (fltr->used && count) { + msleep(20); + count--; + } + + if (count == 0 || fltr->fw_rc) { + qede_dequeue_fltr_and_config_searcher(edev, fltr); + return -EIO; + } + + return fltr->fw_rc; +} + +int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec *fsp = &info->fs; + struct qede_arfs_fltr_node *n; + int min_hlen = ETH_HLEN, rc; + struct ethhdr *eth; + struct iphdr *ip; + __be16 *ports; + + __qede_lock(edev); + + if (!edev->arfs) { + rc = -EPERM; + goto unlock; + } + + rc = qede_validate_and_check_flow_exist(edev, fsp, &min_hlen); + if (rc) + goto unlock; + + n = kzalloc(sizeof(*n), GFP_KERNEL); + if (!n) { + rc = -ENOMEM; + goto unlock; + } + + n->data = kzalloc(min_hlen, GFP_KERNEL); + if (!n->data) { + kfree(n); + rc = -ENOMEM; + goto unlock; + } + + n->sw_id = fsp->location; + set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap); + n->buf_len = min_hlen; + n->rxq_id = fsp->ring_cookie; + n->next_rxq_id = n->rxq_id; + eth = (struct ethhdr *)n->data; + + if (info->fs.flow_type == TCP_V4_FLOW || + info->fs.flow_type == UDP_V4_FLOW) { + ports = (__be16 *)(n->data + ETH_HLEN + + 
sizeof(struct iphdr)); + eth->h_proto = htons(ETH_P_IP); + n->tuple.eth_proto = htons(ETH_P_IP); + n->tuple.src_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4src; + n->tuple.dst_ipv4 = info->fs.h_u.tcp_ip4_spec.ip4dst; + n->tuple.src_port = info->fs.h_u.tcp_ip4_spec.psrc; + n->tuple.dst_port = info->fs.h_u.tcp_ip4_spec.pdst; + ports[0] = n->tuple.src_port; + ports[1] = n->tuple.dst_port; + ip = (struct iphdr *)(n->data + ETH_HLEN); + ip->saddr = info->fs.h_u.tcp_ip4_spec.ip4src; + ip->daddr = info->fs.h_u.tcp_ip4_spec.ip4dst; + ip->version = 0x4; + ip->ihl = 0x5; + + if (info->fs.flow_type == TCP_V4_FLOW) { + n->tuple.ip_proto = IPPROTO_TCP; + ip->protocol = IPPROTO_TCP; + } else { + n->tuple.ip_proto = IPPROTO_UDP; + ip->protocol = IPPROTO_UDP; + } + ip->tot_len = cpu_to_be16(min_hlen - ETH_HLEN); + } else { + struct ipv6hdr *ip6; + + ip6 = (struct ipv6hdr *)(n->data + ETH_HLEN); + ports = (__be16 *)(n->data + ETH_HLEN + + sizeof(struct ipv6hdr)); + eth->h_proto = htons(ETH_P_IPV6); + n->tuple.eth_proto = htons(ETH_P_IPV6); + memcpy(&n->tuple.src_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&n->tuple.dst_ipv6, &info->fs.h_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + n->tuple.src_port = info->fs.h_u.tcp_ip6_spec.psrc; + n->tuple.dst_port = info->fs.h_u.tcp_ip6_spec.pdst; + ports[0] = n->tuple.src_port; + ports[1] = n->tuple.dst_port; + memcpy(&ip6->saddr, &n->tuple.src_ipv6, + sizeof(struct in6_addr)); + memcpy(&ip6->daddr, &n->tuple.dst_ipv6, + sizeof(struct in6_addr)); + ip6->version = 0x6; + + if (info->fs.flow_type == TCP_V6_FLOW) { + n->tuple.ip_proto = IPPROTO_TCP; + ip6->nexthdr = NEXTHDR_TCP; + ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr)); + } else { + n->tuple.ip_proto = IPPROTO_UDP; + ip6->nexthdr = NEXTHDR_UDP; + ip6->payload_len = cpu_to_be16(sizeof(struct udphdr)); + } + } + + rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0); + if (rc) + goto unlock; + + qede_configure_arfs_fltr(edev, n, n->rxq_id, true); + rc = qede_poll_arfs_filter_config(edev, n); +unlock: + __qede_unlock(edev); + return rc; +} + +int qede_del_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info) +{ + struct ethtool_rx_flow_spec *fsp = &info->fs; + struct qede_arfs_fltr_node *fltr = NULL; + int rc = -EPERM; + + __qede_lock(edev); + if (!edev->arfs) + goto unlock; + + fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0), + fsp->location); + if (!fltr) + goto unlock; + + qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false); + + rc = qede_poll_arfs_filter_config(edev, fltr); + if (rc == 0) + qede_dequeue_fltr_and_config_searcher(edev, fltr); + +unlock: + __qede_unlock(edev); + return rc; +} + int qede_get_arfs_filter_count(struct qede_dev *edev) { int count = 0; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 06ca13dd9ddb..e5ee9f274a71 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -873,9 +873,7 @@ static void qede_update_pf_params(struct qed_dev *cdev) */ pf_params.eth_pf_params.num_vf_cons = 48; -#ifdef CONFIG_RFS_ACCEL pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR; -#endif qed_ops->common->update_pf_params(cdev, &pf_params); } @@ -1984,12 +1982,12 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); -#ifdef CONFIG_RFS_ACCEL + if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { 
qede_poll_for_freeing_arfs_filters(edev); qede_free_arfs(edev); } -#endif + /* Release the interrupts */ qede_sync_free_irqs(edev); edev->ops->common->set_fp_int(edev->cdev, 0); @@ -2041,13 +2039,12 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, if (rc) goto err2; -#ifdef CONFIG_RFS_ACCEL if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { rc = qede_alloc_arfs(edev); if (rc) DP_NOTICE(edev, "aRFS memory allocation failed\n"); } -#endif + qede_napi_add_enable(edev); DP_INFO(edev, "Napi added and enabled\n"); -- cgit v1.2.3-55-g7522 From 645874e5807ae5ffa09fba2ccd23f01e4eb16d58 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Wed, 26 Jul 2017 06:07:11 -0700 Subject: qed: Add support for Energy efficient ethernet. The patch adds required driver support for reading/configuring the Energy Efficient Ethernet (EEE) parameters. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 72 ++++++++++++++++++++++++++++-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 49 ++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 19 ++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 66 +++++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 37 +++++++++++++-- include/linux/qed/qed_if.h | 20 +++++++++ 6 files changed, 256 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 6c87bed13bd2..f545607100e4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1684,6 +1684,8 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) "Load request was sent. Load code: 0x%x\n", load_code); + qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); + qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); p_hwfn->first_on_engine = (load_code == @@ -2472,6 +2474,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + struct qed_mcp_link_capabilities *p_caps; struct qed_mcp_link_params *link; /* Read global nvm_cfg address */ @@ -2534,6 +2537,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* Read default link configuration */ link = &p_hwfn->mcp_info->link_input; + p_caps = &p_hwfn->mcp_info->link_capabilities; port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); link_temp = qed_rd(p_hwfn, p_ptt, @@ -2588,10 +2592,45 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); link->loopback_mode = 0; - DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Read default link: Speed 0x%08x, Adv. 
Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n", - link->speed.forced_speed, link->speed.advertised_speeds, - link->speed.autoneg, link->pause.autoneg); + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { + link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + + offsetof(struct nvm_cfg1_port, ext_phy)); + link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; + link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; + p_caps->default_eee = QED_MCP_EEE_ENABLED; + link->eee.enable = true; + switch (link_temp) { + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: + p_caps->default_eee = QED_MCP_EEE_DISABLED; + link->eee.enable = false; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: + p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: + p_caps->eee_lpi_timer = + EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: + p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; + break; + } + + link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; + link->eee.tx_lpi_enable = link->eee.enable; + link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV; + } else { + p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; + } + + DP_VERBOSE(p_hwfn, + NETIF_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n", + link->speed.forced_speed, + link->speed.advertised_speeds, + link->speed.autoneg, + link->pause.autoneg, + p_caps->default_eee, p_caps->eee_lpi_timer); /* Read Multi-function information from shmem */ addr = MCP_REG_SCRATCH + nvm_cfg1_offset + @@ -2751,6 +2790,27 @@ static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_hw_info_port_num_ah(p_hwfn, p_ptt); } +static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_link_capabilities *p_caps; + u32 eee_status; + + p_caps = &p_hwfn->mcp_info->link_capabilities; + if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED) + return; + + p_caps->eee_speed_caps = 0; + eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, eee_status)); + eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> + EEE_SUPPORTED_SPEED_OFFSET; + + if (eee_status & EEE_1G_SUPPORTED) + p_caps->eee_speed_caps |= QED_EEE_1G_ADV; + if (eee_status & EEE_10G_ADV) + p_caps->eee_speed_caps |= QED_EEE_10G_ADV; +} + static int qed_get_hw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -2767,6 +2827,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_hw_info_port_num(p_hwfn, p_ptt); + qed_mcp_get_capabilities(p_hwfn, p_ptt); + qed_hw_get_nvm_info(p_hwfn, p_ptt); rc = qed_int_igu_read_cam(p_hwfn, p_ptt); @@ -2785,6 +2847,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, p_hwfn->mcp_info->func_info.ovlan; qed_mcp_cmd_port_init(p_hwfn, p_ptt); + + qed_get_eee_caps(p_hwfn, p_ptt); } if (qed_mcp_is_init(p_hwfn)) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 31fb0bffa098..3427fe7049b5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -10825,6 +10825,17 @@ struct eth_phy_cfg { #define ETH_LOOPBACK_EXT (3) #define ETH_LOOPBACK_MAC (4) + u32 eee_cfg; +#define EEE_CFG_EEE_ENABLED BIT(0) +#define EEE_CFG_TX_LPI BIT(1) +#define EEE_CFG_ADV_SPEED_1G BIT(2) +#define EEE_CFG_ADV_SPEED_10G BIT(3) +#define EEE_TX_TIMER_USEC_MASK (0xfffffff0) +#define EEE_TX_TIMER_USEC_OFFSET 4 +#define 
EEE_TX_TIMER_USEC_BALANCED_TIME (0xa00) +#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME (0x100) +#define EEE_TX_TIMER_USEC_LATENCY_TIME (0x6000) + u32 feature_config_flags; #define ETH_EEE_MODE_ADV_LPI (1 << 0) }; @@ -11242,6 +11253,25 @@ struct public_port { u32 wol_pkt_len; u32 wol_pkt_details; struct dcb_dscp_map dcb_dscp_map; + + u32 eee_status; +#define EEE_ACTIVE_BIT BIT(0) +#define EEE_LD_ADV_STATUS_MASK 0x000000f0 +#define EEE_LD_ADV_STATUS_OFFSET 4 +#define EEE_1G_ADV BIT(1) +#define EEE_10G_ADV BIT(2) +#define EEE_LP_ADV_STATUS_MASK 0x00000f00 +#define EEE_LP_ADV_STATUS_OFFSET 8 +#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 +#define EEE_SUPPORTED_SPEED_OFFSET 12 +#define EEE_1G_SUPPORTED BIT(1) +#define EEE_10G_SUPPORTED BIT(2) + + u32 eee_remote; +#define EEE_REMOTE_TW_TX_MASK 0x0000ffff +#define EEE_REMOTE_TW_TX_OFFSET 0 +#define EEE_REMOTE_TW_RX_MASK 0xffff0000 +#define EEE_REMOTE_TW_RX_OFFSET 16 }; struct public_func { @@ -11570,6 +11600,9 @@ struct public_drv_mb { #define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000 #define DRV_MSG_CODE_OS_WOL 0x002e0000 +#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000 +#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000 + #define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff u32 drv_mb_param; @@ -11653,6 +11686,10 @@ struct public_drv_mb { #define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 #define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000FF00 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000FFFF +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 + u32 fw_mb_header; #define FW_MSG_CODE_MASK 0xffff0000 #define FW_MSG_CODE_UNSUPPORTED 0x00000000 @@ -11696,6 +11733,9 @@ struct public_drv_mb { #define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 #define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 +/* get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE 0x00000002 + #define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR (1 << 0) u32 drv_pulse_mb; @@ -11891,7 +11931,16 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 u32 phy_cfg; u32 mgmt_traffic; + u32 ext_phy; + /* EEE power saving mode */ +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 + u32 mba_cfg1; u32 mba_cfg2; u32 vf_cfg; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 1bddf9372fc9..0a06683abfa0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1297,6 +1297,10 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) } } + if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) + memcpy(&link_params->eee, ¶ms->eee, + sizeof(link_params->eee)); + rc = qed_mcp_set_link(hwfn, ptt, params->link_up); qed_ptt_release(hwfn, ptt); @@ -1483,6 +1487,21 @@ static void qed_fill_link(struct qed_hwfn *hwfn, if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) if_link->lp_caps |= QED_LM_Asym_Pause_BIT; + + if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { + if_link->eee_supported = false; + } else { + if_link->eee_supported = true; + if_link->eee_active = link.eee_active; + if_link->sup_caps = link_caps.eee_speed_caps; 
+ /* MFW clears adv_caps on eee disable; use configured value */ + if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps : + params.eee.adv_caps; + if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; + if_link->eee.enable = params.eee.enable; + if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; + if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; + } } static void qed_get_current_link(struct qed_dev *cdev, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 9da91045d167..c1ecce6b9141 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1097,6 +1097,31 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); } +static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link) +{ + u32 eee_status, val; + + p_link->eee_adv_caps = 0; + p_link->eee_lp_adv_caps = 0; + eee_status = qed_rd(p_hwfn, + p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, eee_status)); + p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT); + val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET; + if (val & EEE_1G_ADV) + p_link->eee_adv_caps |= QED_EEE_1G_ADV; + if (val & EEE_10G_ADV) + p_link->eee_adv_caps |= QED_EEE_10G_ADV; + val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET; + if (val & EEE_1G_ADV) + p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV; + if (val & EEE_10G_ADV) + p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV; +} + static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_reset) { @@ -1228,6 +1253,9 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) + qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); + qed_link_update(p_hwfn); out: spin_unlock_bh(&p_hwfn->mcp_info->link_lock); @@ -1251,6 +1279,19 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) phy_cfg.pause |= (params->pause.forced_tx) ? 
ETH_PAUSE_TX : 0; phy_cfg.adv_speed = params->speed.advertised_speeds; phy_cfg.loopback_mode = params->loopback_mode; + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { + if (params->eee.enable) + phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; + if (params->eee.tx_lpi_enable) + phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; + if (params->eee.adv_caps & QED_EEE_1G_ADV) + phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G; + if (params->eee.adv_caps & QED_EEE_10G_ADV) + phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G; + phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer << + EEE_TX_TIMER_USEC_OFFSET) & + EEE_TX_TIMER_USEC_MASK; + } p_hwfn->b_drv_link_init = b_up; @@ -2822,3 +2863,28 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, p_unlock->resource = resource; } } + +int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 mcp_resp; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, + 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); + if (!rc) + DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE), + "MFW supported features: %08x\n", + p_hwfn->mcp_info->capabilities); + + return rc; +} + +int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 mcp_resp, mcp_param, features; + + features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE; + + return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, + features, &mcp_resp, &mcp_param); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index af03b3651411..c7ec2395d1ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -53,15 +53,25 @@ struct qed_mcp_link_pause_params { bool forced_tx; }; +enum qed_mcp_eee_mode { + QED_MCP_EEE_DISABLED, + QED_MCP_EEE_ENABLED, + QED_MCP_EEE_UNSUPPORTED +}; + struct qed_mcp_link_params { - struct qed_mcp_link_speed_params speed; - struct qed_mcp_link_pause_params pause; - u32 loopback_mode; + struct qed_mcp_link_speed_params speed; + struct qed_mcp_link_pause_params pause; + u32 loopback_mode; + struct qed_link_eee_params eee; }; struct qed_mcp_link_capabilities { u32 speed_capabilities; bool default_speed_autoneg; + enum qed_mcp_eee_mode default_eee; + u32 eee_lpi_timer; + u8 eee_speed_caps; }; struct qed_mcp_link_state { @@ -102,6 +112,9 @@ struct qed_mcp_link_state { u8 partner_adv_pause; bool sfp_tx_fault; + bool eee_active; + u8 eee_adv_caps; + u8 eee_lp_adv_caps; }; struct qed_mcp_function_info { @@ -546,6 +559,9 @@ struct qed_mcp_info { u8 *mfw_mb_shadow; u16 mfw_mb_length; u32 mcp_hist; + + /* Capabilties negotiated with the MFW */ + u32 capabilities; }; struct qed_mcp_mb_params { @@ -925,5 +941,20 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, enum qed_resc_lock resource, bool b_is_permanent); +/** + * @brief Learn of supported MFW features; To be done during early init + * + * @param p_hwfn + * @param p_ptt + */ +int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +/** + * @brief Inform MFW of set of features supported by driver. Should be done + * inside the content of the LOAD_REQ. 
+ * + * @param p_hwfn + * @param p_ptt + */ +int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #endif diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index ef39c7f40ae6..9f3276271b02 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -161,6 +161,18 @@ enum qed_nvm_images { QED_NVM_IMAGE_FCOE_CFG, }; +struct qed_link_eee_params { + u32 tx_lpi_timer; +#define QED_EEE_1G_ADV BIT(0) +#define QED_EEE_10G_ADV BIT(1) + + /* Capabilities are represented using QED_EEE_*_ADV values */ + u8 adv_caps; + u8 lp_adv_caps; + bool enable; + bool tx_lpi_enable; +}; + enum qed_led_mode { QED_LED_MODE_OFF, QED_LED_MODE_ON, @@ -408,6 +420,7 @@ struct qed_link_params { #define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2) #define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3) #define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4) +#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5) u32 override_flags; bool autoneg; u32 adv_speeds; @@ -422,6 +435,7 @@ struct qed_link_params { #define QED_LINK_LOOPBACK_EXT BIT(3) #define QED_LINK_LOOPBACK_MAC BIT(4) u32 loopback_mode; + struct qed_link_eee_params eee; }; struct qed_link_output { @@ -437,6 +451,12 @@ struct qed_link_output { u8 port; /* In PORT defs */ bool autoneg; u32 pause_config; + + /* EEE - capability & param */ + bool eee_supported; + bool eee_active; + u8 sup_caps; + struct qed_link_eee_params eee; }; struct qed_probe_params { -- cgit v1.2.3-55-g7522 From c3dc48f78638a4810678e64ddebf0839de8ea07e Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Wed, 26 Jul 2017 06:07:12 -0700 Subject: qede: Add ethtool support for Energy efficient ethernet. The patch adds ethtool callback implementations for querying/configuring the Energy Efficient Ethernet (EEE) parameters. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 84 +++++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index e31266df8fdd..55fa2ef19d8a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1631,6 +1631,87 @@ static int qede_get_tunable(struct net_device *dev, return 0; } +static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + + memset(¤t_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, ¤t_link); + + if (!current_link.eee_supported) { + DP_INFO(edev, "EEE is not supported\n"); + return -EOPNOTSUPP; + } + + if (current_link.eee.adv_caps & QED_EEE_1G_ADV) + edata->advertised = ADVERTISED_1000baseT_Full; + if (current_link.eee.adv_caps & QED_EEE_10G_ADV) + edata->advertised |= ADVERTISED_10000baseT_Full; + if (current_link.sup_caps & QED_EEE_1G_ADV) + edata->supported = ADVERTISED_1000baseT_Full; + if (current_link.sup_caps & QED_EEE_10G_ADV) + edata->supported |= ADVERTISED_10000baseT_Full; + if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV) + edata->lp_advertised = ADVERTISED_1000baseT_Full; + if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV) + edata->lp_advertised |= ADVERTISED_10000baseT_Full; + + edata->tx_lpi_timer = current_link.eee.tx_lpi_timer; + edata->eee_enabled = current_link.eee.enable; + edata->tx_lpi_enabled = current_link.eee.tx_lpi_enable; + edata->eee_active = current_link.eee_active; + + return 0; +} + +static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qed_link_output current_link; + struct qed_link_params params; + + if (!edev->ops->common->can_link_change(edev->cdev)) { + DP_INFO(edev, "Link settings are not allowed to be changed\n"); + return -EOPNOTSUPP; + } + + memset(¤t_link, 0, sizeof(current_link)); + edev->ops->common->get_link(edev->cdev, ¤t_link); + + if (!current_link.eee_supported) { + DP_INFO(edev, "EEE is not supported\n"); + return -EOPNOTSUPP; + } + + memset(¶ms, 0, sizeof(params)); + params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG; + + if (!(edata->advertised & (ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full)) || + ((edata->advertised & (ADVERTISED_1000baseT_Full | + ADVERTISED_10000baseT_Full)) != + edata->advertised)) { + DP_VERBOSE(edev, QED_MSG_DEBUG, + "Invalid advertised capabilities %d\n", + edata->advertised); + return -EINVAL; + } + + if (edata->advertised & ADVERTISED_1000baseT_Full) + params.eee.adv_caps = QED_EEE_1G_ADV; + if (edata->advertised & ADVERTISED_10000baseT_Full) + params.eee.adv_caps |= QED_EEE_10G_ADV; + params.eee.enable = edata->eee_enabled; + params.eee.tx_lpi_enable = edata->tx_lpi_enabled; + params.eee.tx_lpi_timer = edata->tx_lpi_timer; + + params.link_up = true; + edev->ops->common->set_link(edev->cdev, ¶ms); + + return 0; +} + static const struct ethtool_ops qede_ethtool_ops = { .get_link_ksettings = qede_get_link_ksettings, .set_link_ksettings = qede_set_link_ksettings, @@ -1664,6 +1745,9 @@ static const struct ethtool_ops qede_ethtool_ops = { .get_channels = qede_get_channels, .set_channels = qede_set_channels, .self_test = qede_self_test, + .get_eee = qede_get_eee, + .set_eee = qede_set_eee, + .get_tunable = qede_get_tunable, .set_tunable = qede_set_tunable, }; -- cgit v1.2.3-55-g7522 From 
477f2d1460a636abd08f03eafabe0c51366fa5de Mon Sep 17 00:00:00 2001 From: Rahul Verma Date: Wed, 26 Jul 2017 06:07:13 -0700 Subject: qed: Add support for vf coalesce configuration. This patch add the ethtool support to set RX/Tx coalesce value to the VF associated Rx/Tx queues. Signed-off-by: Rahul Verma Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 73 +++++++++++++++------- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 41 ++++-------- drivers/net/ethernet/qlogic/qed/qed_l2.h | 7 +++ drivers/net/ethernet/qlogic/qed/qed_main.c | 24 +------ drivers/net/ethernet/qlogic/qed/qed_sriov.c | 83 +++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.c | 44 +++++++++++++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 24 ++++++- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 38 ++++++++--- include/linux/qed/qed_if.h | 4 +- 9 files changed, 251 insertions(+), 87 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index f545607100e4..58a689fb04db 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -3694,7 +3694,7 @@ static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } p_coal_timeset = p_eth_qzone; - memset(p_coal_timeset, 0, eth_qzone_size); + memset(p_eth_qzone, 0, eth_qzone_size); SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); @@ -3702,12 +3702,46 @@ static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return 0; } -int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id) +int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle) +{ + struct qed_queue_cid *p_cid = p_handle; + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc = 0; + + p_hwfn = p_cid->p_owner; + + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + if (rx_coal) { + rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc) + goto out; + p_hwfn->cdev->rx_coalesce_usecs = rx_coal; + } + + if (tx_coal) { + rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); + if (rc) + goto out; + p_hwfn->cdev->tx_coalesce_usecs = tx_coal; + } +out: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid) { struct ustorm_eth_queue_zone eth_qzone; u8 timeset, timer_res; - u16 fw_qid = 0; u32 address; int rc; @@ -3724,32 +3758,29 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } timeset = (u8)(coalesce >> timer_res); - rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid); - if (rc) - return rc; - - rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false); + rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, + p_cid->sb_igu_id, false); if (rc) goto out; - address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + address = BAR0_MAP_REG_USDM_RAM + + USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, sizeof(struct ustorm_eth_queue_zone), timeset); if (rc) goto out; - p_hwfn->cdev->rx_coalesce_usecs = coalesce; out: return rc; } -int qed_set_txq_coalesce(struct qed_hwfn 
*p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id) +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid) { struct xstorm_eth_queue_zone eth_qzone; u8 timeset, timer_res; - u16 fw_qid = 0; u32 address; int rc; @@ -3766,22 +3797,16 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, } timeset = (u8)(coalesce >> timer_res); - rc = qed_fw_l2_queue(p_hwfn, qid, &fw_qid); - if (rc) - return rc; - - rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true); + rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, + p_cid->sb_igu_id, true); if (rc) goto out; - address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + address = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); rc = qed_set_coalesce(p_hwfn, p_ptt, address, ð_qzone, sizeof(struct xstorm_eth_queue_zone), timeset); - if (rc) - goto out; - - p_hwfn->cdev->tx_coalesce_usecs = coalesce; out: return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 1f1df1bf127c..e6b3c83c5db8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -443,38 +443,23 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); /** - * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue - * The fact that we can configure coalescing to up to 511, but on varying - * accuracy [the bigger the value the less accurate] up to a mistake of 3usec - * for the highest values. - * - * @param p_hwfn - * @param p_ptt - * @param coalesce - Coalesce value in micro seconds. - * @param qid - Queue index. - * @param qid - SB Id - * - * @return int - */ -int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id); - -/** - * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue - * While the API allows setting coalescing per-qid, all tx queues sharing a - * SB should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] + * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx or + * Tx queue. We can configure coalescing to up to 511, but on + * varying accuracy [the bigger the value the less accurate] up to a mistake + * of 3usec for the highest values. + * While the API allows setting coalescing per-qid, all queues sharing a SB + * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] * otherwise configuration would break. * - * @param p_hwfn - * @param p_ptt - * @param coalesce - Coalesce value in micro seconds. - * @param qid - Queue index. - * @param qid - SB Id + * @param rx_coal - Rx Coalesce value in micro seconds. + * @param tx_coal - TX Coalesce value in micro seconds. 
+ * @param p_handle * * @return int - */ -int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - u16 coalesce, u16 qid, u16 sb_id); + **/ +int +qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); + const char *qed_hw_get_resc_name(enum qed_resources res_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index f8f09aadced7..60ea72ce3e2c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -400,4 +400,11 @@ qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, u8 qed_mcast_bin_from_mac(u8 *mac); +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid); + +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid); #endif /* _QED_L2_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 0a06683abfa0..448810a235b8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1575,29 +1575,9 @@ static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) } static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, - u16 qid, u16 sb_id) + void *handle) { - struct qed_hwfn *hwfn; - struct qed_ptt *ptt; - int hwfn_index; - int status = 0; - - hwfn_index = qid % cdev->num_hwfns; - hwfn = &cdev->hwfns[hwfn_index]; - ptt = qed_ptt_acquire(hwfn); - if (!ptt) - return -EAGAIN; - - status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal, - qid / cdev->num_hwfns, sb_id); - if (status) - goto out; - status = qed_set_txq_coalesce(hwfn, ptt, tx_coal, - qid / cdev->num_hwfns, sb_id); -out: - qed_ptt_release(hwfn, ptt); - - return status; + return qed_set_queue_coalesce(rx_coal, tx_coal, handle); } static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 2cfd3bd9a031..5feef783623b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3400,6 +3400,86 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, length, status); } +static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_update_coalesce *req; + u8 status = PFVF_STATUS_FAILURE; + struct qed_queue_cid *p_cid; + u16 rx_coal, tx_coal; + int rc = 0, i; + u16 qid; + + req = &mbx->req_virt->update_coalesce; + + rx_coal = req->rx_coal; + tx_coal = req->tx_coal; + qid = req->qid; + + if (!qed_iov_validate_rxq(p_hwfn, vf, qid, + QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Rx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + if (!qed_iov_validate_txq(p_hwfn, vf, qid, + QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Tx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", + vf->abs_vf_id, rx_coal, tx_coal, qid); + + if (rx_coal) { + p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); + + rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Unable to set rx queue = %d coalesce\n", + 
vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); + goto out; + } + } + + if (tx_coal) { + struct qed_vf_queue *p_queue = &vf->vf_queues[qid]; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + if (!p_queue->cids[i].p_cid) + continue; + + if (!p_queue->cids[i].b_is_tx) + continue; + + rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, + p_queue->cids[i].p_cid); + + if (rc) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Unable to set tx queue coalesce\n", + vf->abs_vf_id); + goto out; + } + } + } + + status = PFVF_STATUS_SUCCESS; +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, + sizeof(struct pfvf_def_resp_tlv), status); +} static int qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) @@ -3725,6 +3805,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_UPDATE_TUNN_PARAM: qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_COALESCE_UPDATE: + qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); + break; } } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 1926d1ed439f..0a7bbc0f19b0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1343,6 +1343,50 @@ exit: return rc; } +int +qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_update_coalesce *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep header tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req)); + + req->rx_coal = rx_coal; + req->tx_coal = tx_coal; + req->qid = p_cid->rel.queue_id; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n", + rx_coal, tx_coal, req->qid); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + goto exit; + + if (rx_coal) + p_hwfn->cdev->rx_coalesce_usecs = rx_coal; + + if (tx_coal) + p_hwfn->cdev->tx_coalesce_usecs = tx_coal; + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + return rc; +} + u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) { struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 34d9b882a780..2d9fdd62f56d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -497,6 +497,13 @@ struct tlv_buffer_size { u8 tlv_buffer[TLV_BUFFER_SIZE]; }; +struct vfpf_update_coalesce { + struct vfpf_first_tlv first_tlv; + u16 rx_coal; + u16 tx_coal; + u16 qid; + u8 padding[2]; +}; union vfpf_tlvs { struct vfpf_first_tlv first_tlv; struct vfpf_acquire_tlv acquire; @@ -509,6 +516,7 @@ union vfpf_tlvs { struct vfpf_vport_update_tlv vport_update; struct vfpf_ucast_filter_tlv ucast_filter; struct vfpf_update_tunn_param_tlv tunn_param_update; + struct vfpf_update_coalesce update_coalesce; struct channel_list_end_tlv list_end; struct tlv_buffer_size tlv_buf_size; }; @@ -624,7 +632,7 @@ enum { CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, 
CHANNEL_TLV_UPDATE_TUNN_PARAM, - CHANNEL_TLV_RESERVED, + CHANNEL_TLV_COALESCE_UPDATE, CHANNEL_TLV_QID, CHANNEL_TLV_MAX, @@ -677,6 +685,20 @@ struct qed_vf_iov { bool b_doorbell_bar; }; +/** + * @brief VF - Set Rx/Tx coalesce per VF's relative queue. + * Coalesce value '0' will omit the configuration. + * + * @param p_hwfn + * @param rx_coal - coalesce value in micro second for rx queue + * @param tx_coal - coalesce value in micro second for tx queue + * @param p_cid - queue cid + * + **/ +int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + u16 rx_coal, + u16 tx_coal, struct qed_queue_cid *p_cid); + #ifdef CONFIG_QED_SRIOV /** * @brief Read the VF bulletin and act on it if needed diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 55fa2ef19d8a..76e0b132e8cc 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -718,8 +718,9 @@ static int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { struct qede_dev *edev = netdev_priv(dev); + struct qede_fastpath *fp; int i, rc = 0; - u16 rxc, txc, sb_id; + u16 rxc, txc; if (!netif_running(dev)) { DP_INFO(edev, "Interface is down\n"); @@ -730,21 +731,36 @@ static int qede_set_coalesce(struct net_device *dev, coal->tx_coalesce_usecs > QED_COALESCE_MAX) { DP_INFO(edev, "Can't support requested %s coalesce value [max supported value %d]\n", - coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" - : "tx", - QED_COALESCE_MAX); + coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" : + "tx", QED_COALESCE_MAX); return -EINVAL; } rxc = (u16)coal->rx_coalesce_usecs; txc = (u16)coal->tx_coalesce_usecs; for_each_queue(i) { - sb_id = edev->fp_array[i].sb_info->igu_sb_id; - rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc, - (u16)i, sb_id); - if (rc) { - DP_INFO(edev, "Set coalesce error, rc = %d\n", rc); - return rc; + fp = &edev->fp_array[i]; + + if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { + rc = edev->ops->common->set_coalesce(edev->cdev, + rxc, 0, + fp->rxq->handle); + if (rc) { + DP_INFO(edev, + "Set RX coalesce error, rc = %d\n", rc); + return rc; + } + } + + if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { + rc = edev->ops->common->set_coalesce(edev->cdev, + 0, txc, + fp->txq->handle); + if (rc) { + DP_INFO(edev, + "Set TX coalesce error, rc = %d\n", rc); + return rc; + } } } @@ -1758,6 +1774,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = { .get_msglevel = qede_get_msglevel, .set_msglevel = qede_set_msglevel, .get_link = qede_get_link, + .get_coalesce = qede_get_coalesce, + .set_coalesce = qede_set_coalesce, .get_ringparam = qede_get_ringparam, .set_ringparam = qede_set_ringparam, .get_strings = qede_get_strings, diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 9f3276271b02..4d59ca16134c 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -694,8 +694,8 @@ struct qed_common_ops { * * @return 0 on success, error otherwise. 
*/ - int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, - u16 qid, u16 sb_id); + int (*set_coalesce)(struct qed_dev *cdev, + u16 rx_coal, u16 tx_coal, void *handle); /** * @brief set_led - Configure LED mode -- cgit v1.2.3-55-g7522 From bf5a94bfe26a9fcd4af91ae6bccd4f3d600d2262 Mon Sep 17 00:00:00 2001 From: Rahul Verma Date: Wed, 26 Jul 2017 06:07:14 -0700 Subject: qed: Read per queue coalesce from hardware Retrieve the actual coalesce value from hardware for every Rx/Tx queue, instead of Rx/Tx coalesce value cached during set coalesce. Signed-off-by: Rahul Verma Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 26 ++++-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 115 ++++++++++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_l2.h | 11 ++- drivers/net/ethernet/qlogic/qed/qed_main.c | 7 -- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 74 +++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_sriov.h | 3 + drivers/net/ethernet/qlogic/qed/qed_vf.c | 31 +++++++ drivers/net/ethernet/qlogic/qed/qed_vf.h | 29 +++++- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 47 ++++++++-- include/linux/qed/qed_eth_if.h | 1 + include/linux/qed/qed_if.h | 11 +-- 11 files changed, 324 insertions(+), 31 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index e6b3c83c5db8..defdda1ffaa2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -443,13 +443,25 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); /** - * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx or - * Tx queue. We can configure coalescing to up to 511, but on - * varying accuracy [the bigger the value the less accurate] up to a mistake - * of 3usec for the highest values. - * While the API allows setting coalescing per-qid, all queues sharing a SB - * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] - * otherwise configuration would break. + * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue. + * + * @param p_hwfn + * @param p_coal - store coalesce value read from the hardware. + * @param p_handle + * + * @return int + **/ +int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); + +/** + * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and + * Tx queue. The fact that we can configure coalescing to up to 511, but on + * varying accuracy [the bigger the value the less accurate] up to a mistake + * of 3usec for the highest values. + * While the API allows setting coalescing per-qid, all queues sharing a SB + * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] + * otherwise configuration would break. + * * * @param rx_coal - Rx Coalesce value in micro seconds. * @param tx_coal - TX Coalesce value in micro seconds. 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 0ba5ec8a9814..9a1645852015 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -2047,6 +2047,106 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, return qed_spq_post(p_hwfn, p_ent, NULL); } +int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_rx_coal) +{ + u32 coalesce, address, is_valid; + struct cau_sb_entry sb_entry; + u8 timer_res; + int rc; + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + p_cid->sb_igu_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0); + + address = BAR0_MAP_REG_USDM_RAM + + USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + coalesce = qed_rd(p_hwfn, p_ptt, address); + + is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); + if (!is_valid) + return -EINVAL; + + coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); + *p_rx_coal = (u16)(coalesce << timer_res); + + return 0; +} + +int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_tx_coal) +{ + u32 coalesce, address, is_valid; + struct cau_sb_entry sb_entry; + u8 timer_res; + int rc; + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + p_cid->sb_igu_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, 0); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1); + + address = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id); + coalesce = qed_rd(p_hwfn, p_ptt, address); + + is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); + if (!is_valid) + return -EINVAL; + + coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); + *p_tx_coal = (u16)(coalesce << timer_res); + + return 0; +} + +int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle) +{ + struct qed_queue_cid *p_cid = handle; + struct qed_ptt *p_ptt; + int rc = 0; + + if (IS_VF(p_hwfn->cdev)) { + rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); + if (rc) + DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); + + return rc; + } + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + if (p_cid->b_is_rx) { + rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); + if (rc) + goto out; + } else { + rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); + if (rc) + goto out; + } + +out: + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + static int qed_fill_eth_dev_info(struct qed_dev *cdev, struct qed_dev_eth_info *info) { @@ -2696,6 +2796,20 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie, return rc; } +static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) +{ + struct qed_queue_cid *p_cid = handle; + struct qed_hwfn *p_hwfn; + int rc; + + p_hwfn = p_cid->p_owner; + rc = qed_get_queue_coalesce(p_hwfn, coal, handle); + if (rc) + DP_NOTICE(p_hwfn, "Unable to read queue calescing\n"); + + return rc; +} + static int qed_fp_cqe_completion(struct qed_dev *dev, u8 rss_id, struct eth_slow_path_rx_cqe *cqe) { @@ -2739,6 +2853,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = { .tunn_config = &qed_tunn_configure, .ntuple_filter_config = 
&qed_ntuple_arfs_filter_config, .configure_arfs_searcher = &qed_configure_arfs_searcher, + .get_coalesce = &qed_get_coalesce, }; const struct qed_eth_ops *qed_get_eth_ops(void) diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 60ea72ce3e2c..cc1f248551c9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -407,4 +407,13 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 coalesce, struct qed_queue_cid *p_cid); -#endif /* _QED_L2_H */ + +int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_hw_coal); + +int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_hw_coal); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 448810a235b8..27832885a87f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -1568,12 +1568,6 @@ static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, return rc; } -static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal) -{ - *rx_coal = cdev->rx_coalesce_usecs; - *tx_coal = cdev->tx_coalesce_usecs; -} - static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, void *handle) { @@ -1726,7 +1720,6 @@ const struct qed_common_ops qed_common_ops_pass = { .chain_alloc = &qed_chain_alloc, .chain_free = &qed_chain_free, .nvm_get_image = &qed_nvm_get_image, - .get_coalesce = &qed_get_coalesce, .set_coalesce = &qed_set_coalesce, .set_led = &qed_set_led, .update_drv_state = &qed_update_drv_state, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 5feef783623b..3f40b1de7957 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -3400,6 +3400,75 @@ static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, length, status); } +static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_read_coal_resp_tlv *p_resp; + struct vfpf_read_coal_req_tlv *req; + u8 status = PFVF_STATUS_FAILURE; + struct qed_vf_queue *p_queue; + struct qed_queue_cid *p_cid; + u16 coal = 0, qid, i; + bool b_is_rx; + int rc = 0; + + mbx->offset = (u8 *)mbx->reply_virt; + req = &mbx->req_virt->read_coal_req; + + qid = req->qid; + b_is_rx = req->is_rx ? 
true : false; + + if (b_is_rx) { + if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Rx queue_id = %d\n", + p_vf->abs_vf_id, qid); + goto send_resp; + } + + p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); + rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal); + if (rc) + goto send_resp; + } else { + if (!qed_iov_validate_txq(p_hwfn, p_vf, qid, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Tx queue_id = %d\n", + p_vf->abs_vf_id, qid); + goto send_resp; + } + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + p_queue = &p_vf->vf_queues[qid]; + if ((!p_queue->cids[i].p_cid) || + (!p_queue->cids[i].b_is_tx)) + continue; + + p_cid = p_queue->cids[i].p_cid; + + rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal); + if (rc) + goto send_resp; + break; + } + } + + status = PFVF_STATUS_SUCCESS; + +send_resp: + p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ, + sizeof(*p_resp)); + p_resp->coal = coal; + + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); +} + static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_vf_info *vf) @@ -3450,6 +3519,7 @@ static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); goto out; } + vf->rx_coal = rx_coal; } if (tx_coal) { @@ -3473,6 +3543,7 @@ static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, goto out; } } + vf->tx_coal = tx_coal; } status = PFVF_STATUS_SUCCESS; @@ -3808,6 +3879,9 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, case CHANNEL_TLV_COALESCE_UPDATE: qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); break; + case CHANNEL_TLV_COALESCE_READ: + qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); + break; } } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { DP_VERBOSE(p_hwfn, QED_MSG_IOV, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index c2e44bce398c..3955929ba892 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -217,6 +217,9 @@ struct qed_vf_info { u8 num_rxqs; u8 num_txqs; + u16 rx_coal; + u16 tx_coal; + u8 num_sbs; u8 num_mac_filters; diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index 0a7bbc0f19b0..91b5e9f02a62 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -1343,6 +1343,37 @@ exit: return rc; } +int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + u16 *p_coal, struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_read_coal_resp_tlv *resp; + struct vfpf_read_coal_req_tlv *req; + int rc; + + /* clear mailbox and prep header tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req)); + req->qid = p_cid->rel.queue_id; + req->is_rx = p_cid->b_is_rx ? 
1 : 0; + + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + resp = &p_iov->pf2vf_reply->read_coal_resp; + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + goto exit; + + *p_coal = resp->coal; +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid) diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 2d9fdd62f56d..97d44dfb38ca 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -504,6 +504,20 @@ struct vfpf_update_coalesce { u16 qid; u8 padding[2]; }; + +struct vfpf_read_coal_req_tlv { + struct vfpf_first_tlv first_tlv; + u16 qid; + u8 is_rx; + u8 padding[5]; +}; + +struct pfvf_read_coal_resp_tlv { + struct pfvf_tlv hdr; + u16 coal; + u8 padding[6]; +}; + union vfpf_tlvs { struct vfpf_first_tlv first_tlv; struct vfpf_acquire_tlv acquire; @@ -517,7 +531,7 @@ union vfpf_tlvs { struct vfpf_ucast_filter_tlv ucast_filter; struct vfpf_update_tunn_param_tlv tunn_param_update; struct vfpf_update_coalesce update_coalesce; - struct channel_list_end_tlv list_end; + struct vfpf_read_coal_req_tlv read_coal_req; struct tlv_buffer_size tlv_buf_size; }; @@ -527,6 +541,7 @@ union pfvf_tlvs { struct tlv_buffer_size tlv_buf_size; struct pfvf_start_queue_resp_tlv queue_start; struct pfvf_update_tunn_param_tlv tunn_param_resp; + struct pfvf_read_coal_resp_tlv read_coal_resp; }; enum qed_bulletin_bit { @@ -634,6 +649,7 @@ enum { CHANNEL_TLV_UPDATE_TUNN_PARAM, CHANNEL_TLV_COALESCE_UPDATE, CHANNEL_TLV_QID, + CHANNEL_TLV_COALESCE_READ, CHANNEL_TLV_MAX, /* Required for iterating over vport-update tlvs. @@ -699,6 +715,17 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid); +/** + * @brief VF - Get coalesce per VF's relative queue. + * + * @param p_hwfn + * @param p_coal - coalesce value in micro second for VF queues. 
+ * @param p_cid - queue cid + * + **/ +int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + u16 *p_coal, struct qed_queue_cid *p_cid); + #ifdef CONFIG_QED_SRIOV /** * @brief Read the VF bulletin and act on it if needed diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 76e0b132e8cc..dae741270022 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -702,16 +702,53 @@ static u32 qede_get_link(struct net_device *dev) static int qede_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) { + void *rx_handle = NULL, *tx_handle = NULL; struct qede_dev *edev = netdev_priv(dev); - u16 rxc, txc; + u16 rx_coal, tx_coal, i, rc = 0; + struct qede_fastpath *fp; + + rx_coal = QED_DEFAULT_RX_USECS; + tx_coal = QED_DEFAULT_TX_USECS; memset(coal, 0, sizeof(struct ethtool_coalesce)); - edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc); - coal->rx_coalesce_usecs = rxc; - coal->tx_coalesce_usecs = txc; + __qede_lock(edev); + if (edev->state == QEDE_STATE_OPEN) { + for_each_queue(i) { + fp = &edev->fp_array[i]; + + if (fp->type & QEDE_FASTPATH_RX) { + rx_handle = fp->rxq->handle; + break; + } + } - return 0; + rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle); + if (rc) { + DP_INFO(edev, "Read Rx coalesce error\n"); + goto out; + } + + for_each_queue(i) { + fp = &edev->fp_array[i]; + if (fp->type & QEDE_FASTPATH_TX) { + tx_handle = fp->txq->handle; + break; + } + } + + rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle); + if (rc) + DP_INFO(edev, "Read Tx coalesce error\n"); + } + +out: + __qede_unlock(edev); + + coal->rx_coalesce_usecs = rx_coal; + coal->tx_coalesce_usecs = tx_coal; + + return rc; } static int qede_set_coalesce(struct net_device *dev, diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 0eef0a2b1901..d60de4a39810 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -323,6 +323,7 @@ struct qed_eth_ops { int (*configure_arfs_searcher)(struct qed_dev *cdev, bool en_searcher); + int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); }; const struct qed_eth_ops *qed_get_eth_ops(void); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 4d59ca16134c..2b4720bb8b40 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -186,6 +186,7 @@ enum qed_led_mode { #define QED_COALESCE_MAX 0xFF #define QED_DEFAULT_RX_USECS 12 +#define QED_DEFAULT_TX_USECS 48 /* forward */ struct qed_dev; @@ -673,16 +674,6 @@ struct qed_common_ops { int (*nvm_get_image)(struct qed_dev *cdev, enum qed_nvm_images type, u8 *buf, u16 len); -/** - * @brief get_coalesce - Get coalesce parameters in usec - * - * @param cdev - * @param rx_coal - Rx coalesce value in usec - * @param tx_coal - Tx coalesce value in usec - * - */ - void (*get_coalesce)(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal); - /** * @brief set_coalesce - Configure Rx coalesce value in usec * -- cgit v1.2.3-55-g7522 From 41822878b2ff40a1f57be81f1a1f0f040847b912 Mon Sep 17 00:00:00 2001 From: Rahul Verma Date: Wed, 26 Jul 2017 06:07:15 -0700 Subject: qed: enhanced per queue max coalesce value. Maximum coalesce per Rx/Tx queue is extended from 255 to 511. Signed-off-by: Rahul Verma Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- include/linux/qed/qed_if.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 2b4720bb8b40..cc646ca97974 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -184,7 +184,7 @@ enum qed_led_mode { #define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr)) -#define QED_COALESCE_MAX 0xFF +#define QED_COALESCE_MAX 0x1FF #define QED_DEFAULT_RX_USECS 12 #define QED_DEFAULT_TX_USECS 48 -- cgit v1.2.3-55-g7522 From 4b5dde2d6234ff5bc68e97e6901d1f2a0a7f3749 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Thu, 29 Jun 2017 18:23:54 -0700 Subject: mwifiex: correct channel stat buffer overflows mwifiex records information about various channels as it receives scan information. It does this by appending to a buffer that was sized to the max number of supported channels on any band, but there are numerous problems: (a) scans can return info from more than one band (e.g., both 2.4 and 5 GHz), so the determined "max" is not large enough (b) some firmware appears to return multiple results for a given channel, so the max *really* isn't large enough (c) there is no bounds checking when stashing these stats, so problems (a) and (b) can easily lead to buffer overflows Let's patch this by setting a slightly-more-correct max (that accounts for a combination of both 2.4G and 5G bands) and adding a bounds check when writing to our statistics buffer. Due to problem (b), we still might not properly report all known survey information (e.g., with "iw survey dump"), since duplicate results (or otherwise "larger than expected" results) will cause some truncation. But that's a problem for a future bugfix. (And because of this known deficiency, only log the excess at the WARN level, since that isn't visible by default in this driver and would otherwise be a bit too noisy.) 
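The fix described above comes down to a reusable defensive pattern: size the results buffer for the worst case across all bands, and still refuse to write past its end when the firmware reports more entries than expected. The sketch below is illustrative only; the structure and field names are stand-ins, not mwifiex's own, and the real patch follows after the tags.

#include <linux/types.h>
#include <linux/errno.h>

/* Illustrative stand-ins, not the mwifiex driver's structures. */
struct demo_chan_stat {
	u8 chan_num;
	s8 noise;
};

struct demo_adapter {
	struct demo_chan_stat *chan_stats;	/* sized for 2.4 GHz + 5 GHz */
	u32 num_in_chan_stats;			/* capacity of chan_stats */
	u32 survey_idx;				/* next free slot */
};

static int demo_record_chan_stat(struct demo_adapter *adapter,
				 const struct demo_chan_stat *from_fw)
{
	/* Firmware may report duplicates or results from several bands;
	 * drop anything beyond the buffer instead of overflowing it. */
	if (adapter->survey_idx >= adapter->num_in_chan_stats)
		return -ENOSPC;

	adapter->chan_stats[adapter->survey_idx++] = *from_fw;
	return 0;
}

The actual patch below applies the same idea to the adapter's chan_stats buffer and survey_idx counter, and sizes the buffer as the sum of both bands' channel counts.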
Fixes: bf35443314ac ("mwifiex: channel statistics support for mwifiex") Cc: Cc: Avinash Patil Cc: Xinming Hu Signed-off-by: Brian Norris Reviewed-by: Dmitry Torokhov Reviewed-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2 +- drivers/net/wireless/marvell/mwifiex/scan.c | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 06ad2d50f9b0..fdfdf2371986 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -4215,7 +4215,7 @@ int mwifiex_init_channel_scan_gap(struct mwifiex_adapter *adapter) if (adapter->config_bands & BAND_A) n_channels_a = mwifiex_band_5ghz.n_channels; - adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a); + adapter->num_in_chan_stats = n_channels_bg + n_channels_a; adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) * adapter->num_in_chan_stats); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index ae9630b49342..9900855746ac 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -2492,6 +2492,12 @@ mwifiex_update_chan_statistics(struct mwifiex_private *priv, sizeof(struct mwifiex_chan_stats); for (i = 0 ; i < num_chan; i++) { + if (adapter->survey_idx >= adapter->num_in_chan_stats) { + mwifiex_dbg(adapter, WARN, + "FW reported too many channel results (max %d)\n", + adapter->num_in_chan_stats); + return; + } chan_stats.chan_num = fw_chan_stats->chan_num; chan_stats.bandcfg = fw_chan_stats->bandcfg; chan_stats.flags = fw_chan_stats->flags; -- cgit v1.2.3-55-g7522 From d0116f6f7b306bc2d1bfc98d7c7c80fe5f468c20 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 30 Jun 2017 11:08:43 +0100 Subject: rtlwifi: kfree entry until after entry->bssid has been accessed The current code kfree's entry and then dereferences it by accessing entry->bssid. Avoid the dereference-after-free by moving the kfree after the access to entry->bssid. Detected by CoverityScan, CID#1448600 ("Read from pointer after free") Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index e36ee592c660..208f56297a75 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1735,12 +1735,12 @@ void rtl_scan_list_expire(struct ieee80211_hw *hw) continue; list_del(&entry->list); - kfree(entry); rtlpriv->scan_list.num--; RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "BSSID=%pM is expire in scan list (total=%d)\n", entry->bssid, rtlpriv->scan_list.num); + kfree(entry); } spin_unlock_irqrestore(&rtlpriv->locks.scan_list_lock, flags); -- cgit v1.2.3-55-g7522 From 459c35148ef6b65c25bc9d005cc87c4c26850898 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 30 Jun 2017 12:02:57 -0700 Subject: bcma: gpio: Correct number of GPIOs for BCM53573 Broadcom BCM53573 SoCs actually have 32 GPIOs, and not 16. 
Fixes: 3f37ec79dd21 ("bcma: support BCM53573 series of wireless SoCs") Signed-off-by: Florian Fainelli Signed-off-by: Kalle Valo --- drivers/bcma/driver_gpio.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 7bde8d7a2816..982d5781d3ce 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -191,6 +191,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) case BCMA_CHIP_ID_BCM4707: case BCMA_CHIP_ID_BCM5357: case BCMA_CHIP_ID_BCM53572: + case BCMA_CHIP_ID_BCM53573: case BCMA_CHIP_ID_BCM47094: chip->ngpio = 32; break; -- cgit v1.2.3-55-g7522 From 6e9aae179f290f1a44fce7ef8e9a8e2dd68ed1e4 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Thu, 6 Jul 2017 15:00:37 -0700 Subject: wl1251: add a missing spin_lock_init() This fixes the following kernel warning: [ 5668.771453] BUG: spinlock bad magic on CPU#0, kworker/u2:3/9745 [ 5668.771850] lock: 0xce63ef20, .magic: 00000000, .owner: /-1, .owner_cpu: 0 [ 5668.772277] CPU: 0 PID: 9745 Comm: kworker/u2:3 Tainted: G W 4.12.0-03002-gec979a4-dirty #40 [ 5668.772796] Hardware name: Nokia RX-51 board [ 5668.773071] Workqueue: phy1 wl1251_irq_work [ 5668.773345] [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [ 5668.773803] [] (show_stack) from [] (do_raw_spin_lock+0x6c/0xa0) [ 5668.774230] [] (do_raw_spin_lock) from [] (_raw_spin_lock_irqsave+0x10/0x18) [ 5668.774658] [] (_raw_spin_lock_irqsave) from [] (wl1251_op_tx+0x38/0x5c) [ 5668.775115] [] (wl1251_op_tx) from [] (ieee80211_tx_frags+0x188/0x1c0) [ 5668.775543] [] (ieee80211_tx_frags) from [] (__ieee80211_tx+0x6c/0x130) [ 5668.775970] [] (__ieee80211_tx) from [] (ieee80211_tx+0xdc/0x104) [ 5668.776367] [] (ieee80211_tx) from [] (__ieee80211_subif_start_xmit+0x454/0x8c8) [ 5668.776824] [] (__ieee80211_subif_start_xmit) from [] (ieee80211_subif_start_xmit+0x30/0x2fc) [ 5668.777343] [] (ieee80211_subif_start_xmit) from [] (dev_hard_start_xmit+0x80/0x118) ... by adding the missing spin_lock_init(). Reported-by: Pavel Machek Cc: Kalle Valo Signed-off-by: Cong Wang Acked-by: Pavel Machek Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wl1251/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 08f0477f78d9..9915d83a4a30 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void) wl->state = WL1251_STATE_OFF; mutex_init(&wl->mutex); + spin_lock_init(&wl->wl_lock); wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE; wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE; -- cgit v1.2.3-55-g7522 From 26a8985fa52e3c3087794d1329cfdafceae8510c Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jul 2017 11:09:46 -0700 Subject: nfp: remove the probe deferral when FW not present We use a hack to defer probe when firmware was not pre-loaded or found on disk. This helps in case users forgot to include firmware in initramfs, the driver will most likely get another shot at probing after real root is mounted. This is not for what EPROBE_DEFER is supposed to be used, and when FW is completely missing every time new device is probed NFP will reprobe spamming kernel logs. Remove this hack, users will now have to make sure the right firmware image is present in initramfs if nfp.ko is placed there or built in. Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 5797dbf2b507..d5e2361f0e86 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -704,7 +704,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (!pf->rtbl) { nfp_err(pf->cpp, "No %s, giving up.\n", pf->fw_loaded ? "symbol table" : "firmware found"); - return -EPROBE_DEFER; + return -EINVAL; } mutex_lock(&pf->lock); -- cgit v1.2.3-55-g7522 From 9511f2980d9367713c65991edeb7608c9d78dbeb Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jul 2017 11:09:47 -0700 Subject: nfp: look for firmware image by device serial number and PCI name We generally look up firmware by card type, but that doesn't allow users who have more than one card of the same type in their system to select firmware per adapter. Unfortunately user space firmware helper seems fraught with difficulties and to be on its way out. In particular support for handling firmware uevents have been dropped from systemd and most distributions don't enable the FW fallback by default any more. To allow users selecting firmware for a particular device look up firmware names by serial and pci_name(). Use the direct lookup to disable generating uevents when enabled in Kconfig and not print any warnings to logs if adapter-specific files are missing. Users can place in /lib/firmware/netronome files named: pci-${pci_name}.nffw serial-${serial}.nffw to target a specific card. E.g.: pci-0000:04:00.0.nffw pci-0000:82:00.0.nffw serial-00-aa-bb-11-22-33-10-ff.nffw We use the full serial number including the interface id, as it appears in lspci output (bytes separated by '-'). Signed-off-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index d67969d3e484..13d056da0765 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -188,9 +188,27 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) struct nfp_eth_table_port *port; const char *fw_model; char fw_name[256]; + const u8 *serial; int spc, err = 0; + u16 interface; int i, j; + /* First try to find a firmware image specific for this device */ + interface = nfp_cpp_interface(pf->cpp); + nfp_cpp_serial(pf->cpp, &serial); + sprintf(fw_name, "netronome/serial-%pMF-%02hhx-%02hhx.nffw", + serial, interface >> 8, interface & 0xff); + err = request_firmware_direct(&fw, fw_name, &pdev->dev); + if (!err) + goto done; + + /* Then try the PCI name */ + sprintf(fw_name, "netronome/pci-%s.nffw", pci_name(pdev)); + err = request_firmware_direct(&fw, fw_name, &pdev->dev); + if (!err) + goto done; + + /* Finally try the card type and media */ if (!pf->eth_tbl) { dev_err(&pdev->dev, "Error: can't identify media config\n"); return NULL; @@ -226,7 +244,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) err = request_firmware(&fw, fw_name, &pdev->dev); if (err) return NULL; - +done: dev_info(&pdev->dev, "Loading FW image: %s\n", fw_name); return fw; -- cgit v1.2.3-55-g7522 From 1680a3705b00e90c1e1de91a9fec421b23cef719 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 26 Jul 2017 11:09:48 -0700 Subject: nfp: only use direct firmware requests request_firmware() will fallback to user space helper and may cause long delays when driver is loaded if udev doesn't correctly handle FW requests. Since we never really made use of the user space helper functionality switch to the simpler request_firmware_direct() call. The side effect of this change is that no warning will be printed when the FW image does not exists. To help users figure out which FW file is missing print a info message when we request each file. Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_main.c | 42 +++++++++++++++++---------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 13d056da0765..dd769eceb33d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -174,6 +174,21 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) return nfp_pcie_sriov_enable(pdev, num_vfs); } +static const struct firmware * +nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name) +{ + const struct firmware *fw = NULL; + int err; + + err = request_firmware_direct(&fw, name, &pdev->dev); + nfp_info(pf->cpp, " %s: %s\n", + name, err ? 
"not found" : "found, loading..."); + if (err) + return NULL; + + return fw; +} + /** * nfp_net_fw_find() - Find the correct firmware image for netdev mode * @pdev: PCI Device structure @@ -184,29 +199,30 @@ static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) static const struct firmware * nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) { - const struct firmware *fw = NULL; struct nfp_eth_table_port *port; + const struct firmware *fw; const char *fw_model; char fw_name[256]; const u8 *serial; - int spc, err = 0; u16 interface; - int i, j; + int spc, i, j; + + nfp_info(pf->cpp, "Looking for firmware file in order of priority:\n"); /* First try to find a firmware image specific for this device */ interface = nfp_cpp_interface(pf->cpp); nfp_cpp_serial(pf->cpp, &serial); sprintf(fw_name, "netronome/serial-%pMF-%02hhx-%02hhx.nffw", serial, interface >> 8, interface & 0xff); - err = request_firmware_direct(&fw, fw_name, &pdev->dev); - if (!err) - goto done; + fw = nfp_net_fw_request(pdev, pf, fw_name); + if (fw) + return fw; /* Then try the PCI name */ sprintf(fw_name, "netronome/pci-%s.nffw", pci_name(pdev)); - err = request_firmware_direct(&fw, fw_name, &pdev->dev); - if (!err) - goto done; + fw = nfp_net_fw_request(pdev, pf, fw_name); + if (fw) + return fw; /* Finally try the card type and media */ if (!pf->eth_tbl) { @@ -241,13 +257,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) if (spc <= 0) return NULL; - err = request_firmware(&fw, fw_name, &pdev->dev); - if (err) - return NULL; -done: - dev_info(&pdev->dev, "Loading FW image: %s\n", fw_name); - - return fw; + return nfp_net_fw_request(pdev, pf, fw_name); } /** -- cgit v1.2.3-55-g7522 From fb5e7606b100668293f7b0c4a719f7a73233cbb1 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Wed, 26 Jul 2017 12:05:38 -0700 Subject: net: phy: Remove stale comments referencing timer Since commit a390d1f379cf ("phylib: convert state_queue work to delayed_work"), the PHYLIB state machine was converted to use delayed workqueues, yet some functions were still referencing the PHY library timer in their comments, fix that and remove the now unused linux/timer.h include. Signed-off-by: Florian Fainelli Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d0626bf5c540..ac1dcf0289fa 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -705,8 +704,8 @@ EXPORT_SYMBOL(phy_start_aneg); * * Description: The PHY infrastructure can run a state machine * which tracks whether the PHY is starting up, negotiating, - * etc. This function starts the timer which tracks the state - * of the PHY. If you want to maintain your own state machine, + * etc. This function starts the delayed workqueue which tracks + * the state of the PHY. If you want to maintain your own state machine, * do not call this function. */ void phy_start_machine(struct phy_device *phydev) @@ -737,9 +736,9 @@ void phy_trigger_machine(struct phy_device *phydev, bool sync) * phy_stop_machine - stop the PHY state machine tracking * @phydev: target phy_device struct * - * Description: Stops the state machine timer, sets the state to UP - * (unless it wasn't up yet). This function must be called BEFORE - * phy_detach. 
+ * Description: Stops the state machine delayed workqueue, sets the + * state to UP (unless it wasn't up yet). This function must be + * called BEFORE phy_detach. */ void phy_stop_machine(struct phy_device *phydev) { -- cgit v1.2.3-55-g7522 From ade0a79ab11dc18f9ef377472e262ec13597d7ca Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Wed, 26 Jul 2017 12:10:48 -0700 Subject: liquidio: standardization: use min_t instead of custom macro Signed-off-by: Rick Farrington Signed-off-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/octeon_console.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index e08f7600f986..501ad95171fe 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -42,7 +42,6 @@ module_param(console_bitmask, int, 0644); MODULE_PARM_DESC(console_bitmask, "Bitmask indicating which consoles have debug output redirected to syslog."); -#define MIN(a, b) min((a), (b)) #define CAST_ULL(v) ((u64)(v)) #define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008 @@ -704,7 +703,7 @@ static int octeon_console_read(struct octeon_device *oct, u32 console_num, if (bytes_to_read <= 0) return bytes_to_read; - bytes_to_read = MIN(bytes_to_read, (s32)buf_size); + bytes_to_read = min_t(s32, bytes_to_read, buf_size); /* Check to see if what we want to read is not contiguous, and limit * ourselves to the contiguous block -- cgit v1.2.3-55-g7522 From 19d5c35950ad222e7f0cb076b428f0888f3f15f4 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Wed, 26 Jul 2017 12:11:09 -0700 Subject: liquidio: cleanup: removed cryptic and misleading macro Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/octeon_console.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 501ad95171fe..15ad1ab2c0c7 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -42,8 +42,6 @@ module_param(console_bitmask, int, 0644); MODULE_PARM_DESC(console_bitmask, "Bitmask indicating which consoles have debug output redirected to syslog."); -#define CAST_ULL(v) ((u64)(v)) - #define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008 #define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004 #define BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR 0x0006c000 @@ -233,7 +231,7 @@ static int __cvmx_bootmem_check_version(struct octeon_device *oct, (exact_match && major_version != exact_match)) { dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n", major_version, minor_version, - CAST_ULL(oct->bootmem_desc_addr)); + (long long)oct->bootmem_desc_addr); return -1; } else { return 0; -- cgit v1.2.3-55-g7522 From e0d0ae8a4304ce6be488570d085bef278fe941dd Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Thu, 6 Jul 2017 20:07:04 +0530 Subject: rsi: use BUILD_BUG_ON check for fsm_state Whenever new fsm_state enum element is added, fsm_state array also needs to be updated. If this change is missed, we may end up doing invalid access in array. BUILD_BUG_ON check will help to avoid this problem. 
Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_debugfs.c | 2 ++ drivers/net/wireless/rsi/rsi_main.h | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c index 4c0a493bd44e..f3b91b656351 100644 --- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c +++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c @@ -138,6 +138,8 @@ static int rsi_stats_read(struct seq_file *seq, void *data) seq_puts(seq, "==> RSI STA DRIVER STATUS <==\n"); seq_puts(seq, "DRIVER_FSM_STATE: "); + BUILD_BUG_ON(ARRAY_SIZE(fsm_state) != NUM_FSM_STATES); + if (common->fsm_state <= FSM_MAC_INIT_DONE) seq_printf(seq, "%s", fsm_state[common->fsm_state]); diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index f3985250b593..72675ebcd454 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -40,7 +40,9 @@ enum RSI_FSM_STATES { FSM_RESET_MAC_SENT, FSM_RADIO_CAPS_SENT, FSM_BB_RF_PROG_SENT, - FSM_MAC_INIT_DONE + FSM_MAC_INIT_DONE, + + NUM_FSM_STATES }; extern u32 rsi_zone_enabled; -- cgit v1.2.3-55-g7522 From 09cfb41f35799af91372105a40c0d173e4fef6e5 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Thu, 6 Jul 2017 20:07:05 +0530 Subject: rsi: changes in eeprom read frame EEPROM read frame is sent during device initialization to read mac address. The format of the frame is modified in firmware to include eeprom length and offset. This frame does not return firmware version now. Also same frame is sent again to read rf type and band information. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_debugfs.c | 1 + drivers/net/wireless/rsi/rsi_91x_mgmt.c | 90 ++++++++++++++++++++---------- drivers/net/wireless/rsi/rsi_main.h | 16 ++++++ drivers/net/wireless/rsi/rsi_mgmt.h | 19 ++++++- 4 files changed, 96 insertions(+), 30 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c index f3b91b656351..e98eb55c26cc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c +++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c @@ -130,6 +130,7 @@ static int rsi_stats_read(struct seq_file *seq, void *data) "FSM_COMMON_DEV_PARAMS_SENT", "FSM_BOOT_PARAMS_SENT", "FSM_EEPROM_READ_MAC_ADDR", + "FSM_EEPROM_READ_RF_TYPE", "FSM_RESET_MAC_SENT", "FSM_RADIO_CAPS_SENT", "FSM_BB_RF_PROG_SENT", diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index d4d365b5d2d6..ebd1e5647f03 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1276,7 +1276,8 @@ void rsi_inform_bss_status(struct rsi_common *common, */ static int rsi_eeprom_read(struct rsi_common *common) { - struct rsi_mac_frame *mgmt_frame; + struct rsi_eeprom_read_frame *mgmt_frame; + struct rsi_hw *adapter = common->priv; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending EEPROM read req frame\n", __func__); @@ -1289,18 +1290,21 @@ static int rsi_eeprom_read(struct rsi_common *common) } memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + mgmt_frame = (struct rsi_eeprom_read_frame *)skb->data; /* FrameType */ - mgmt_frame->desc_word[1] = cpu_to_le16(EEPROM_READ_TYPE); - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); + rsi_set_len_qno(&mgmt_frame->len_qno, 0, RSI_WIFI_MGMT_Q); + mgmt_frame->pkt_type = 
EEPROM_READ; + /* Number of bytes to read */ - mgmt_frame->desc_word[3] = cpu_to_le16(ETH_ALEN + - WLAN_MAC_MAGIC_WORD_LEN + - WLAN_HOST_MODE_LEN + - WLAN_FW_VERSION_LEN); + mgmt_frame->pkt_info = + cpu_to_le32((adapter->eeprom.length << RSI_EEPROM_LEN_OFFSET) & + RSI_EEPROM_LEN_MASK); + mgmt_frame->pkt_info |= cpu_to_le32((3 << RSI_EEPROM_HDR_SIZE_OFFSET) & + RSI_EEPROM_HDR_SIZE_MASK); + /* Address to read */ - mgmt_frame->desc_word[4] = cpu_to_le16(WLAN_MAC_EEPROM_ADDR); + mgmt_frame->eeprom_offset = cpu_to_le32(adapter->eeprom.offset); skb_put(skb, FRAME_DESC_SZ); @@ -1426,19 +1430,25 @@ int rsi_set_antenna(struct rsi_common *common, u8 antenna) static int rsi_handle_ta_confirm_type(struct rsi_common *common, u8 *msg) { + struct rsi_hw *adapter = common->priv; u8 sub_type = (msg[15] & 0xff); + u16 msg_len = ((u16 *)msg)[0] & 0xfff; + u8 offset; switch (sub_type) { case BOOTUP_PARAMS_REQUEST: rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n", __func__); if (common->fsm_state == FSM_BOOT_PARAMS_SENT) { + adapter->eeprom.length = (IEEE80211_ADDR_LEN + + WLAN_MAC_MAGIC_WORD_LEN + + WLAN_HOST_MODE_LEN); + adapter->eeprom.offset = WLAN_MAC_EEPROM_ADDR; if (rsi_eeprom_read(common)) { common->fsm_state = FSM_CARD_NOT_READY; goto out; - } else { - common->fsm_state = FSM_EEPROM_READ_MAC_ADDR; } + common->fsm_state = FSM_EEPROM_READ_MAC_ADDR; } else { rsi_dbg(INFO_ZONE, "%s: Received bootup params cfm in %d state\n", @@ -1447,30 +1457,52 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common, } break; - case EEPROM_READ_TYPE: + case EEPROM_READ: + rsi_dbg(FSM_ZONE, "EEPROM READ confirm received\n"); + if (msg_len <= 0) { + rsi_dbg(FSM_ZONE, + "%s: [EEPROM_READ] Invalid len %d\n", + __func__, msg_len); + goto out; + } + if (msg[16] != MAGIC_WORD) { + rsi_dbg(FSM_ZONE, + "%s: [EEPROM_READ] Invalid token\n", __func__); + common->fsm_state = FSM_CARD_NOT_READY; + goto out; + } if (common->fsm_state == FSM_EEPROM_READ_MAC_ADDR) { - if (msg[16] == MAGIC_WORD) { - u8 offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN - + WLAN_MAC_MAGIC_WORD_LEN); - memcpy(common->mac_addr, - &msg[offset], - ETH_ALEN); - memcpy(&common->fw_ver, - &msg[offset + ETH_ALEN], - sizeof(struct version_info)); - - } else { + offset = (FRAME_DESC_SZ + WLAN_HOST_MODE_LEN + + WLAN_MAC_MAGIC_WORD_LEN); + memcpy(common->mac_addr, &msg[offset], ETH_ALEN); + adapter->eeprom.length = + ((WLAN_MAC_MAGIC_WORD_LEN + 3) & (~3)); + adapter->eeprom.offset = WLAN_EEPROM_RFTYPE_ADDR; + if (rsi_eeprom_read(common)) { + rsi_dbg(ERR_ZONE, + "%s: Failed reading RF band\n", + __func__); common->fsm_state = FSM_CARD_NOT_READY; - break; + goto out; + } + common->fsm_state = FSM_EEPROM_READ_RF_TYPE; + } else if (common->fsm_state == FSM_EEPROM_READ_RF_TYPE) { + if ((msg[17] & 0x3) == 0x3) { + rsi_dbg(INIT_ZONE, "Dual band supported\n"); + common->band = NL80211_BAND_5GHZ; + common->num_supp_bands = 2; + } else if ((msg[17] & 0x3) == 0x1) { + rsi_dbg(INIT_ZONE, + "Only 2.4Ghz band supported\n"); + common->band = NL80211_BAND_2GHZ; + common->num_supp_bands = 1; } if (rsi_send_reset_mac(common)) goto out; - else - common->fsm_state = FSM_RESET_MAC_SENT; + common->fsm_state = FSM_RESET_MAC_SENT; } else { - rsi_dbg(ERR_ZONE, - "%s: Received eeprom mac addr in %d state\n", - __func__, common->fsm_state); + rsi_dbg(ERR_ZONE, "%s: Invalid EEPROM read type\n", + __func__); return 0; } break; diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 72675ebcd454..cbf29c36f4f4 100644 --- 
a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -37,6 +37,7 @@ enum RSI_FSM_STATES { FSM_COMMON_DEV_PARAMS_SENT, FSM_BOOT_PARAMS_SENT, FSM_EEPROM_READ_MAC_ADDR, + FSM_EEPROM_READ_RF_TYPE, FSM_RESET_MAC_SENT, FSM_RADIO_CAPS_SENT, FSM_BB_RF_PROG_SENT, @@ -177,6 +178,7 @@ struct rsi_common { /* Channel/band related */ u8 band; + u8 num_supp_bands; u8 channel_width; u16 rts_threshold; @@ -230,6 +232,19 @@ enum host_intf { RSI_HOST_INTF_USB }; +struct eepromrw_info { + u32 offset; + u32 length; + u8 write; + u16 eeprom_erase; + u8 data[480]; +}; + +struct eeprom_read { + u16 length; + u16 off_set; +}; + struct rsi_hw { struct rsi_common *priv; u8 device_model; @@ -252,6 +267,7 @@ struct rsi_hw { struct timer_list bl_cmd_timer; bool blcmd_timer_expired; u32 flash_capacity; + struct eepromrw_info eeprom; u8 dfs_region; void *rsi_dev; struct rsi_host_intf_ops *host_intf_ops; diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index dcb6db728cbd..47926c9f2027 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -43,6 +43,7 @@ #define WLAN_HOST_MODE_LEN 0x04 #define WLAN_FW_VERSION_LEN 0x08 #define MAGIC_WORD 0x5A +#define WLAN_EEPROM_RFTYPE_ADDR 424 /* Receive Frame Types */ #define TA_CONFIRM_TYPE 0x01 @@ -192,7 +193,7 @@ enum cmd_frame_type { AUTO_RATE_IND, BOOTUP_PARAMS_REQUEST, VAP_CAPABILITIES, - EEPROM_READ_TYPE , + EEPROM_READ, EEPROM_WRITE, GPIO_PIN_CONFIG , SET_RX_FILTER, @@ -353,6 +354,22 @@ struct rsi_config_vals { u8 reserved2[16]; } __packed; +/* Packet info flags */ +#define RSI_EEPROM_HDR_SIZE_OFFSET 8 +#define RSI_EEPROM_HDR_SIZE_MASK 0x300 +#define RSI_EEPROM_LEN_OFFSET 20 +#define RSI_EEPROM_LEN_MASK 0xFFF00000 + +struct rsi_eeprom_read_frame { + __le16 len_qno; + u8 pkt_type; + u8 misc_flags; + __le32 pkt_info; + __le32 eeprom_offset; + __le16 delay_ms; + __le16 reserved3; +} __packed; + static inline u32 rsi_get_queueno(u8 *addr, u16 offset) { return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12; -- cgit v1.2.3-55-g7522 From f95bbd979df76319f8f87b96049efa41092d99cc Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:06 +0530 Subject: rsi: fix sdio card reset problem card reset is not working with recent kernels. Using host->card->ocr instead of host->ocr_avail solved the problem. Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio.c | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index e5ea99bb2dd8..b5ac50394e2b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -219,26 +219,18 @@ static void rsi_reset_card(struct sdio_func *pfunction) if (err) rsi_dbg(ERR_ZONE, "%s: CMD0 failed : %d\n", __func__, err); - if (!host->ocr_avail) { - /* Issue CMD5, arg = 0 */ - err = rsi_issue_sdiocommand(pfunction, - SD_IO_SEND_OP_COND, - 0, - (MMC_RSP_R4 | MMC_CMD_BCR), - &resp); - if (err) - rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", - __func__, err); - host->ocr_avail = resp; - } + /* Issue CMD5, arg = 0 */ + err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, 0, + (MMC_RSP_R4 | MMC_CMD_BCR), &resp); + if (err) + rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", __func__, err); + card->ocr = resp; /* Issue CMD5, arg = ocr. 
Wait till card is ready */ for (i = 0; i < 100; i++) { - err = rsi_issue_sdiocommand(pfunction, - SD_IO_SEND_OP_COND, - host->ocr_avail, - (MMC_RSP_R4 | MMC_CMD_BCR), - &resp); + err = rsi_issue_sdiocommand(pfunction, SD_IO_SEND_OP_COND, + card->ocr, + (MMC_RSP_R4 | MMC_CMD_BCR), &resp); if (err) { rsi_dbg(ERR_ZONE, "%s: CMD5 failed : %d\n", __func__, err); -- cgit v1.2.3-55-g7522 From 49ddac0d4b804aed1bba4ffdc7ed20815430cd70 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:07 +0530 Subject: rsi: chip reset for SDIO interface We need to reset the chip in teardown path so that it can work next time when driver is loaded. This patch adds support for this reset configuration for SDIO. Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio.c | 80 ++++++++++++++++++++++++++++++++- drivers/net/wireless/rsi/rsi_hal.h | 33 ++++++++++++++ drivers/net/wireless/rsi/rsi_sdio.h | 1 + 3 files changed, 113 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index b5ac50394e2b..ebfd29cafbbe 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -933,6 +933,84 @@ fail: return 1; } +static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, + u16 len_in_bits) +{ + rsi_sdio_master_reg_write(adapter, RSI_GSPI_DATA_REG1, + ((addr << 6) | ((data >> 16) & 0xffff)), 2); + rsi_sdio_master_reg_write(adapter, RSI_GSPI_DATA_REG0, + (data & 0xffff), 2); + rsi_sdio_master_reg_write(adapter, RSI_GSPI_CTRL_REG0, + RSI_GSPI_CTRL_REG0_VALUE, 2); + rsi_sdio_master_reg_write(adapter, RSI_GSPI_CTRL_REG1, + ((len_in_bits - 1) | RSI_GSPI_TRIG), 2); + msleep(20); +} + +/*This function resets and re-initializes the chip.*/ +static void rsi_reset_chip(struct rsi_hw *adapter) +{ + __le32 data; + u8 sdio_interrupt_status = 0; + u8 request = 1; + int ret; + + rsi_dbg(INFO_ZONE, "Writing disable to wakeup register\n"); + ret = rsi_sdio_write_register(adapter, 0, SDIO_WAKEUP_REG, &request); + if (ret < 0) { + rsi_dbg(ERR_ZONE, + "%s: Failed to write SDIO wakeup register\n", __func__); + return; + } + msleep(20); + ret = rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER, + &sdio_interrupt_status); + if (ret < 0) { + rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", + __func__); + return; + } + rsi_dbg(INFO_ZONE, "%s: Intr Status Register value = %d\n", + __func__, sdio_interrupt_status); + + /* Put Thread-Arch processor on hold */ + if (rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR)) { + rsi_dbg(ERR_ZONE, + "%s: Unable to set ms word to common reg\n", + __func__); + return; + } + + data = TA_HOLD_THREAD_VALUE; + if (rsi_sdio_write_register_multiple(adapter, TA_HOLD_THREAD_REG | + RSI_SD_REQUEST_MASTER, + (u8 *)&data, 4)) { + rsi_dbg(ERR_ZONE, + "%s: Unable to hold Thread-Arch processor threads\n", + __func__); + return; + } + + /* This msleep will ensure Thread-Arch processor to go to hold + * and any pending dma transfers to rf spi in device to finish. 
+ */ + msleep(100); + + ulp_read_write(adapter, RSI_ULP_RESET_REG, RSI_ULP_WRITE_0, 32); + ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1, RSI_ULP_WRITE_2, 32); + ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2, RSI_ULP_WRITE_0, 32); + ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1, RSI_ULP_WRITE_50, + 32); + ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2, RSI_ULP_WRITE_0, + 32); + ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE, + RSI_ULP_TIMER_ENABLE, 32); + /* This msleep will be sufficient for the ulp + * read write operations to complete for chip reset. + */ + msleep(500); +} + /** * rsi_disconnect() - This function performs the reverse of the probe function. * @pfunction: Pointer to the sdio_func structure. @@ -956,7 +1034,7 @@ static void rsi_disconnect(struct sdio_func *pfunction) sdio_release_irq(pfunction); sdio_disable_func(pfunction); rsi_91x_deinit(adapter); - /* Resetting to take care of the case, where-in driver is re-loaded */ + rsi_reset_chip(adapter); rsi_reset_card(pfunction); sdio_release_host(pfunction); } diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 902dc540849c..3179e8606b7e 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -52,6 +52,39 @@ #define FW_LOADING_SUCCESSFUL 'S' #define LOADING_INITIATED '1' +#define RSI_ULP_RESET_REG 0x161 +#define RSI_WATCH_DOG_TIMER_1 0x16c +#define RSI_WATCH_DOG_TIMER_2 0x16d +#define RSI_WATCH_DOG_DELAY_TIMER_1 0x16e +#define RSI_WATCH_DOG_DELAY_TIMER_2 0x16f +#define RSI_WATCH_DOG_TIMER_ENABLE 0x170 + +#define RSI_ULP_WRITE_0 00 +#define RSI_ULP_WRITE_2 02 +#define RSI_ULP_WRITE_50 50 + +#define RSI_RESTART_WDT BIT(11) +#define RSI_BYPASS_ULP_ON_WDT BIT(1) + +#define RSI_ULP_TIMER_ENABLE ((0xaa000) | RSI_RESTART_WDT | \ + RSI_BYPASS_ULP_ON_WDT) +#define RSI_RF_SPI_PROG_REG_BASE_ADDR 0x40080000 + +#define RSI_GSPI_CTRL_REG0 (RSI_RF_SPI_PROG_REG_BASE_ADDR) +#define RSI_GSPI_CTRL_REG1 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x2) +#define RSI_GSPI_DATA_REG0 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x4) +#define RSI_GSPI_DATA_REG1 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x6) +#define RSI_GSPI_DATA_REG2 (RSI_RF_SPI_PROG_REG_BASE_ADDR + 0x8) + +#define RSI_GSPI_CTRL_REG0_VALUE 0x340 + +#define RSI_GSPI_DMA_MODE BIT(13) + +#define RSI_GSPI_2_ULP BIT(12) +#define RSI_GSPI_TRIG BIT(7) +#define RSI_GSPI_READ BIT(6) +#define RSI_GSPI_RF_SPI_ACTIVE BIT(8) + /* Boot loader commands */ #define SEND_RPS_FILE '2' diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 9fb73f68282a..f11f8189e0b6 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -58,6 +58,7 @@ enum sdio_interrupt_type { #define SDIO_READ_START_LVL 0x000FC #define SDIO_READ_FIFO_CTL 0x000FD #define SDIO_WRITE_FIFO_CTL 0x000FE +#define SDIO_WAKEUP_REG 0x000FF #define SDIO_FUN1_INTR_CLR_REG 0x0008 #define SDIO_REG_HIGH_SPEED 0x0013 -- cgit v1.2.3-55-g7522 From f746606a51edf85de3a11f545e506be8f4dba219 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:08 +0530 Subject: rsi: correct SDIO disconnect path handling Sometimes it's observed that we get interrupt/Rx frame when device is already detached from mac80211. In this case couple of error messages are displayed in dmesg log. 
This patch corrects the order so that disconnection will happen cleanly Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index ebfd29cafbbe..f1ba8ac3783d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -1026,17 +1026,26 @@ static void rsi_disconnect(struct sdio_func *pfunction) return; dev = (struct rsi_91x_sdiodev *)adapter->rsi_dev; + sdio_claim_host(pfunction); + sdio_release_irq(pfunction); + sdio_release_host(pfunction); + mdelay(10); - dev->write_fail = 2; rsi_mac80211_detach(adapter); + mdelay(10); - sdio_claim_host(pfunction); - sdio_release_irq(pfunction); - sdio_disable_func(pfunction); - rsi_91x_deinit(adapter); + /* Reset Chip */ rsi_reset_chip(adapter); + + /* Resetting to take care of the case, where-in driver is re-loaded */ + sdio_claim_host(pfunction); rsi_reset_card(pfunction); + sdio_disable_func(pfunction); sdio_release_host(pfunction); + dev->write_fail = 2; + rsi_91x_deinit(adapter); + rsi_dbg(ERR_ZONE, "##### RSI SDIO device disconnected #####\n"); + } #ifdef CONFIG_PM -- cgit v1.2.3-55-g7522 From ea0676c470852dea84ea74658e8ddee36ebc685a Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:09 +0530 Subject: rsi: card reset for USB interface We need to reset the chip in teardown path so that it can work next time when driver is loaded. This patch adds support for this reset configuration for USB. Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_usb.c | 72 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/rsi/rsi_usb.h | 1 + 2 files changed, 73 insertions(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index bcd7f454ef30..1d7bb9d9d09b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -558,6 +558,77 @@ fail_tx: return status; } +static int usb_ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, + u16 len_in_bits) +{ + int ret; + + ret = rsi_usb_master_reg_write + (adapter, RSI_GSPI_DATA_REG1, + ((addr << 6) | ((data >> 16) & 0xffff)), 2); + if (ret < 0) + return ret; + + ret = rsi_usb_master_reg_write(adapter, RSI_GSPI_DATA_REG0, + (data & 0xffff), 2); + if (ret < 0) + return ret; + + /* Initializing GSPI for ULP read/writes */ + rsi_usb_master_reg_write(adapter, RSI_GSPI_CTRL_REG0, + RSI_GSPI_CTRL_REG0_VALUE, 2); + + ret = rsi_usb_master_reg_write(adapter, RSI_GSPI_CTRL_REG1, + ((len_in_bits - 1) | RSI_GSPI_TRIG), 2); + if (ret < 0) + return ret; + + msleep(20); + + return 0; +} + +static int rsi_reset_card(struct rsi_hw *adapter) +{ + int ret; + + rsi_dbg(INFO_ZONE, "Resetting Card...\n"); + rsi_usb_master_reg_write(adapter, RSI_TA_HOLD_REG, 0xE, 4); + + /* This msleep will ensure Thread-Arch processor to go to hold + * and any pending dma transfers to rf in device to finish. 
+ */ + msleep(100); + + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1, + RSI_ULP_WRITE_2, 32); + if (ret < 0) + goto fail; + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2, + RSI_ULP_WRITE_0, 32); + if (ret < 0) + goto fail; + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1, + RSI_ULP_WRITE_50, 32); + if (ret < 0) + goto fail; + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2, + RSI_ULP_WRITE_0, 32); + if (ret < 0) + goto fail; + ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE, + RSI_ULP_TIMER_ENABLE, 32); + if (ret < 0) + goto fail; + + rsi_dbg(INFO_ZONE, "Reset card done\n"); + return ret; + +fail: + rsi_dbg(ERR_ZONE, "Reset card failed\n"); + return ret; +} + /** * rsi_probe() - This function is called by kernel when the driver provided * Vendor and device IDs are matched. All the initialization @@ -641,6 +712,7 @@ static void rsi_disconnect(struct usb_interface *pfunction) return; rsi_mac80211_detach(adapter); + rsi_reset_card(adapter); rsi_deinit_usb_interface(adapter); rsi_91x_deinit(adapter); diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 59513ac61fb3..2c5c0e7a979a 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -25,6 +25,7 @@ #define USB_INTERNAL_REG_1 0x25000 #define RSI_USB_READY_MAGIC_NUM 0xab #define FW_STATUS_REG 0x41050012 +#define RSI_TA_HOLD_REG 0x22000844 #define USB_VENDOR_REGISTER_READ 0x15 #define USB_VENDOR_REGISTER_WRITE 0x16 -- cgit v1.2.3-55-g7522 From ed833be6faa00168466c37e763dee43895b2623b Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:10 +0530 Subject: rsi: USB tx headroom cleanup USB headroom is added while submitting the data to URB as per firmware's requirement. This logic is moved to rsi_usb_card_write() so that caller need not worry about it. Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_usb.c | 41 +++++++++++++++++----------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 1d7bb9d9d09b..25f619506b58 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -29,19 +29,24 @@ * Return: status: 0 on success, a negative error code on failure. 
*/ static int rsi_usb_card_write(struct rsi_hw *adapter, - void *buf, + u8 *buf, u16 len, u8 endpoint) { struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; int status; - s32 transfer; + u8 *seg = dev->tx_buffer; + int transfer; + int ep = dev->bulkout_endpoint_addr[endpoint - 1]; + memset(seg, 0, len + RSI_USB_TX_HEAD_ROOM); + memcpy(seg + RSI_USB_TX_HEAD_ROOM, buf, len); + len += RSI_USB_TX_HEAD_ROOM; + transfer = len; status = usb_bulk_msg(dev->usbdev, - usb_sndbulkpipe(dev->usbdev, - dev->bulkout_endpoint_addr[endpoint - 1]), - buf, - len, + usb_sndbulkpipe(dev->usbdev, ep), + (void *)seg, + (int)len, &transfer, HZ * 5); @@ -68,23 +73,19 @@ static int rsi_write_multiple(struct rsi_hw *adapter, u8 *data, u32 count) { - struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)adapter->rsi_dev; - u8 *seg = dev->tx_buffer; + struct rsi_91x_usbdev *dev = + (struct rsi_91x_usbdev *)adapter->rsi_dev; - if (dev->write_fail) - return 0; + if (!adapter) + return -ENODEV; - if (endpoint == MGMT_EP) { - memset(seg, 0, RSI_USB_TX_HEAD_ROOM); - memcpy(seg + RSI_USB_TX_HEAD_ROOM, data, count); - } else { - seg = ((u8 *)data - RSI_USB_TX_HEAD_ROOM); - } + if (endpoint == 0) + return -EINVAL; + + if (dev->write_fail) + return -ENETDOWN; - return rsi_usb_card_write(adapter, - seg, - count + RSI_USB_TX_HEAD_ROOM, - endpoint); + return rsi_usb_card_write(adapter, data, count, endpoint); } /** -- cgit v1.2.3-55-g7522 From ac6107caa0d871e59a63f69e3617e47f1ed15c01 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Thu, 6 Jul 2017 20:07:11 +0530 Subject: rsi: correct the logic of deriving queue number Maximum valid queue number is 0x5. So anding with 0x7 should be ok here. Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 25f619506b58..dc0a0b2e9afc 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -381,7 +381,7 @@ static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter, u8 *pkt, u32 len) { - u32 queueno = ((pkt[1] >> 4) & 0xf); + u32 queueno = ((pkt[1] >> 4) & 0x7); u8 endpoint; endpoint = ((queueno == RSI_WIFI_MGMT_Q) ? MGMT_EP : DATA_EP); -- cgit v1.2.3-55-g7522 From 5d16a1c1ae1193fdc8379fc4fb0b49b8ff9386db Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:12 +0530 Subject: rsi: rename USB endpoint macros These endpoints are for WLAN and BT protocols. Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_usb.c | 2 +- drivers/net/wireless/rsi/rsi_usb.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index dc0a0b2e9afc..99967ce94a39 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -384,7 +384,7 @@ static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter, u32 queueno = ((pkt[1] >> 4) & 0x7); u8 endpoint; - endpoint = ((queueno == RSI_WIFI_MGMT_Q) ? MGMT_EP : DATA_EP); + endpoint = ((queueno == RSI_WIFI_MGMT_Q) ? 
WLAN_EP : BT_EP); return rsi_write_multiple(adapter, endpoint, diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 2c5c0e7a979a..3babf81f5a39 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -33,8 +33,8 @@ #define MAX_RX_URBS 1 #define MAX_BULK_EP 8 -#define MGMT_EP 1 -#define DATA_EP 2 +#define WLAN_EP 1 +#define BT_EP 2 #define RSI_USB_BUF_SIZE 4096 -- cgit v1.2.3-55-g7522 From d1f69e418f663e67472580aecceff755ec0b5914 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:13 +0530 Subject: rsi: choose correct endpoint based on queue. Till now only management packets were handled. Let's enhance the logic for choosing endpoint to accommodate other packets. Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_usb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 99967ce94a39..3febf24d619b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -384,7 +384,8 @@ static int rsi_usb_host_intf_write_pkt(struct rsi_hw *adapter, u32 queueno = ((pkt[1] >> 4) & 0x7); u8 endpoint; - endpoint = ((queueno == RSI_WIFI_MGMT_Q) ? WLAN_EP : BT_EP); + endpoint = ((queueno == RSI_WIFI_MGMT_Q || queueno == RSI_WIFI_DATA_Q || + queueno == RSI_COEX_Q) ? WLAN_EP : BT_EP); return rsi_write_multiple(adapter, endpoint, -- cgit v1.2.3-55-g7522 From 015240018b0a6c4b9e394055a4fc7d27bdd2e104 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:14 +0530 Subject: rsi: set immediate wakeup bit immediate wakeup bit is set while sending internal management frame to the firmware. Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index c2303599c12e..7c9224fa53a3 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -149,6 +149,7 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, extnd_size = ((uintptr_t)skb->data & 0x3); if (tx_params->flags & INTERNAL_MGMT_PKT) { + skb->data[1] |= BIT(7); /* Immediate Wakeup bit*/ if ((extnd_size) > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); dev_kfree_skb(skb); -- cgit v1.2.3-55-g7522 From de2dea16ec9f6b483c13f9e1709821c76699fb6b Mon Sep 17 00:00:00 2001 From: Pavani Muthyala Date: Thu, 6 Jul 2017 20:07:15 +0530 Subject: rsi: management frame descriptor preparation cleanup Currently this descriptor is prepared with the help of __le16 pointer. This patch makes use of a structure to prepare the descriptor in a cleaner way. 
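To make the cleanup concrete: instead of poking __le16 words by index (desc_word[0], desc_word[3], ...), the frame descriptor is declared once as a packed struct and filled through named fields, which documents the layout and is harder to get wrong when it changes. The sketch below is a hypothetical host-side illustration of that pattern only; the field names and layout are invented, not the driver's real rsi_mgmt_desc, and a trivial to_le16() stands in for cpu_to_le16() on an assumed little-endian host.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented 8-byte descriptor: bits [11:0] of len_qno carry the length,
 * bits [15:12] the queue number, mirroring the rsi_set_len_qno() idea. */
struct demo_desc {
	uint16_t len_qno;
	uint8_t  frame_type;
	uint8_t  misc_flags;
	uint16_t rate_info;
	uint16_t seq_ctrl;
} __attribute__((packed));

static uint16_t to_le16(uint16_t v) { return v; }	/* little-endian host assumed */

static void fill_desc(void *buf, uint16_t len, uint8_t qno, uint16_t seq)
{
	struct demo_desc *d = buf;

	memset(d, 0, sizeof(*d));
	d->len_qno    = to_le16((len & 0x0fff) | ((uint16_t)qno << 12));
	d->frame_type = 0x02;			/* named field, not msg[1] */
	d->rate_info  = to_le16(0x000b);
	d->seq_ctrl   = to_le16(seq >> 4);
}

int main(void)
{
	uint8_t frame[64];

	fill_desc(frame, 48, 4, 0x1230);
	printf("len_qno=0x%04x\n", ((struct demo_desc *)frame)->len_qno);
	return 0;
}

In the driver itself the multi-byte members are additionally typed __le16/__le32, so sparse can flag any assignment that misses the byte-order conversion.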
Signed-off-by: Pavani Muthyala Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 37 +++++++++++++++++----------------- drivers/net/wireless/rsi/rsi_hal.h | 17 ++++++++++++++++ 2 files changed, 35 insertions(+), 19 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 7c9224fa53a3..9eaa0a265c77 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -138,9 +138,9 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, struct ieee80211_bss_conf *bss; struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; + struct rsi_mgmt_desc *mgmt_desc; struct skb_info *tx_params; int status = -E2BIG; - __le16 *msg; u8 extnd_size; u8 vap_id = 0; @@ -176,44 +176,43 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, skb_push(skb, FRAME_DESC_SZ); memset(skb->data, 0, FRAME_DESC_SZ); - msg = (__le16 *)skb->data; + mgmt_desc = (struct rsi_mgmt_desc *)skb->data; if (skb->len > MAX_MGMT_PKT_SIZE) { rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__); goto err; } - msg[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - msg[1] = cpu_to_le16(TX_DOT11_MGMT); - msg[2] = cpu_to_le16(MIN_802_11_HDR_LEN << 8); - msg[3] = cpu_to_le16(RATE_INFO_ENABLE); - msg[6] = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4); + rsi_set_len_qno(&mgmt_desc->len_qno, (skb->len - FRAME_DESC_SZ), + RSI_WIFI_MGMT_Q); + mgmt_desc->frame_type = TX_DOT11_MGMT; + mgmt_desc->header_len = MIN_802_11_HDR_LEN; + mgmt_desc->info_cap |= cpu_to_le16(RATE_INFO_ENABLE); + mgmt_desc->seq_ctrl = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4); if (wh->addr1[0] & BIT(0)) - msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT); + mgmt_desc->info_cap |= cpu_to_le16(RSI_BROADCAST_PKT); if (common->band == NL80211_BAND_2GHZ) - msg[4] = cpu_to_le16(RSI_11B_MODE); + mgmt_desc->rate_info = RSI_11B_MODE; else - msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE); + mgmt_desc->rate_info = (RSI_RATE_6 & 0x0f) | RSI_11G_MODE; if (conf_is_ht40(conf)) { - msg[4] = cpu_to_le16(0xB | RSI_11G_MODE); - msg[5] = cpu_to_le16(0x6); + mgmt_desc->rate_info = 0xB | RSI_11G_MODE; + mgmt_desc->bbp_info = BBP_INFO_40MHZ; } /* Indicate to firmware to give cfm */ if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) { - msg[1] |= cpu_to_le16(BIT(10)); - msg[7] = cpu_to_le16(PROBEREQ_CONFIRM); + mgmt_desc->misc_flags |= BIT(2); + mgmt_desc->cfm_frame_type = PROBEREQ_CONFIRM; common->mgmt_q_block = true; } + mgmt_desc->vap_info = vap_id << 8; - msg[7] |= cpu_to_le16(vap_id << 8); - - status = adapter->host_intf_ops->write_pkt(common->priv, (u8 *)msg, - skb->len); + status = adapter->host_intf_ops->write_pkt(common->priv, + (u8 *)mgmt_desc, skb->len); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__); diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 3179e8606b7e..da115dd11bdd 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -99,6 +99,8 @@ #define RSI_DEV_OPMODE_WIFI_ALONE 1 #define RSI_DEV_COEX_MODE_WIFI_ALONE 1 +#define BBP_INFO_40MHZ 0x6 + struct bl_header { __le32 flags; __le32 image_no; @@ -112,6 +114,21 @@ struct ta_metadata { unsigned int address; }; +struct rsi_mgmt_desc { + __le16 len_qno; + u8 frame_type; + u8 misc_flags; + u8 reserved1; + u8 header_len; + __le16 info_cap; + u8 rate_info; + u8 reserved2; + u16 bbp_info; + __le16 seq_ctrl; + u8 cfm_frame_type; + u8 
vap_info; +} __packed; + int rsi_hal_device_init(struct rsi_hw *adapter); #endif -- cgit v1.2.3-55-g7522 From 6507de6df9007b24d843287a6feba10c1dafffd6 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Thu, 6 Jul 2017 20:07:16 +0530 Subject: rsi: separate function for management packet descriptor Management descriptor preparation is move to a separate function as it will be called from a different context in upcoming patches. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 132 +++++++++++++++++++++------------ drivers/net/wireless/rsi/rsi_hal.h | 10 +-- drivers/net/wireless/rsi/rsi_main.h | 7 ++ drivers/net/wireless/rsi/rsi_mgmt.h | 1 + 4 files changed, 98 insertions(+), 52 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 9eaa0a265c77..f8ccb1cb7c75 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -25,6 +25,89 @@ static struct ta_metadata metadata_flash_content[] = { {"rsi/rs9113_wlan_qspi.rps", 0x00010000}, }; +/*This function prepares descriptor for given management packet*/ + +static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) +{ + struct rsi_hw *adapter = common->priv; + struct ieee80211_hdr *wh = NULL; + struct ieee80211_tx_info *info; + struct ieee80211_conf *conf = &adapter->hw->conf; + struct ieee80211_vif *vif = NULL; + struct rsi_mgmt_desc *mgmt_desc; + struct skb_info *tx_params; + struct ieee80211_bss_conf *bss = NULL; + struct xtended_desc *xtend_desc = NULL; + u8 header_size; + u32 dword_align_bytes = 0; + + info = IEEE80211_SKB_CB(skb); + tx_params = (struct skb_info *)info->driver_data; + + /* Update header size */ + header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); + if (header_size > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, + "%s: Failed to add extended descriptor\n", + __func__); + return -ENOSPC; + } + skb_push(skb, header_size); + dword_align_bytes = ((unsigned long)skb->data & 0x3f); + if (dword_align_bytes > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, + "%s: Failed to add dword align\n", __func__); + return -ENOSPC; + } + skb_push(skb, dword_align_bytes); + header_size += dword_align_bytes; + + tx_params->internal_hdr_size = header_size; + memset(&skb->data[0], 0, header_size); + bss = &info->control.vif->bss_conf; + wh = (struct ieee80211_hdr *)&skb->data[header_size]; + vif = adapter->vifs[0]; + + mgmt_desc = (struct rsi_mgmt_desc *)skb->data; + xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; + + if (skb->len > MAX_MGMT_PKT_SIZE) { + rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__); + return -EINVAL; + } + rsi_set_len_qno(&mgmt_desc->len_qno, (skb->len - FRAME_DESC_SZ), + RSI_WIFI_MGMT_Q); + mgmt_desc->frame_type = TX_DOT11_MGMT; + mgmt_desc->header_len = MIN_802_11_HDR_LEN; + mgmt_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; + mgmt_desc->frame_info |= cpu_to_le16(RATE_INFO_ENABLE); + if (is_broadcast_ether_addr(wh->addr1)) + mgmt_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT); + + mgmt_desc->seq_ctrl = + cpu_to_le16(IEEE80211_SEQ_TO_SN(le16_to_cpu(wh->seq_ctrl))); + if (common->band == NL80211_BAND_2GHZ) + mgmt_desc->rate_info = RSI_RATE_1; + else + mgmt_desc->rate_info = RSI_RATE_6; + + if (conf_is_ht40(conf)) + mgmt_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE); + + if (ieee80211_is_probe_req(wh->frame_control)) { + if (!bss->assoc) { + rsi_dbg(INFO_ZONE, + "%s: blocking mgmt queue\n", __func__); + 
mgmt_desc->misc_flags = RSI_DESC_REQUIRE_CFM_TO_HOST; + xtend_desc->confirm_frame_type = PROBEREQ_CONFIRM; + common->mgmt_q_block = true; + rsi_dbg(INFO_ZONE, "Mgmt queue blocked\n"); + } + } + + return 0; +} + /** * rsi_send_data_pkt() - This function sends the recieved data packet from * driver to device. @@ -133,16 +216,10 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; - struct ieee80211_hdr *wh; struct ieee80211_tx_info *info; - struct ieee80211_bss_conf *bss; - struct ieee80211_hw *hw = adapter->hw; - struct ieee80211_conf *conf = &hw->conf; - struct rsi_mgmt_desc *mgmt_desc; struct skb_info *tx_params; int status = -E2BIG; u8 extnd_size; - u8 vap_id = 0; info = IEEE80211_SKB_CB(skb); tx_params = (struct skb_info *)info->driver_data; @@ -168,51 +245,12 @@ int rsi_send_mgmt_pkt(struct rsi_common *common, return status; } - bss = &info->control.vif->bss_conf; - wh = (struct ieee80211_hdr *)&skb->data[0]; - if (FRAME_DESC_SZ > skb_headroom(skb)) goto err; - skb_push(skb, FRAME_DESC_SZ); - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_desc = (struct rsi_mgmt_desc *)skb->data; - - if (skb->len > MAX_MGMT_PKT_SIZE) { - rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__); - goto err; - } - - rsi_set_len_qno(&mgmt_desc->len_qno, (skb->len - FRAME_DESC_SZ), - RSI_WIFI_MGMT_Q); - mgmt_desc->frame_type = TX_DOT11_MGMT; - mgmt_desc->header_len = MIN_802_11_HDR_LEN; - mgmt_desc->info_cap |= cpu_to_le16(RATE_INFO_ENABLE); - mgmt_desc->seq_ctrl = cpu_to_le16(le16_to_cpu(wh->seq_ctrl) >> 4); - - if (wh->addr1[0] & BIT(0)) - mgmt_desc->info_cap |= cpu_to_le16(RSI_BROADCAST_PKT); - - if (common->band == NL80211_BAND_2GHZ) - mgmt_desc->rate_info = RSI_11B_MODE; - else - mgmt_desc->rate_info = (RSI_RATE_6 & 0x0f) | RSI_11G_MODE; - - if (conf_is_ht40(conf)) { - mgmt_desc->rate_info = 0xB | RSI_11G_MODE; - mgmt_desc->bbp_info = BBP_INFO_40MHZ; - } - - /* Indicate to firmware to give cfm */ - if ((skb->data[16] == IEEE80211_STYPE_PROBE_REQ) && (!bss->assoc)) { - mgmt_desc->misc_flags |= BIT(2); - mgmt_desc->cfm_frame_type = PROBEREQ_CONFIRM; - common->mgmt_q_block = true; - } - mgmt_desc->vap_info = vap_id << 8; - + rsi_prepare_mgmt_desc(common, skb); status = adapter->host_intf_ops->write_pkt(common->priv, - (u8 *)mgmt_desc, skb->len); + (u8 *)skb->data, skb->len); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write the packet\n", __func__); diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index da115dd11bdd..adbe54e10007 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -118,14 +118,14 @@ struct rsi_mgmt_desc { __le16 len_qno; u8 frame_type; u8 misc_flags; - u8 reserved1; + u8 xtend_desc_size; u8 header_len; - __le16 info_cap; + __le16 frame_info; u8 rate_info; - u8 reserved2; - u16 bbp_info; + u8 reserved1; + __le16 bbp_info; __le16 seq_ctrl; - u8 cfm_frame_type; + u8 reserved2; u8 vap_info; } __packed; diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index cbf29c36f4f4..699e9da1a87b 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -105,6 +105,7 @@ struct skb_info { u16 channel; s8 tid; s8 sta_id; + u8 internal_hdr_size; }; enum edca_queue { @@ -158,6 +159,12 @@ struct cqm_info { u32 rssi_hyst; }; +struct xtended_desc { + u8 confirm_frame_type; + u8 retry_cnt; + u16 reserved; +}; + struct rsi_hw; struct rsi_common { diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h 
b/drivers/net/wireless/rsi/rsi_mgmt.h index 47926c9f2027..8e05e0b222e0 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -63,6 +63,7 @@ #define RF_RESET_ENABLE BIT(3) #define RATE_INFO_ENABLE BIT(0) #define RSI_BROADCAST_PKT BIT(9) +#define RSI_DESC_REQUIRE_CFM_TO_HOST BIT(2) #define UPPER_20_ENABLE (0x2 << 12) #define LOWER_20_ENABLE (0x4 << 12) -- cgit v1.2.3-55-g7522 From af193097767819b72456800143cf577e453a9331 Mon Sep 17 00:00:00 2001 From: Pavani Muthyala Date: Thu, 6 Jul 2017 20:07:17 +0530 Subject: rsi: data packet descriptor code cleanup Currently this descriptor is prepared with the help of __le16 pointer. This patch makes use of a structure to prepare the descriptor in a cleaner way. Signed-off-by: Pavani Muthyala Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 41 +++++++++++++++++----------------- drivers/net/wireless/rsi/rsi_hal.h | 13 +++++++++++ drivers/net/wireless/rsi/rsi_mgmt.h | 3 +++ 3 files changed, 36 insertions(+), 21 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index f8ccb1cb7c75..9da2fc806a4d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -123,10 +123,10 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) struct ieee80211_tx_info *info; struct skb_info *tx_params; struct ieee80211_bss_conf *bss; + struct rsi_data_desc *data_desc; int status; u8 ieee80211_size = MIN_802_11_HDR_LEN; u8 extnd_size; - __le16 *frame_desc; u16 seq_num; info = IEEE80211_SKB_CB(skb); @@ -150,12 +150,12 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) } skb_push(skb, (FRAME_DESC_SZ + extnd_size)); - frame_desc = (__le16 *)&skb->data[0]; - memset((u8 *)frame_desc, 0, FRAME_DESC_SZ); + data_desc = (struct rsi_data_desc *)skb->data; + memset(data_desc, 0, sizeof(*data_desc)); if (ieee80211_is_data_qos(tmp_hdr->frame_control)) { ieee80211_size += 2; - frame_desc[6] |= cpu_to_le16(BIT(12)); + data_desc->mac_flags |= cpu_to_le16(RSI_QOS_ENABLE); } if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) && @@ -164,35 +164,34 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) ieee80211_size += 4; else ieee80211_size += 8; - frame_desc[6] |= cpu_to_le16(BIT(15)); + data_desc->mac_flags |= cpu_to_le16(RSI_ENCRYPT_PKT); } + rsi_set_len_qno(&data_desc->len_qno, (skb->len - FRAME_DESC_SZ), + RSI_WIFI_DATA_Q); + data_desc->header_len = ieee80211_size; + data_desc->xtend_desc_size = extnd_size; - frame_desc[0] = cpu_to_le16((skb->len - FRAME_DESC_SZ) | - (RSI_WIFI_DATA_Q << 12)); - frame_desc[2] = cpu_to_le16((extnd_size) | (ieee80211_size) << 8); - - if (common->min_rate != 0xffff) { + if (common->min_rate != RSI_RATE_AUTO) { /* Send fixed rate */ - frame_desc[3] = cpu_to_le16(RATE_INFO_ENABLE); - frame_desc[4] = cpu_to_le16(common->min_rate); + data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); + data_desc->rate_info = cpu_to_le16(common->min_rate); if (conf_is_ht40(&common->priv->hw->conf)) - frame_desc[5] = cpu_to_le16(FULL40M_ENABLE); + data_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE); if (common->vif_info[0].sgi) { if (common->min_rate & 0x100) /* Only MCS rates */ - frame_desc[4] |= + data_desc->rate_info |= cpu_to_le16(ENABLE_SHORTGI_RATE); } - } - frame_desc[6] |= cpu_to_le16(seq_num & 0xfff); - frame_desc[7] = cpu_to_le16(((tx_params->tid & 0xf) << 4) | - (skb->priority & 0xf) | - (tx_params->sta_id << 8)); + 
data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff); + data_desc->qid_tid = ((skb->priority & 0xf) | + ((tx_params->tid & 0xf) << 4)); + data_desc->sta_id = tx_params->sta_id; - status = adapter->host_intf_ops->write_pkt(common->priv, skb->data, + status = adapter->host_intf_ops->write_pkt(adapter, skb->data, skb->len); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", @@ -200,7 +199,7 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) err: ++common->tx_stats.total_tx_pkt_freed[skb->priority]; - rsi_indicate_tx_status(common->priv, skb, status); + rsi_indicate_tx_status(adapter, skb, status); return status; } diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index adbe54e10007..2ae5863b9390 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -129,6 +129,19 @@ struct rsi_mgmt_desc { u8 vap_info; } __packed; +struct rsi_data_desc { + __le16 len_qno; + u16 reserved; + u8 xtend_desc_size; + u8 header_len; + __le16 frame_info; + __le16 rate_info; + __le16 bbp_info; + __le16 mac_flags; + u8 qid_tid; + u8 sta_id; +} __packed; + int rsi_hal_device_init(struct rsi_hw *adapter); #endif diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 8e05e0b222e0..058dfe539922 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -64,6 +64,8 @@ #define RATE_INFO_ENABLE BIT(0) #define RSI_BROADCAST_PKT BIT(9) #define RSI_DESC_REQUIRE_CFM_TO_HOST BIT(2) +#define RSI_QOS_ENABLE BIT(12) +#define RSI_ENCRYPT_PKT BIT(15) #define UPPER_20_ENABLE (0x2 << 12) #define LOWER_20_ENABLE (0x4 << 12) @@ -122,6 +124,7 @@ #define RSI_RATE_MCS6 0x106 #define RSI_RATE_MCS7 0x107 #define RSI_RATE_MCS7_SG 0x307 +#define RSI_RATE_AUTO 0xffff #define BW_20MHZ 0 #define BW_40MHZ 1 -- cgit v1.2.3-55-g7522 From 0eb42586cf876ebeea07c5952d7c95f3b81d685b Mon Sep 17 00:00:00 2001 From: Pavani Muthyala Date: Thu, 6 Jul 2017 20:07:18 +0530 Subject: rsi: data packet descriptor enhancements This patch covers some enhancements in data packet descriptor preparation especially for EAPOL, multicast/broadcast packets. 
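In outline, the enhancements special-case two kinds of frames while filling the descriptor: EAPOL frames (skb->protocol == ETH_P_PAE) are sent at a fixed basic rate (1 Mbps on 2.4 GHz, 6 Mbps on 5 GHz) with a host-supplied retry count of 15 so the 4-way handshake survives a marginal link, and broadcast/multicast frames have the rate-info and broadcast flags set and are addressed to the VAP's own station id rather than a peer. A rough, self-contained sketch of that classification follows; the struct and function names are invented for illustration and are not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_PAE	0x888e		/* EAPOL ethertype */
#define EAPOL_RETRY_CNT	15

/* Invented stand-in for the descriptor fields the patch touches. */
struct tx_policy {
	bool     fixed_rate;	/* RATE_INFO_ENABLE equivalent */
	unsigned rate_mbps;	/* 0 = leave to normal rate selection */
	unsigned retry_cnt;	/* 0 = firmware default */
	bool     to_group;	/* broadcast/multicast flag */
};

static bool is_group_addr(const uint8_t *da)
{
	return da[0] & 0x01;	/* group bit covers multicast and broadcast */
}

static struct tx_policy classify_tx(uint16_t ethertype, const uint8_t *da,
				    bool band_5ghz)
{
	struct tx_policy p = { 0 };

	if (ethertype == ETH_P_PAE) {		/* EAPOL / 4-way handshake */
		p.fixed_rate = true;
		p.rate_mbps  = band_5ghz ? 6 : 1;
		p.retry_cnt  = EAPOL_RETRY_CNT;
	}

	if (is_group_addr(da)) {		/* no per-peer rate state */
		p.fixed_rate = true;
		p.to_group   = true;
	}

	return p;
}

int main(void)
{
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	struct tx_policy p = classify_tx(ETH_P_PAE, bcast, false);

	printf("fixed=%d rate=%u retries=%u group=%d\n",
	       p.fixed_rate, p.rate_mbps, p.retry_cnt, p.to_group);
	return 0;
}
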
Signed-off-by: Pavani Muthyala Signed-off-by: Amitkumar Karwar Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 64 ++++++++++++++++++++++++++-------- drivers/net/wireless/rsi/rsi_hal.h | 3 +- drivers/net/wireless/rsi/rsi_mgmt.h | 3 ++ 3 files changed, 55 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 9da2fc806a4d..af7fe87940c2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -119,14 +119,18 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; - struct ieee80211_hdr *tmp_hdr; + struct ieee80211_hdr *wh = NULL; struct ieee80211_tx_info *info; + struct ieee80211_vif *vif = NULL; struct skb_info *tx_params; struct ieee80211_bss_conf *bss; struct rsi_data_desc *data_desc; + struct xtended_desc *xtend_desc; int status; u8 ieee80211_size = MIN_802_11_HDR_LEN; - u8 extnd_size; + u8 header_size; + u8 vap_id = 0; + u8 dword_align_bytes; u16 seq_num; info = IEEE80211_SKB_CB(skb); @@ -137,23 +141,34 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) status = -EINVAL; goto err; } - - tmp_hdr = (struct ieee80211_hdr *)&skb->data[0]; - seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4); - - extnd_size = ((uintptr_t)skb->data & 0x3); - - if ((FRAME_DESC_SZ + extnd_size) > skb_headroom(skb)) { + header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); + if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); status = -ENOSPC; goto err; } + skb_push(skb, header_size); + dword_align_bytes = ((unsigned long)skb->data & 0x3f); + if (header_size > skb_headroom(skb)) { + rsi_dbg(ERR_ZONE, "%s: Not enough headroom\n", __func__); + status = -ENOSPC; + goto err; + } + skb_push(skb, dword_align_bytes); + header_size += dword_align_bytes; - skb_push(skb, (FRAME_DESC_SZ + extnd_size)); + tx_params->internal_hdr_size = header_size; data_desc = (struct rsi_data_desc *)skb->data; - memset(data_desc, 0, sizeof(*data_desc)); + memset(data_desc, 0, header_size); + + xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; + wh = (struct ieee80211_hdr *)&skb->data[header_size]; + seq_num = (le16_to_cpu(wh->seq_ctrl) >> 4); + vif = adapter->vifs[0]; - if (ieee80211_is_data_qos(tmp_hdr->frame_control)) { + data_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; + + if (ieee80211_is_data_qos(wh->frame_control)) { ieee80211_size += 2; data_desc->mac_flags |= cpu_to_le16(RSI_QOS_ENABLE); } @@ -169,7 +184,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) rsi_set_len_qno(&data_desc->len_qno, (skb->len - FRAME_DESC_SZ), RSI_WIFI_DATA_Q); data_desc->header_len = ieee80211_size; - data_desc->xtend_desc_size = extnd_size; if (common->min_rate != RSI_RATE_AUTO) { /* Send fixed rate */ @@ -184,6 +198,21 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) data_desc->rate_info |= cpu_to_le16(ENABLE_SHORTGI_RATE); } + + } + + if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n"); + + data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); + if (common->band == NL80211_BAND_5GHZ) + data_desc->rate_info = cpu_to_le16(RSI_RATE_6); + else + data_desc->rate_info = cpu_to_le16(RSI_RATE_1); + data_desc->mac_flags |= cpu_to_le16(RSI_REKEY_PURPOSE); + 
data_desc->misc_flags |= RSI_FETCH_RETRY_CNT_FRM_HST; +#define EAPOL_RETRY_CNT 15 + xtend_desc->retry_cnt = EAPOL_RETRY_CNT; } data_desc->mac_flags = cpu_to_le16(seq_num & 0xfff); @@ -191,7 +220,14 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) ((tx_params->tid & 0xf) << 4)); data_desc->sta_id = tx_params->sta_id; - status = adapter->host_intf_ops->write_pkt(adapter, skb->data, + if ((is_broadcast_ether_addr(wh->addr1)) || + (is_multicast_ether_addr(wh->addr1))) { + data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); + data_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT); + data_desc->sta_id = vap_id; + } + + status = adapter->host_intf_ops->write_pkt(common->priv, skb->data, skb->len); if (status) rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 2ae5863b9390..00c6a0c5a891 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -131,7 +131,8 @@ struct rsi_mgmt_desc { struct rsi_data_desc { __le16 len_qno; - u16 reserved; + u8 cfm_frame_type; + u8 misc_flags; u8 xtend_desc_size; u8 header_len; __le16 frame_info; diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 058dfe539922..a8a195e8a91e 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -64,7 +64,10 @@ #define RATE_INFO_ENABLE BIT(0) #define RSI_BROADCAST_PKT BIT(9) #define RSI_DESC_REQUIRE_CFM_TO_HOST BIT(2) +#define RSI_ADD_DELTA_TSF_VAP_ID BIT(3) +#define RSI_FETCH_RETRY_CNT_FRM_HST BIT(4) #define RSI_QOS_ENABLE BIT(12) +#define RSI_REKEY_PURPOSE BIT(13) #define RSI_ENCRYPT_PKT BIT(15) #define UPPER_20_ENABLE (0x2 << 12) -- cgit v1.2.3-55-g7522 From ceb2e4eab9948a8ed14fd5b439ccb66dec4931e0 Mon Sep 17 00:00:00 2001 From: Pavani Muthyala Date: Thu, 6 Jul 2017 20:07:19 +0530 Subject: rsi: separate function for data packet descriptor Data packet descriptor preparation code is moved a separate function. Signed-off-by: Pavani Muthyala Signed-off-by: Amitkumar Karwar Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_hal.c | 51 +++++++++++++++++++--------------- 1 file changed, 28 insertions(+), 23 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index af7fe87940c2..b0a7a1511aee 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -108,25 +108,15 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) return 0; } -/** - * rsi_send_data_pkt() - This function sends the recieved data packet from - * driver to device. - * @common: Pointer to the driver private structure. - * @skb: Pointer to the socket buffer structure. - * - * Return: status: 0 on success, -1 on failure. 
- */ -int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) +/* This function prepares descriptor for given data packet */ +static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) { - struct rsi_hw *adapter = common->priv; struct ieee80211_hdr *wh = NULL; struct ieee80211_tx_info *info; - struct ieee80211_vif *vif = NULL; struct skb_info *tx_params; struct ieee80211_bss_conf *bss; struct rsi_data_desc *data_desc; struct xtended_desc *xtend_desc; - int status; u8 ieee80211_size = MIN_802_11_HDR_LEN; u8 header_size; u8 vap_id = 0; @@ -137,22 +127,16 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) bss = &info->control.vif->bss_conf; tx_params = (struct skb_info *)info->driver_data; - if (!bss->assoc) { - status = -EINVAL; - goto err; - } header_size = FRAME_DESC_SZ + sizeof(struct xtended_desc); if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); - status = -ENOSPC; - goto err; + return -ENOSPC; } skb_push(skb, header_size); dword_align_bytes = ((unsigned long)skb->data & 0x3f); if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Not enough headroom\n", __func__); - status = -ENOSPC; - goto err; + return -ENOSPC; } skb_push(skb, dword_align_bytes); header_size += dword_align_bytes; @@ -164,7 +148,6 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; wh = (struct ieee80211_hdr *)&skb->data[header_size]; seq_num = (le16_to_cpu(wh->seq_ctrl) >> 4); - vif = adapter->vifs[0]; data_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; @@ -227,11 +210,33 @@ int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) data_desc->sta_id = vap_id; } + return 0; +} + +/* This function sends received data packet from driver to device */ +int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) +{ + struct rsi_hw *adapter = common->priv; + struct ieee80211_tx_info *info; + struct ieee80211_bss_conf *bss; + int status = -EIO; + + info = IEEE80211_SKB_CB(skb); + bss = &info->control.vif->bss_conf; + + if (!bss->assoc) { + status = -EINVAL; + goto err; + } + + status = rsi_prepare_data_desc(common, skb); + if (status) + goto err; + status = adapter->host_intf_ops->write_pkt(common->priv, skb->data, skb->len); if (status) - rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", - __func__); + rsi_dbg(ERR_ZONE, "%s: Failed to write pkt\n", __func__); err: ++common->tx_stats.total_tx_pkt_freed[skb->priority]; -- cgit v1.2.3-55-g7522 From 5f6ae7cae21120bd3236873872532b5864c44ebc Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:20 +0530 Subject: rsi: rename variable in_sdio_litefi_irq The variable in_sdio_litefi_irq is renamed to sdio_irq_task, as it is more relevant here. 
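The rename is groundwork for the following patches: the field records the task that is currently running the SDIO interrupt handler, so the register accessors can tell whether the host is already claimed. A condensed sketch of that usage, taken from the hunks that follow:

        /* Interrupt handler: note which task owns the claimed host. */
        dev->sdio_irq_task = current;
        rsi_interrupt_handler(adapter);
        dev->sdio_irq_task = NULL;

        /* Register accessors: claim/release only when running outside
         * the interrupt handler's task.
         */
        if (likely(dev->sdio_irq_task != current))
                sdio_claim_host(dev->pfunction);
        *data = sdio_readb(dev->pfunction, addr, &status);
        if (likely(dev->sdio_irq_task != current))
                sdio_release_host(dev->pfunction);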
Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_sdio.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index f11f8189e0b6..cbbc0448dc40 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -104,7 +104,7 @@ struct receive_info { struct rsi_91x_sdiodev { struct sdio_func *pfunction; - struct task_struct *in_sdio_litefi_irq; + struct task_struct *sdio_irq_task; struct receive_info rx_info; u32 next_read_delay; u32 sdio_high_speed_enable; -- cgit v1.2.3-55-g7522 From 72bccf51d459282e7bf66e2721d4a831a209e918 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:21 +0530 Subject: rsi: Optimise sdio claim and release host SDIO host is already claimed in our interrupt handler. Some lower level APIs claims host while performing SDIO read or write operations. Let's use sdio_irq_task variable to check if we are in interrupt context and claim/release the host accordingly. Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index f1ba8ac3783d..42d558b61721 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -138,12 +138,15 @@ static int rsi_issue_sdiocommand(struct sdio_func *func, static void rsi_handle_interrupt(struct sdio_func *function) { struct rsi_hw *adapter = sdio_get_drvdata(function); + struct rsi_91x_sdiodev *dev = + (struct rsi_91x_sdiodev *)adapter->rsi_dev; if (adapter->priv->fsm_state == FSM_FW_NOT_LOADED) return; - sdio_release_host(function); + + dev->sdio_irq_task = current; rsi_interrupt_handler(adapter); - sdio_claim_host(function); + dev->sdio_irq_task = NULL; } /** @@ -407,14 +410,16 @@ int rsi_sdio_read_register(struct rsi_hw *adapter, u8 fun_num = 0; int status; - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); if (fun_num == 0) *data = sdio_f0_readb(dev->pfunction, addr, &status); else *data = sdio_readb(dev->pfunction, addr, &status); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); return status; } @@ -438,14 +443,16 @@ int rsi_sdio_write_register(struct rsi_hw *adapter, (struct rsi_91x_sdiodev *)adapter->rsi_dev; int status = 0; - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); if (function == 0) sdio_f0_writeb(dev->pfunction, *data, addr, &status); else sdio_writeb(dev->pfunction, *data, addr, &status); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); return status; } @@ -490,11 +497,13 @@ static int rsi_sdio_read_register_multiple(struct rsi_hw *adapter, (struct rsi_91x_sdiodev *)adapter->rsi_dev; u32 status; - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); status = sdio_readsb(dev->pfunction, data, addr, count); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); if 
(status != 0) rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 read failed\n", __func__); @@ -532,11 +541,13 @@ int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, dev->write_fail++; } - sdio_claim_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_claim_host(dev->pfunction); status = sdio_writesb(dev->pfunction, addr, data, count); - sdio_release_host(dev->pfunction); + if (likely(dev->sdio_irq_task != current)) + sdio_release_host(dev->pfunction); if (status) { rsi_dbg(ERR_ZONE, "%s: Synch Cmd53 write failed %d\n", -- cgit v1.2.3-55-g7522 From ebf084ea0ec749f6ec576a67762f0ec34c514111 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:22 +0530 Subject: rsi: SDIO Rx packet processing enhancement Newer firmware sends information about number of blocks through interrupt only. We don't need to read extra register for this. This patch adds needed driver changes for this enhancment. The change here is backward compatible Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 34 ++++++++++++++++++++++------- drivers/net/wireless/rsi/rsi_main.h | 1 + drivers/net/wireless/rsi/rsi_sdio.h | 1 + 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index df2a63b1f15c..b6d0e2ae1412 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -69,20 +69,37 @@ int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word) static int rsi_process_pkt(struct rsi_common *common) { struct rsi_hw *adapter = common->priv; + struct rsi_91x_sdiodev *dev = + (struct rsi_91x_sdiodev *)adapter->rsi_dev; u8 num_blks = 0; u32 rcv_pkt_len = 0; int status = 0; + u8 value = 0; - status = rsi_sdio_read_register(adapter, - SDIO_RX_NUM_BLOCKS_REG, - &num_blks); + num_blks = ((adapter->interrupt_status & 1) | + ((adapter->interrupt_status >> RECV_NUM_BLOCKS) << 1)); - if (status) { - rsi_dbg(ERR_ZONE, - "%s: Failed to read pkt length from the card:\n", - __func__); - return status; + if (!num_blks) { + status = rsi_sdio_read_register(adapter, + SDIO_RX_NUM_BLOCKS_REG, + &value); + if (status) { + rsi_dbg(ERR_ZONE, + "%s: Failed to read pkt length from the card:\n", + __func__); + return status; + } + num_blks = value & 0x1f; } + + if (dev->write_fail == 2) + rsi_sdio_ack_intr(common->priv, (1 << MSDU_PKT_PENDING)); + + if (unlikely(!num_blks)) { + dev->write_fail = 2; + return -1; + } + rcv_pkt_len = (num_blks * 256); common->rx_data_pkt = kmalloc(rcv_pkt_len, GFP_KERNEL); @@ -224,6 +241,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) mutex_unlock(&common->tx_rxlock); return; } + adapter->interrupt_status = isr_status; if (isr_status == 0) { rsi_set_event(&common->tx_thread.event); diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 699e9da1a87b..c2e1c1ce285c 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -275,6 +275,7 @@ struct rsi_hw { bool blcmd_timer_expired; u32 flash_capacity; struct eepromrw_info eeprom; + u32 interrupt_status; u8 dfs_region; void *rsi_dev; struct rsi_host_intf_ops *host_intf_ops; diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index cbbc0448dc40..3cf67565feb1 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h 
@@ -41,6 +41,7 @@ enum sdio_interrupt_type { #define PKT_BUFF_FULL 1 #define PKT_MGMT_BUFF_FULL 2 #define MSDU_PKT_PENDING 3 +#define RECV_NUM_BLOCKS 4 /* Interrupt Bit Related Macros */ #define PKT_BUFF_AVAILABLE 1 #define FW_ASSERT_IND 2 -- cgit v1.2.3-55-g7522 From 6c409cad3d2b66cb4fd184d140fdf48e34890249 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:23 +0530 Subject: rsi: use separate mutex lock for receive thread Deadlock issue is observed during our stress tests. The root cause for the issue is same lock is used between tx and rx threads. This patch adds a separate mutex lock for rx thread to resolve the problem. Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_main.c | 1 + drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 10 +++++----- drivers/net/wireless/rsi/rsi_91x_usb_ops.c | 6 +++--- drivers/net/wireless/rsi/rsi_main.h | 2 ++ 4 files changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index f1cde0ca81f9..939f568dae9b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -221,6 +221,7 @@ struct rsi_hw *rsi_91x_init(void) rsi_init_event(&common->tx_thread.event); mutex_init(&common->mutex); mutex_init(&common->tx_rxlock); + mutex_init(&common->rx_lock); if (rsi_create_kthread(common, &common->tx_thread, diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index b6d0e2ae1412..b3f7adc9d085 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -230,7 +230,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) dev->rx_info.sdio_int_counter++; do { - mutex_lock(&common->tx_rxlock); + mutex_lock(&common->rx_lock); status = rsi_sdio_read_register(common->priv, RSI_FN1_INT_REGISTER, &isr_status); @@ -238,7 +238,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", __func__); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } adapter->interrupt_status = isr_status; @@ -246,7 +246,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) if (isr_status == 0) { rsi_set_event(&common->tx_thread.event); dev->rx_info.sdio_intr_status_zero++; - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } @@ -304,7 +304,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) rsi_dbg(ERR_ZONE, "%s: Failed to read pkt\n", __func__); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); return; } break; @@ -319,7 +319,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) } isr_status ^= BIT(isr_type - 1); } while (isr_status); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); } while (1); } diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c index d3e0a07604a6..465692b3c351 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c @@ -37,14 +37,14 @@ void rsi_usb_rx_thread(struct rsi_common *common) if (atomic_read(&dev->rx_thread.thread_done)) goto out; - mutex_lock(&common->tx_rxlock); + mutex_lock(&common->rx_lock); status = rsi_read_pkt(common, 0); if (status) { rsi_dbg(ERR_ZONE, "%s: Failed To read data", __func__); - mutex_unlock(&common->tx_rxlock); + 
mutex_unlock(&common->rx_lock); return; } - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->rx_lock); rsi_reset_event(&dev->rx_thread.event); if (adapter->rx_urb_submit(adapter)) { rsi_dbg(ERR_ZONE, diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index c2e1c1ce285c..29bccb7079ec 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -181,6 +181,8 @@ struct rsi_common { struct mutex mutex; /* Mutex used between tx/rx threads */ struct mutex tx_rxlock; + /* Mutex used for rx thread */ + struct mutex rx_lock; u8 endpoint; /* Channel/band related */ -- cgit v1.2.3-55-g7522 From cb16453565f8d2d6ee9f098ee575ea030e5d71e9 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 6 Jul 2017 20:07:24 +0530 Subject: rsi: Rename mutex tx_rxlock to the tx_lock. We have now added separate lock for Rx. This lock is used to protect tx path only Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_core.c | 10 +++++----- drivers/net/wireless/rsi/rsi_91x_main.c | 2 +- drivers/net/wireless/rsi/rsi_main.h | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 68f04a76769e..88a1a56a20ab 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -268,11 +268,11 @@ void rsi_core_qos_processor(struct rsi_common *common) break; } - mutex_lock(&common->tx_rxlock); + mutex_lock(&common->tx_lock); status = adapter->check_hw_queue_status(adapter, q_num); if ((status <= 0)) { - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); break; } @@ -287,7 +287,7 @@ void rsi_core_qos_processor(struct rsi_common *common) skb = rsi_core_dequeue_pkt(common, q_num); if (skb == NULL) { rsi_dbg(ERR_ZONE, "skb null\n"); - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); break; } @@ -297,14 +297,14 @@ void rsi_core_qos_processor(struct rsi_common *common) status = rsi_send_data_pkt(common, skb); if (status) { - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); break; } common->tx_stats.total_tx_pkt_send[q_num]++; tstamp_2 = jiffies; - mutex_unlock(&common->tx_rxlock); + mutex_unlock(&common->tx_lock); if (time_after(tstamp_2, tstamp_1 + (300 * HZ) / 1000)) schedule(); diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index 939f568dae9b..bb0febb17be0 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -220,7 +220,7 @@ struct rsi_hw *rsi_91x_init(void) rsi_init_event(&common->tx_thread.event); mutex_init(&common->mutex); - mutex_init(&common->tx_rxlock); + mutex_init(&common->tx_lock); mutex_init(&common->rx_lock); if (rsi_create_kthread(common, diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 29bccb7079ec..709f767aa34b 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -179,8 +179,8 @@ struct rsi_common { struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 1]; /* Mutex declaration */ struct mutex mutex; - /* Mutex used between tx/rx threads */ - struct mutex tx_rxlock; + /* Mutex used for tx thread */ + struct mutex tx_lock; /* Mutex used for rx thread */ struct mutex rx_lock; u8 endpoint; -- cgit v1.2.3-55-g7522 From 6c8ab76d6ae9ba8027c985444299372b965ed2d2 Mon Sep 17 
00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:32 +0530 Subject: rsi: add common structures needed for command packets All internal management packets (command packets) use some common fields and some packet specific fields for packet descriptors. This patch adds some common structures which are needed for all command packets. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_mgmt.h | 38 +++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index a8a195e8a91e..63360c2be726 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -221,6 +221,44 @@ struct rsi_mac_frame { __le16 desc_word[8]; } __packed; +#define PWR_SAVE_WAKEUP_IND BIT(0) +#define TCP_CHECK_SUM_OFFLOAD BIT(1) +#define CONFIRM_REQUIRED_TO_HOST BIT(2) +#define ADD_DELTA_TSF BIT(3) +#define FETCH_RETRY_CNT_FROM_HOST_DESC BIT(4) +#define EOSP_INDICATION BIT(5) +#define REQUIRE_TSF_SYNC_CONFIRM BIT(6) +#define ENCAP_MGMT_PKT BIT(7) + +struct rsi_cmd_desc_dword0 { + __le16 len_qno; + u8 frame_type; + u8 misc_flags; +}; + +struct rsi_cmd_desc_dword1 { + u8 xtend_desc_size; + u8 reserved1; + __le16 reserved2; +}; + +struct rsi_cmd_desc_dword2 { + __le32 pkt_info; /* Packet specific data */ +}; + +struct rsi_cmd_desc_dword3 { + __le16 token; + u8 qid_tid; + u8 sta_id; +}; + +struct rsi_cmd_desc { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + struct rsi_cmd_desc_dword2 desc_dword2; + struct rsi_cmd_desc_dword3 desc_dword3; +}; + struct rsi_boot_params { __le16 desc_word[8]; struct bootup_params bootup_params; -- cgit v1.2.3-55-g7522 From 9a629fafe7d8a486f8f02e2788c5368319f2dd0c Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:33 +0530 Subject: rsi: immediate wakeup bit and priority for TX command packets For all TX command packets immediate wakeup bit needs to be set in descriptor. This will make sure device will wakeup if it is in any sleep state. Priority of the packet is also set. 
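A condensed view of the change made in rsi_send_internal_mgmt_frame(), assuming the descriptor pointer is aimed at the head of the skb (that assignment is not visible in the quoted hunk):

        struct rsi_cmd_desc *desc = (struct rsi_cmd_desc *)skb->data;

        /* Wake the device from any sleep state before it fetches this
         * command packet, and queue it on the management software queue.
         */
        desc->desc_dword0.len_qno |= cpu_to_le16(DESC_IMMEDIATE_WAKEUP);
        skb->priority = MGMT_SOFT_Q;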
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 3 +++ drivers/net/wireless/rsi/rsi_mgmt.h | 1 + 2 files changed, 4 insertions(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index ebd1e5647f03..68771b05797e 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -266,11 +266,14 @@ static int rsi_send_internal_mgmt_frame(struct rsi_common *common, struct sk_buff *skb) { struct skb_info *tx_params; + struct rsi_cmd_desc *desc; if (skb == NULL) { rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__); return -ENOMEM; } + desc->desc_dword0.len_qno |= cpu_to_le16(DESC_IMMEDIATE_WAKEUP); + skb->priority = MGMT_SOFT_Q; tx_params = (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data; tx_params->flags |= INTERNAL_MGMT_PKT; skb_queue_tail(&common->tx_queue[MGMT_SOFT_Q], skb); diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 63360c2be726..6f7f181710f8 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -229,6 +229,7 @@ struct rsi_mac_frame { #define EOSP_INDICATION BIT(5) #define REQUIRE_TSF_SYNC_CONFIRM BIT(6) #define ENCAP_MGMT_PKT BIT(7) +#define DESC_IMMEDIATE_WAKEUP BIT(15) struct rsi_cmd_desc_dword0 { __le16 len_qno; -- cgit v1.2.3-55-g7522 From 6abdf2c19346b0f98437943215c6763e5f4847c9 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:34 +0530 Subject: rsi: Update in tx command frame radio capabilities Radio capabilities frame is updated to use common descriptor structure. Also, few changes to this frame is done like hardware queues are increase to 12 from 8, default channel number is included. 
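This and the following conversions replace open-coded desc_word[0] writes with rsi_set_len_qno(). Inferred from the encoding being replaced (frame length in the low 12 bits, queue number in the upper 4 bits), the helper presumably packs the two as in the sketch below; this is an inference, not the driver's actual definition:

        static inline void rsi_set_len_qno(__le16 *addr, u16 len, u8 qno)
        {
                /* low 12 bits: length; top 4 bits: queue number */
                *addr = cpu_to_le16((len & 0x0fff) | ((qno & 0xf) << 12));
        }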
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 56 ++++++++++++++++++--------------- drivers/net/wireless/rsi/rsi_main.h | 7 ++++- drivers/net/wireless/rsi/rsi_mgmt.h | 13 +++++++- 3 files changed, 48 insertions(+), 28 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 68771b05797e..65d2dd6b908a 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -301,10 +301,11 @@ static int rsi_load_radio_caps(struct rsi_common *common) 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0}; struct sk_buff *skb; + u16 frame_len = sizeof(struct rsi_radio_caps); rsi_dbg(INFO_ZONE, "%s: Sending rate symbol req frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_radio_caps)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", @@ -312,37 +313,40 @@ static int rsi_load_radio_caps(struct rsi_common *common) return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_radio_caps)); + memset(skb->data, 0, frame_len); radio_caps = (struct rsi_radio_caps *)skb->data; - radio_caps->desc_word[1] = cpu_to_le16(RADIO_CAPABILITIES); - radio_caps->desc_word[4] = cpu_to_le16(RSI_RF_TYPE << 8); + radio_caps->desc_dword0.frame_type = RADIO_CAPABILITIES; + radio_caps->channel_num = common->channel; + radio_caps->rf_model = RSI_RF_TYPE; if (common->channel_width == BW_40MHZ) { - radio_caps->desc_word[7] |= cpu_to_le16(RSI_LMAC_CLOCK_80MHZ); - radio_caps->desc_word[7] |= cpu_to_le16(RSI_ENABLE_40MHZ); + radio_caps->radio_cfg_info = RSI_LMAC_CLOCK_80MHZ; + radio_caps->radio_cfg_info |= RSI_ENABLE_40MHZ; if (common->fsm_state == FSM_MAC_INIT_DONE) { struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; + if (conf_is_ht40_plus(conf)) { - radio_caps->desc_word[5] = - cpu_to_le16(LOWER_20_ENABLE); - radio_caps->desc_word[5] |= - cpu_to_le16(LOWER_20_ENABLE >> 12); + radio_caps->radio_cfg_info = + RSI_CMDDESC_LOWER_20_ENABLE; + radio_caps->radio_info = + RSI_CMDDESC_LOWER_20_ENABLE; } else if (conf_is_ht40_minus(conf)) { - radio_caps->desc_word[5] = - cpu_to_le16(UPPER_20_ENABLE); - radio_caps->desc_word[5] |= - cpu_to_le16(UPPER_20_ENABLE >> 12); + radio_caps->radio_cfg_info = + RSI_CMDDESC_UPPER_20_ENABLE; + radio_caps->radio_info = + RSI_CMDDESC_UPPER_20_ENABLE; } else { - radio_caps->desc_word[5] = - cpu_to_le16(BW_40MHZ << 12); - radio_caps->desc_word[5] |= - cpu_to_le16(FULL40M_ENABLE); + radio_caps->radio_cfg_info = + RSI_CMDDESC_40MHZ; + radio_caps->radio_info = + RSI_CMDDESC_FULL_40_ENABLE; } } } + radio_caps->radio_info |= radio_id; radio_caps->sifs_tx_11n = cpu_to_le16(SIFS_TX_11N_VALUE); radio_caps->sifs_tx_11b = cpu_to_le16(SIFS_TX_11B_VALUE); @@ -351,8 +355,6 @@ static int rsi_load_radio_caps(struct rsi_common *common) radio_caps->cck_ack_tout = cpu_to_le16(CCK_ACK_TOUT_VALUE); radio_caps->preamble_type = cpu_to_le16(LONG_PREAMBLE); - radio_caps->desc_word[7] |= cpu_to_le16(radio_id << 8); - for (ii = 0; ii < MAX_HW_QUEUES; ii++) { radio_caps->qos_params[ii].cont_win_min_q = cpu_to_le16(3); radio_caps->qos_params[ii].cont_win_max_q = cpu_to_le16(0x3f); @@ -360,7 +362,7 @@ static int rsi_load_radio_caps(struct rsi_common *common) radio_caps->qos_params[ii].txop_q = 0; } - for (ii = 0; ii < MAX_HW_QUEUES - 4; ii++) { + for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) { radio_caps->qos_params[ii].cont_win_min_q = 
cpu_to_le16(common->edca_params[ii].cw_min); radio_caps->qos_params[ii].cont_win_max_q = @@ -371,17 +373,19 @@ static int rsi_load_radio_caps(struct rsi_common *common) cpu_to_le16(common->edca_params[ii].txop); } + radio_caps->qos_params[BROADCAST_HW_Q].txop_q = cpu_to_le16(0xffff); + radio_caps->qos_params[MGMT_HW_Q].txop_q = 0; + radio_caps->qos_params[BEACON_HW_Q].txop_q = cpu_to_le16(0xffff); + memcpy(&common->rate_pwr[0], &gc[0], 40); for (ii = 0; ii < 20; ii++) radio_caps->gcpd_per_rate[inx++] = cpu_to_le16(common->rate_pwr[ii] & 0x00FF); - radio_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_radio_caps) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - + rsi_set_len_qno(&radio_caps->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); - skb_put(skb, (sizeof(struct rsi_radio_caps))); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 709f767aa34b..a567986c5b0b 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -61,11 +61,16 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...); #define MAC_80211_HDR_FRAME_CONTROL 0 #define WME_NUM_AC 4 #define NUM_SOFT_QUEUES 5 -#define MAX_HW_QUEUES 8 +#define MAX_HW_QUEUES 12 #define INVALID_QUEUE 0xff #define MAX_CONTINUOUS_VO_PKTS 8 #define MAX_CONTINUOUS_VI_PKTS 4 +/* Hardware queue info */ +#define BROADCAST_HW_Q 9 +#define MGMT_HW_Q 10 +#define BEACON_HW_Q 11 + /* Queue information */ #define RSI_COEX_Q 0x0 #define RSI_WIFI_MGMT_Q 0x4 diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 6f7f181710f8..3b4bd85e9c4e 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -70,6 +70,10 @@ #define RSI_REKEY_PURPOSE BIT(13) #define RSI_ENCRYPT_PKT BIT(15) +#define RSI_CMDDESC_40MHZ BIT(4) +#define RSI_CMDDESC_UPPER_20_ENABLE BIT(5) +#define RSI_CMDDESC_LOWER_20_ENABLE BIT(6) +#define RSI_CMDDESC_FULL_40_ENABLE (BIT(5) | BIT(6)) #define UPPER_20_ENABLE (0x2 << 12) #define LOWER_20_ENABLE (0x4 << 12) #define FULL40M_ENABLE 0x6 @@ -317,7 +321,14 @@ struct qos_params { } __packed; struct rsi_radio_caps { - __le16 desc_word[8]; + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword0 desc_dword1; + u8 channel_num; + u8 rf_model; + __le16 ppe_ack_rate; + __le16 mode_11j; + u8 radio_cfg_info; + u8 radio_info; struct qos_params qos_params[MAX_HW_QUEUES]; u8 num_11n_rates; u8 num_11ac_rates; -- cgit v1.2.3-55-g7522 From dff80fc5fe4fa14299ee333fb37a7a7991b67c01 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:35 +0530 Subject: rsi: remove unnecessary check for 802.11 management packet The function rsi_mgmt_pkt_to_core() is for passing the 802.11 management frames to mac80211. So, it is unnecessary to check again for the frame type 802.11 management in this function. It can be checked before passing to this function. 
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 54 +++++++++++++++------------------ 1 file changed, 24 insertions(+), 30 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 65d2dd6b908a..3724dd4c6377 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -401,8 +401,7 @@ static int rsi_load_radio_caps(struct rsi_common *common) */ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, u8 *msg, - s32 msg_len, - u8 type) + s32 msg_len) { struct rsi_hw *adapter = common->priv; struct ieee80211_tx_info *info; @@ -410,37 +409,30 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, u8 pad_bytes = msg[4]; struct sk_buff *skb; - if (type == RX_DOT11_MGMT) { - if (!adapter->sc_nvifs) - return -ENOLINK; + if (!adapter->sc_nvifs) + return -ENOLINK; - msg_len -= pad_bytes; - if (msg_len <= 0) { - rsi_dbg(MGMT_RX_ZONE, - "%s: Invalid rx msg of len = %d\n", - __func__, msg_len); - return -EINVAL; - } + msg_len -= pad_bytes; + if (msg_len <= 0) { + rsi_dbg(MGMT_RX_ZONE, + "%s: Invalid rx msg of len = %d\n", + __func__, msg_len); + return -EINVAL; + } - skb = dev_alloc_skb(msg_len); - if (!skb) { - rsi_dbg(ERR_ZONE, "%s: Failed to allocate skb\n", - __func__); - return -ENOMEM; - } + skb = dev_alloc_skb(msg_len); + if (!skb) + return -ENOMEM; - skb_put_data(skb, - (u8 *)(msg + FRAME_DESC_SZ + pad_bytes), - msg_len); + skb_put_data(skb, + (u8 *)(msg + FRAME_DESC_SZ + pad_bytes), + msg_len); - info = IEEE80211_SKB_CB(skb); - rx_params = (struct skb_info *)info->driver_data; - rx_params->rssi = rsi_get_rssi(msg); - rx_params->channel = rsi_get_channel(msg); - rsi_indicate_pkt_to_os(common, skb); - } else { - rsi_dbg(MGMT_TX_ZONE, "%s: Internal Packet\n", __func__); - } + info = IEEE80211_SKB_CB(skb); + rx_params = (struct skb_info *)info->driver_data; + rx_params->rssi = rsi_get_rssi(msg); + rx_params->channel = rsi_get_channel(msg); + rsi_indicate_pkt_to_os(common, skb); return 0; } @@ -1641,8 +1633,10 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg) rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n", __func__); } + } else if (msg_type == RX_DOT11_MGMT) { + return rsi_mgmt_pkt_to_core(common, msg, msg_len); } else { - return rsi_mgmt_pkt_to_core(common, msg, msg_len, msg_type); + rsi_dbg(INFO_ZONE, "Received packet type: 0x%x\n", msg_type); } return 0; } -- cgit v1.2.3-55-g7522 From 59e006dc77c4e2b48c4efbe9a46477c7e6f3ca6c Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:36 +0530 Subject: rsi: Update peer notify command frame TX command frame peer notify is updated to use common descriptor structure. MPDU density value added to the frame. 
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 19 ++++++++++--------- drivers/net/wireless/rsi/rsi_mgmt.h | 4 +++- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 3724dd4c6377..fd9bcd719a33 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -460,10 +460,11 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, struct rsi_peer_notify *peer_notify; u16 vap_id = 0; int status; + u16 frame_len = sizeof(struct rsi_peer_notify); rsi_dbg(MGMT_TX_ZONE, "%s: Sending sta notify frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_peer_notify)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", @@ -471,7 +472,7 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_peer_notify)); + memset(skb->data, 0, frame_len); peer_notify = (struct rsi_peer_notify *)skb->data; peer_notify->command = cpu_to_le16(opmode << 1); @@ -489,16 +490,16 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, peer_notify->command |= cpu_to_le16((aid & 0xfff) << 4); ether_addr_copy(peer_notify->mac_addr, bssid); - + peer_notify->mpdu_density = cpu_to_le16(RSI_MPDU_DENSITY); peer_notify->sta_flags = cpu_to_le32((qos_enable) ? 1 : 0); - peer_notify->desc_word[0] = - cpu_to_le16((sizeof(struct rsi_peer_notify) - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - peer_notify->desc_word[1] = cpu_to_le16(PEER_NOTIFY); - peer_notify->desc_word[7] |= cpu_to_le16(vap_id << 8); + rsi_set_len_qno(&peer_notify->desc.desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), + RSI_WIFI_MGMT_Q); + peer_notify->desc.desc_dword0.frame_type = PEER_NOTIFY; + peer_notify->desc.desc_dword3.sta_id = vap_id; - skb_put(skb, sizeof(struct rsi_peer_notify)); + skb_put(skb, frame_len); status = rsi_send_internal_mgmt_frame(common, skb); diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 3b4bd85e9c4e..d2fe9ee1e934 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -165,6 +165,8 @@ #define ALLOW_CONN_PEER_MGMT_WHILE_BUF_FULL BIT(5) #define DISALLOW_BROADCAST_DATA BIT(6) +#define RSI_MPDU_DENSITY 0x8 + enum opmode { STA_OPMODE = 1, AP_OPMODE = 2 @@ -270,7 +272,7 @@ struct rsi_boot_params { } __packed; struct rsi_peer_notify { - __le16 desc_word[8]; + struct rsi_cmd_desc desc; u8 mac_addr[6]; __le16 command; __le16 mpdu_density; -- cgit v1.2.3-55-g7522 From 3a9828c92ba16b8a51e451a5d074d7e4c567950d Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:37 +0530 Subject: rsi: Update aggregation parameters command frame AMPDU aggregation parameters frame configured to device is modified to use common descriptor structure. 
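The new flag byte replaces the shifted fields of desc_word[7]. Condensed from the flag definitions and the if/else chain in the hunk below, its layout and the mapping of the ADDBA/DELBA events:

        /* aggr_params byte:
         *   bits 0-3  TID                          (RSI_AGGR_PARAMS_TID_MASK)
         *   bit  4    aggregation started (ADDBA)  (RSI_AGGR_PARAMS_START)
         *   bit  5    RX-direction aggregation     (RSI_AGGR_PARAMS_RX_AGGR)
         */
        aggr_params->aggr_params = tid & RSI_AGGR_PARAMS_TID_MASK;
        if (event == STA_TX_ADDBA_DONE || event == STA_RX_ADDBA_DONE)
                aggr_params->aggr_params |= RSI_AGGR_PARAMS_START;
        if (event == STA_RX_ADDBA_DONE || event == STA_RX_DELBA)
                aggr_params->aggr_params |= RSI_AGGR_PARAMS_RX_AGGR;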
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 41 ++++++++++++++------------------- drivers/net/wireless/rsi/rsi_mgmt.h | 14 +++++++++++ 2 files changed, 31 insertions(+), 24 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index fd9bcd719a33..a1a60f7dd42f 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -528,10 +528,11 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, u8 event) { struct sk_buff *skb = NULL; - struct rsi_mac_frame *mgmt_frame; + struct rsi_aggr_params *aggr_params; u8 peer_id = 0; + u16 frame_len = sizeof(struct rsi_aggr_params); - skb = dev_alloc_skb(FRAME_DESC_SZ); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", @@ -539,37 +540,29 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, return -ENOMEM; } - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + memset(skb->data, 0, frame_len); + aggr_params = (struct rsi_aggr_params *)skb->data; rsi_dbg(MGMT_TX_ZONE, "%s: Sending AMPDU indication frame\n", __func__); - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(AMPDU_IND); + rsi_set_len_qno(&aggr_params->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + aggr_params->desc_dword0.frame_type = AMPDU_IND; + aggr_params->aggr_params = tid & RSI_AGGR_PARAMS_TID_MASK; + aggr_params->peer_id = peer_id; if (event == STA_TX_ADDBA_DONE) { - mgmt_frame->desc_word[4] = cpu_to_le16(ssn); - mgmt_frame->desc_word[5] = cpu_to_le16(buf_size); - mgmt_frame->desc_word[7] = - cpu_to_le16((tid | (START_AMPDU_AGGR << 4) | (peer_id << 8))); + aggr_params->seq_start = cpu_to_le16(ssn); + aggr_params->baw_size = cpu_to_le16(buf_size); + aggr_params->aggr_params |= RSI_AGGR_PARAMS_START; } else if (event == STA_RX_ADDBA_DONE) { - mgmt_frame->desc_word[4] = cpu_to_le16(ssn); - mgmt_frame->desc_word[7] = cpu_to_le16(tid | - (START_AMPDU_AGGR << 4) | - (RX_BA_INDICATION << 5) | - (peer_id << 8)); - } else if (event == STA_TX_DELBA) { - mgmt_frame->desc_word[7] = cpu_to_le16(tid | - (STOP_AMPDU_AGGR << 4) | - (peer_id << 8)); + aggr_params->seq_start = cpu_to_le16(ssn); + aggr_params->aggr_params |= (RSI_AGGR_PARAMS_START | + RSI_AGGR_PARAMS_RX_AGGR); } else if (event == STA_RX_DELBA) { - mgmt_frame->desc_word[7] = cpu_to_le16(tid | - (STOP_AMPDU_AGGR << 4) | - (RX_BA_INDICATION << 5) | - (peer_id << 8)); + aggr_params->aggr_params |= RSI_AGGR_PARAMS_RX_AGGR; } - skb_put(skb, FRAME_DESC_SZ); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index d2fe9ee1e934..68863c814c82 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -280,6 +280,20 @@ struct rsi_peer_notify { __le32 sta_flags; } __packed; +/* Aggregation params flags */ +#define RSI_AGGR_PARAMS_TID_MASK 0xf +#define RSI_AGGR_PARAMS_START BIT(4) +#define RSI_AGGR_PARAMS_RX_AGGR BIT(5) +struct rsi_aggr_params { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword0 desc_dword1; + __le16 seq_start; + __le16 baw_size; + __le16 token; + u8 aggr_params; + u8 peer_id; +} __packed; + struct rsi_vap_caps { __le16 desc_word[8]; u8 mac_addr[6]; -- cgit v1.2.3-55-g7522 From 5c7ca1bbf2007c767a8027d8a4ee1de8f93eb48d 
Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:38 +0530 Subject: rsi: Update baseband RF programming frame Baseband RF programming frame configured to device is modified to use common descriptor structure. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 24 +++++++++++++----------- drivers/net/wireless/rsi/rsi_mgmt.h | 12 ++++++++++++ 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index a1a60f7dd42f..ead3573b027b 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -577,34 +577,36 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, static int rsi_program_bb_rf(struct rsi_common *common) { struct sk_buff *skb; - struct rsi_mac_frame *mgmt_frame; + struct rsi_bb_rf_prog *bb_rf_prog; + u16 frame_len = sizeof(struct rsi_bb_rf_prog); rsi_dbg(MGMT_TX_ZONE, "%s: Sending program BB/RF frame\n", __func__); - skb = dev_alloc_skb(FRAME_DESC_SZ); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + memset(skb->data, 0, frame_len); + bb_rf_prog = (struct rsi_bb_rf_prog *)skb->data; - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(BBP_PROG_IN_TA); - mgmt_frame->desc_word[4] = cpu_to_le16(common->endpoint); + rsi_set_len_qno(&bb_rf_prog->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + bb_rf_prog->desc_dword0.frame_type = BBP_PROG_IN_TA; + bb_rf_prog->endpoint = common->endpoint; + bb_rf_prog->rf_power_mode = common->wlan_rf_power_mode; if (common->rf_reset) { - mgmt_frame->desc_word[7] = cpu_to_le16(RF_RESET_ENABLE); + bb_rf_prog->flags = cpu_to_le16(RF_RESET_ENABLE); rsi_dbg(MGMT_TX_ZONE, "%s: ===> RF RESET REQUEST SENT <===\n", __func__); common->rf_reset = 0; } common->bb_rf_prog_count = 1; - mgmt_frame->desc_word[7] |= cpu_to_le16(PUT_BBP_RESET | - BBP_REG_WRITE | (RSI_RF_TYPE << 4)); - skb_put(skb, FRAME_DESC_SZ); + bb_rf_prog->flags |= cpu_to_le16(PUT_BBP_RESET | BBP_REG_WRITE | + (RSI_RF_TYPE << 4)); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 68863c814c82..d44c79f74882 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -294,6 +294,18 @@ struct rsi_aggr_params { u8 peer_id; } __packed; +struct rsi_bb_rf_prog { + struct rsi_cmd_desc_dword0 desc_dword0; + __le16 reserved1; + u8 rf_power_mode; + u8 reserved2; + u8 endpoint; + u8 reserved3; + __le16 reserved4; + __le16 reserved5; + __le16 flags; +} __packed; + struct rsi_vap_caps { __le16 desc_word[8]; u8 mac_addr[6]; -- cgit v1.2.3-55-g7522 From f04854fa3c38286e0c1ba504a32fd5dc04d5cb69 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:39 +0530 Subject: rsi: update set_channel command frame TX command frame set_channel is modified to use common descriptor structure. Also DFS channel indication to firmware is added in the descriptor for dfs channels configuration. 
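Condensed from the hunk below: the radar/no-IR indication now rides in the top bit of the 2 GHz antenna-gain field, and a TX power value is programmed only for channels on which transmission is allowed:

        chan_cfg->channel_number = channel->hw_value;
        if ((channel->flags & IEEE80211_CHAN_NO_IR) ||
            (channel->flags & IEEE80211_CHAN_RADAR)) {
                /* DFS or passive channel: flag it, send no TX power. */
                chan_cfg->antenna_gain_offset_2g |= RSI_CHAN_RADAR;
        } else {
                if (common->tx_power < channel->max_power)
                        chan_cfg->tx_power = cpu_to_le16(common->tx_power);
                else
                        chan_cfg->tx_power = cpu_to_le16(channel->max_power);
        }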
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 47 +++++++++++++++------------------ drivers/net/wireless/rsi/rsi_mgmt.h | 13 +++++++++ 2 files changed, 35 insertions(+), 25 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index ead3573b027b..4bcb84087874 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -965,12 +965,13 @@ int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel) { struct sk_buff *skb = NULL; - struct rsi_mac_frame *mgmt_frame; + struct rsi_chan_config *chan_cfg; + u16 frame_len = sizeof(struct rsi_chan_config); rsi_dbg(MGMT_TX_ZONE, "%s: Sending scan req frame\n", __func__); - skb = dev_alloc_skb(FRAME_DESC_SZ); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); @@ -981,37 +982,33 @@ int rsi_set_channel(struct rsi_common *common, dev_kfree_skb(skb); return 0; } - memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; - - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST); - mgmt_frame->desc_word[4] = cpu_to_le16(channel->hw_value); - - mgmt_frame->desc_word[4] |= - cpu_to_le16(((char)(channel->max_antenna_gain)) << 8); - mgmt_frame->desc_word[5] = - cpu_to_le16((char)(channel->max_antenna_gain)); - - mgmt_frame->desc_word[7] = cpu_to_le16(PUT_BBP_RESET | - BBP_REG_WRITE | - (RSI_RF_TYPE << 4)); - - if (!(channel->flags & IEEE80211_CHAN_NO_IR) && - !(channel->flags & IEEE80211_CHAN_RADAR)) { + memset(skb->data, 0, frame_len); + chan_cfg = (struct rsi_chan_config *)skb->data; + + rsi_set_len_qno(&chan_cfg->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + chan_cfg->desc_dword0.frame_type = SCAN_REQUEST; + chan_cfg->channel_number = channel->hw_value; + chan_cfg->antenna_gain_offset_2g = channel->max_antenna_gain; + chan_cfg->antenna_gain_offset_5g = channel->max_antenna_gain; + chan_cfg->region_rftype = (RSI_RF_TYPE & 0xf) << 4; + + if ((channel->flags & IEEE80211_CHAN_NO_IR) || + (channel->flags & IEEE80211_CHAN_RADAR)) { + chan_cfg->antenna_gain_offset_2g |= RSI_CHAN_RADAR; + } else { if (common->tx_power < channel->max_power) - mgmt_frame->desc_word[6] = cpu_to_le16(common->tx_power); + chan_cfg->tx_power = cpu_to_le16(common->tx_power); else - mgmt_frame->desc_word[6] = cpu_to_le16(channel->max_power); + chan_cfg->tx_power = cpu_to_le16(channel->max_power); } - mgmt_frame->desc_word[7] = cpu_to_le16(common->priv->dfs_region); + chan_cfg->region_rftype |= (common->priv->dfs_region & 0xf); if (common->channel_width == BW_40MHZ) - mgmt_frame->desc_word[5] |= cpu_to_le16(0x1 << 8); + chan_cfg->channel_width = 0x1; common->channel = channel->hw_value; - skb_put(skb, FRAME_DESC_SZ); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index d44c79f74882..2f49e55650b0 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -166,6 +166,7 @@ #define DISALLOW_BROADCAST_DATA BIT(6) #define RSI_MPDU_DENSITY 0x8 +#define RSI_CHAN_RADAR BIT(7) enum opmode { STA_OPMODE = 1, @@ -306,6 +307,18 @@ struct rsi_bb_rf_prog { __le16 flags; } __packed; +struct rsi_chan_config { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + u8 
channel_number; + u8 antenna_gain_offset_2g; + u8 antenna_gain_offset_5g; + u8 channel_width; + __le16 tx_power; + u8 region_rftype; + u8 flags; +} __packed; + struct rsi_vap_caps { __le16 desc_word[8]; u8 mac_addr[6]; -- cgit v1.2.3-55-g7522 From de1d1813a8ca31e8c1229fe0b42eba0574f43e89 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:40 +0530 Subject: rsi: update vap capabilities command frame VAP capablities frame configured to device is modified to use common descriptor structure. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 56 ++++++++++++++++++--------------- drivers/net/wireless/rsi/rsi_main.h | 3 ++ drivers/net/wireless/rsi/rsi_mgmt.h | 19 +++++++++-- 3 files changed, 49 insertions(+), 29 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 4bcb84087874..3ce1f71cf537 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -230,6 +230,8 @@ static void rsi_set_default_parameters(struct rsi_common *common) common->rf_power_val = 0; /* Default 1.9V */ common->wlan_rf_power_mode = 0; common->obm_ant_sel_val = 2; + common->beacon_interval = RSI_BEACON_INTERVAL; + common->dtim_cnt = RSI_DTIM_COUNT; } /** @@ -627,59 +629,61 @@ int rsi_set_vap_capabilities(struct rsi_common *common, struct rsi_hw *adapter = common->priv; struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; + u16 frame_len = sizeof(struct rsi_vap_caps); u16 vap_id = 0; rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_vap_caps)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_vap_caps)); + memset(skb->data, 0, frame_len); vap_caps = (struct rsi_vap_caps *)skb->data; - vap_caps->desc_word[0] = cpu_to_le16((sizeof(struct rsi_vap_caps) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - vap_caps->desc_word[1] = cpu_to_le16(VAP_CAPABILITIES); - vap_caps->desc_word[2] = cpu_to_le16(vap_status << 8); - vap_caps->desc_word[4] = cpu_to_le16(mode | - (common->channel_width << 8)); - vap_caps->desc_word[7] = cpu_to_le16((vap_id << 8) | - (common->mac_id << 4) | - common->radio_id); + rsi_set_len_qno(&vap_caps->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); + vap_caps->desc_dword0.frame_type = VAP_CAPABILITIES; + vap_caps->status = vap_status; + vap_caps->vif_type = mode; + vap_caps->channel_bw = common->channel_width; + vap_caps->vap_id = vap_id; + vap_caps->radioid_macid = ((common->mac_id & 0xf) << 4) | + (common->radio_id & 0xf); memcpy(vap_caps->mac_addr, common->mac_addr, IEEE80211_ADDR_LEN); vap_caps->keep_alive_period = cpu_to_le16(90); vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD); vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold); - vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6); if (common->band == NL80211_BAND_5GHZ) { - vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6); - if (conf_is_ht40(&common->priv->hw->conf)) { - vap_caps->default_ctrl_rate |= - cpu_to_le32(FULL40M_ENABLE << 16); - } + vap_caps->default_ctrl_rate = cpu_to_le16(RSI_RATE_6); + vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6); } else { - vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_1); + vap_caps->default_ctrl_rate = 
cpu_to_le16(RSI_RATE_1); + vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_1); + } + if (conf_is_ht40(conf)) { if (conf_is_ht40_minus(conf)) - vap_caps->default_ctrl_rate |= - cpu_to_le32(UPPER_20_ENABLE << 16); + vap_caps->ctrl_rate_flags = + cpu_to_le16(UPPER_20_ENABLE); else if (conf_is_ht40_plus(conf)) - vap_caps->default_ctrl_rate |= - cpu_to_le32(LOWER_20_ENABLE << 16); + vap_caps->ctrl_rate_flags = + cpu_to_le16(LOWER_20_ENABLE); + else + vap_caps->ctrl_rate_flags = + cpu_to_le16(FULL40M_ENABLE); } vap_caps->default_data_rate = 0; - vap_caps->beacon_interval = cpu_to_le16(200); - vap_caps->dtim_period = cpu_to_le16(4); + vap_caps->beacon_interval = cpu_to_le16(common->beacon_interval); + vap_caps->dtim_period = cpu_to_le16(common->dtim_cnt); - skb_put(skb, sizeof(*vap_caps)); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index a567986c5b0b..485b97ab5779 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -239,6 +239,9 @@ struct rsi_common { u8 obm_ant_sel_val; int tx_power; u8 ant_in_use; + + u16 beacon_interval; + u8 dtim_cnt; }; enum host_intf { diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 2f49e55650b0..fcde44e966f1 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -167,6 +167,8 @@ #define RSI_MPDU_DENSITY 0x8 #define RSI_CHAN_RADAR BIT(7) +#define RSI_BEACON_INTERVAL 200 +#define RSI_DTIM_COUNT 2 enum opmode { STA_OPMODE = 1, @@ -320,19 +322,30 @@ struct rsi_chan_config { } __packed; struct rsi_vap_caps { - __le16 desc_word[8]; + struct rsi_cmd_desc_dword0 desc_dword0; + u8 reserved1; + u8 status; + __le16 reserved2; + u8 vif_type; + u8 channel_bw; + __le16 antenna_info; + u8 radioid_macid; + u8 vap_id; + __le16 reserved3; u8 mac_addr[6]; __le16 keep_alive_period; u8 bssid[6]; - __le16 reserved; + __le16 reserved4; __le32 flags; __le16 frag_threshold; __le16 rts_threshold; __le32 default_mgmt_rate; - __le32 default_ctrl_rate; + __le16 default_ctrl_rate; + __le16 ctrl_rate_flags; __le32 default_data_rate; __le16 beacon_interval; __le16 dtim_period; + __le16 beacon_miss_threshold; } __packed; struct rsi_set_key { -- cgit v1.2.3-55-g7522 From a84faab0fdb6a2e99c90fff6e3344bf5c50bd7cd Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:41 +0530 Subject: rsi: update set_key command frame TX command frame set_key is modified to use common descriptor structure. 
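For reference while reading the hunk below, the key_desc bit layout introduced by the new defines in rsi_mgmt.h:

        /* key_desc bits:
         *   BIT(1)   RSI_KEY_TYPE_BROADCAST  - group/broadcast key
         *   BIT(2)   RSI_WEP_KEY             - WEP cipher
         *   BIT(3)   RSI_WEP_KEY_104         - 104-bit WEP key
         *   BIT(4)   RSI_CIPHER_WPA          - non-WEP (WPA) cipher
         *   BIT(5)   RSI_CIPHER_TKIP         - TKIP cipher
         *   BIT(13)  RSI_PROTECT_DATA_FRAMES - protect data frames
         *   14..15   key index               (RSI_KEY_ID_OFFSET)
         */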
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 26 +++++++++++++------------- drivers/net/wireless/rsi/rsi_mgmt.h | 16 +++++++++++++++- 2 files changed, 28 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 3ce1f71cf537..9395d6deac15 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -709,39 +709,39 @@ int rsi_hal_load_key(struct rsi_common *common, struct sk_buff *skb = NULL; struct rsi_set_key *set_key; u16 key_descriptor = 0; + u16 frame_len = sizeof(struct rsi_set_key); rsi_dbg(MGMT_TX_ZONE, "%s: Sending load key frame\n", __func__); - skb = dev_alloc_skb(sizeof(struct rsi_set_key)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_set_key)); + memset(skb->data, 0, frame_len); set_key = (struct rsi_set_key *)skb->data; if ((cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104)) { key_len += 1; - key_descriptor |= BIT(2); + key_descriptor |= RSI_WEP_KEY; if (key_len >= 13) - key_descriptor |= BIT(3); + key_descriptor |= RSI_WEP_KEY_104; } else if (cipher != KEY_TYPE_CLEAR) { - key_descriptor |= BIT(4); + key_descriptor |= RSI_CIPHER_WPA; if (key_type == RSI_PAIRWISE_KEY) key_id = 0; if (cipher == WLAN_CIPHER_SUITE_TKIP) - key_descriptor |= BIT(5); + key_descriptor |= RSI_CIPHER_TKIP; } - key_descriptor |= (key_type | BIT(13) | (key_id << 14)); + key_descriptor |= (key_type | RSI_PROTECT_DATA_FRAMES | (key_id << 14)); - set_key->desc_word[0] = cpu_to_le16((sizeof(struct rsi_set_key) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); - set_key->desc_word[1] = cpu_to_le16(SET_KEY_REQ); - set_key->desc_word[4] = cpu_to_le16(key_descriptor); + rsi_set_len_qno(&set_key->desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); + set_key->desc_dword0.frame_type = SET_KEY_REQ; + set_key->key_desc = cpu_to_le16(key_descriptor); if ((cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104)) { @@ -755,7 +755,7 @@ int rsi_hal_load_key(struct rsi_common *common, memcpy(set_key->tx_mic_key, &data[16], 8); memcpy(set_key->rx_mic_key, &data[24], 8); - skb_put(skb, sizeof(struct rsi_set_key)); + skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); } diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index fcde44e966f1..5df64d3c67dc 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -348,8 +348,22 @@ struct rsi_vap_caps { __le16 beacon_miss_threshold; } __packed; +/* Key descriptor flags */ +#define RSI_KEY_TYPE_BROADCAST BIT(1) +#define RSI_WEP_KEY BIT(2) +#define RSI_WEP_KEY_104 BIT(3) +#define RSI_CIPHER_WPA BIT(4) +#define RSI_CIPHER_TKIP BIT(5) +#define RSI_PROTECT_DATA_FRAMES BIT(13) +#define RSI_KEY_ID_MASK 0xC0 +#define RSI_KEY_ID_OFFSET 14 struct rsi_set_key { - __le16 desc_word[8]; + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + __le16 key_desc; + __le32 bpn; + u8 sta_id; + u8 vap_id; u8 key[4][32]; u8 tx_mic_key[8]; u8 rx_mic_key[8]; -- cgit v1.2.3-55-g7522 From 2a58900bf59db2ea2212afec8487a143063075ef Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:42 +0530 Subject: rsi: set_key enhancements Broadcast bit to be set for broadcast 
and multicast packets. For remove_key, tx and rx mic need not be filled. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 9395d6deac15..3bffe310f8d9 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -723,38 +723,40 @@ int rsi_hal_load_key(struct rsi_common *common, memset(skb->data, 0, frame_len); set_key = (struct rsi_set_key *)skb->data; + if (key_type == RSI_GROUP_KEY) + key_descriptor = RSI_KEY_TYPE_BROADCAST; if ((cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104)) { - key_len += 1; + key_id = 0; key_descriptor |= RSI_WEP_KEY; if (key_len >= 13) key_descriptor |= RSI_WEP_KEY_104; } else if (cipher != KEY_TYPE_CLEAR) { key_descriptor |= RSI_CIPHER_WPA; - if (key_type == RSI_PAIRWISE_KEY) - key_id = 0; if (cipher == WLAN_CIPHER_SUITE_TKIP) key_descriptor |= RSI_CIPHER_TKIP; } - key_descriptor |= (key_type | RSI_PROTECT_DATA_FRAMES | (key_id << 14)); + key_descriptor |= RSI_PROTECT_DATA_FRAMES; + key_descriptor |= ((key_id << RSI_KEY_ID_OFFSET) & RSI_KEY_ID_MASK); rsi_set_len_qno(&set_key->desc_dword0.len_qno, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); set_key->desc_dword0.frame_type = SET_KEY_REQ; set_key->key_desc = cpu_to_le16(key_descriptor); - if ((cipher == WLAN_CIPHER_SUITE_WEP40) || - (cipher == WLAN_CIPHER_SUITE_WEP104)) { - memcpy(&set_key->key[key_id][1], - data, - key_len * 2); + if (data) { + if ((cipher == WLAN_CIPHER_SUITE_WEP40) || + (cipher == WLAN_CIPHER_SUITE_WEP104)) { + memcpy(&set_key->key[key_id][1], data, key_len * 2); + } else { + memcpy(&set_key->key[0][0], data, key_len); + } + memcpy(set_key->tx_mic_key, &data[16], 8); + memcpy(set_key->rx_mic_key, &data[24], 8); } else { - memcpy(&set_key->key[0][0], data, key_len); + memset(&set_key[FRAME_DESC_SZ], 0, frame_len - FRAME_DESC_SZ); } - memcpy(set_key->tx_mic_key, &data[16], 8); - memcpy(set_key->rx_mic_key, &data[24], 8); - skb_put(skb, frame_len); return rsi_send_internal_mgmt_frame(common, skb); -- cgit v1.2.3-55-g7522 From b3115e8a426733988cfe32d8c8c418c84c88e198 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:43 +0530 Subject: rsi: update autorate request command frame When Short Gaurd Interval is enabled bit 9 is set in rate. Otherwise it should not be set. Added missing 'else' case in this patch. 
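The missing branch is easiest to see with the full conditional; `sgi_enabled` below stands in for the driver's short-guard-interval check, whose exact expression lies outside the quoted hunk:

        if (sgi_enabled || conf_is_ht40(&common->priv->hw->conf))
                auto_rate->supported_rates[ii++] =
                        cpu_to_le16(rsi_mcsrates[kk] | BIT(9));
        else
                auto_rate->supported_rates[ii++] =
                        cpu_to_le16(rsi_mcsrates[kk]);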
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 3bffe310f8d9..77b675701b87 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1199,6 +1199,9 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) conf_is_ht40(&common->priv->hw->conf)) auto_rate->supported_rates[ii++] = cpu_to_le16(rsi_mcsrates[kk] | BIT(9)); + else + auto_rate->supported_rates[ii++] = + cpu_to_le16(rsi_mcsrates[kk]); auto_rate->supported_rates[ii] = cpu_to_le16(rsi_mcsrates[kk--]); } -- cgit v1.2.3-55-g7522 From bcb283d27f653597180fe125aef38f2a06e46bbf Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:44 +0530 Subject: rsi: block/unblock data queues as per connection status Data queues should be unblocked after station add notify frame is sent and should be blocked after station delete notify is sent. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 77b675701b87..64bbd0a6f8de 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1250,6 +1250,7 @@ void rsi_inform_bss_status(struct rsi_common *common, u16 aid) { if (status) { + common->hw_data_qs_blocked = true; rsi_hal_send_sta_notify_frame(common, RSI_IFTYPE_STATION, STA_CONNECTED, @@ -1258,13 +1259,17 @@ void rsi_inform_bss_status(struct rsi_common *common, aid); if (common->min_rate == 0xffff) rsi_send_auto_rate_request(common); + if (!rsi_send_block_unblock_frame(common, false)) + common->hw_data_qs_blocked = false; } else { + common->hw_data_qs_blocked = true; rsi_hal_send_sta_notify_frame(common, RSI_IFTYPE_STATION, STA_DISCONNECTED, bssid, qos_enable, aid); + rsi_send_block_unblock_frame(common, true); } } -- cgit v1.2.3-55-g7522 From 79e590d924de3f73f7469b3927c8aa7d086e4c82 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:45 +0530 Subject: rsi: update tx command frame block/unblock data TX command frame block/unblock data is modified to use common descriptor structure. 
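Condensed from the hunk below (with `assoc` standing in for the function's status argument): queues start out blocked, the station-add notification and auto-rate request go out first, and data queues are unblocked only if the unblock frame is sent successfully; on disconnection the block frame follows the delete notification:

        if (assoc) {
                common->hw_data_qs_blocked = true;
                rsi_hal_send_sta_notify_frame(common, RSI_IFTYPE_STATION,
                                              STA_CONNECTED, bssid,
                                              qos_enable, aid);
                if (common->min_rate == 0xffff)
                        rsi_send_auto_rate_request(common);
                if (!rsi_send_block_unblock_frame(common, false))
                        common->hw_data_qs_blocked = false;
        } else {
                common->hw_data_qs_blocked = true;
                rsi_hal_send_sta_notify_frame(common, RSI_IFTYPE_STATION,
                                              STA_DISCONNECTED, bssid,
                                              qos_enable, aid);
                rsi_send_block_unblock_frame(common, true);
        }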
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 16 +++++++++------- drivers/net/wireless/rsi/rsi_mgmt.h | 13 +++++++++++++ 2 files changed, 22 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 64bbd0a6f8de..1fba7bba3a10 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1327,7 +1327,7 @@ static int rsi_eeprom_read(struct rsi_common *common) */ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event) { - struct rsi_mac_frame *mgmt_frame; + struct rsi_block_unblock_data *mgmt_frame; struct sk_buff *skb; rsi_dbg(MGMT_TX_ZONE, "%s: Sending block/unblock frame\n", __func__); @@ -1340,23 +1340,25 @@ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event) } memset(skb->data, 0, FRAME_DESC_SZ); - mgmt_frame = (struct rsi_mac_frame *)skb->data; + mgmt_frame = (struct rsi_block_unblock_data *)skb->data; - mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); - mgmt_frame->desc_word[1] = cpu_to_le16(BLOCK_HW_QUEUE); + rsi_set_len_qno(&mgmt_frame->desc_dword0.len_qno, 0, RSI_WIFI_MGMT_Q); + mgmt_frame->desc_dword0.frame_type = BLOCK_HW_QUEUE; + mgmt_frame->host_quiet_info = QUIET_INFO_VALID; if (block_event) { rsi_dbg(INFO_ZONE, "blocking the data qs\n"); - mgmt_frame->desc_word[4] = cpu_to_le16(0xf); + mgmt_frame->block_q_bitmap = cpu_to_le16(0xf); + mgmt_frame->block_q_bitmap |= cpu_to_le16(0xf << 4); } else { rsi_dbg(INFO_ZONE, "unblocking the data qs\n"); - mgmt_frame->desc_word[5] = cpu_to_le16(0xf); + mgmt_frame->unblock_q_bitmap = cpu_to_le16(0xf); + mgmt_frame->unblock_q_bitmap |= cpu_to_le16(0xf << 4); } skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); - } /** diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 5df64d3c67dc..cb0b17ec48d0 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -381,6 +381,19 @@ struct rsi_auto_rate { __le16 supported_rates[40]; } __packed; +#define QUIET_INFO_VALID BIT(0) +#define QUIET_ENABLE BIT(1) +struct rsi_block_unblock_data { + struct rsi_cmd_desc_dword0 desc_dword0; + u8 xtend_desc_size; + u8 host_quiet_info; + __le16 reserved; + __le16 block_q_bitmap; + __le16 unblock_q_bitmap; + __le16 token; + __le16 flush_q_bitmap; +} __packed; + struct qos_params { __le16 cont_win_min_q; __le16 cont_win_max_q; -- cgit v1.2.3-55-g7522 From 5059afacee8815ebf387f79c55ff9e60a5ba8c6f Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:46 +0530 Subject: rsi: Remove internal header from Tx status skb Device specific descriptor for each TX packet is prepared on top of skb data address. This descriptor should be pulled out before indicating the TX status to mac80211. 
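A userspace-style sketch of what the skb_pull() step in the patch below accomplishes conceptually (the struct here is a stand-in, not the kernel's sk_buff): the data pointer is advanced past the driver's internal descriptor so the status path hands mac80211 only the original frame.

#include <stddef.h>
#include <stdint.h>

struct fake_skb {
        uint8_t *data;                /* driver descriptor followed by frame */
        size_t   len;
};

static uint8_t *fake_skb_pull(struct fake_skb *skb, size_t hdr_len)
{
        if (hdr_len > skb->len)
                return NULL;          /* nothing sensible to strip */
        skb->data += hdr_len;         /* skip the internal descriptor */
        skb->len  -= hdr_len;
        return skb->data;             /* now points at the frame itself */
}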
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 021e5ac5f107..36303ae820af 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -229,12 +229,20 @@ void rsi_indicate_tx_status(struct rsi_hw *adapter, int status) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct skb_info *tx_params; - memset(info->driver_data, 0, IEEE80211_TX_INFO_DRIVER_DATA_SIZE); + if (!adapter->hw) { + rsi_dbg(ERR_ZONE, "##### No MAC #####\n"); + return; + } if (!status) info->flags |= IEEE80211_TX_STAT_ACK; + tx_params = (struct skb_info *)info->driver_data; + skb_pull(skb, tx_params->internal_hdr_size); + memset(info->driver_data, 0, IEEE80211_TX_INFO_DRIVER_DATA_SIZE); + ieee80211_tx_status_irqsafe(adapter->hw, skb); } -- cgit v1.2.3-55-g7522 From 86583258499ce5c3ee0e49f897c2d1c013ebdbd9 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:47 +0530 Subject: rsi: Send rx filter frame to device when interface is down When interface is down it is better to stop all RX packets to host. 0xffff will block all packets to host. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 36303ae820af..d094b0ab05ba 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -301,6 +301,10 @@ static void rsi_mac80211_stop(struct ieee80211_hw *hw) mutex_lock(&common->mutex); common->iface_down = true; + + /* Block all rx frames */ + rsi_send_rx_filter_frame(common, 0xffff); + mutex_unlock(&common->mutex); } -- cgit v1.2.3-55-g7522 From d7203a83f065dc78d907c2a2f5d317972ec28d52 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Mon, 10 Jul 2017 18:10:48 +0530 Subject: rsi: regulatory enhancements Below regulatory changes are included this patch * Country code is saved as it will be used in bgscan. * Region codes are mapped according to RSI region codes. * Radar flag settings are moved under the check if 5GHZ band is enabled. 
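One possible alternative shape for the region mapping introduced below, shown only as a design note with placeholder enum names (not the nl80211 or RSI definitions): a lookup table keeps the mapping data-driven and makes the fallback explicit.

enum dfs_region_in  { IN_UNSET, IN_FCC, IN_ETSI, IN_JP };       /* placeholders */
enum rsi_region_out { OUT_WORLD, OUT_FCC, OUT_ETSI, OUT_TELEC }; /* placeholders */

static enum rsi_region_out map_region(enum dfs_region_in r)
{
        static const enum rsi_region_out map[] = {
                [IN_UNSET] = OUT_WORLD,
                [IN_FCC]   = OUT_FCC,
                [IN_ETSI]  = OUT_ETSI,
                [IN_JP]    = OUT_TELEC,
        };

        if ((unsigned int)r >= sizeof(map) / sizeof(map[0]))
                return OUT_WORLD;     /* same fallback as a default case */
        return map[r];
}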
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 49 ++++++++++++++++++++++------- drivers/net/wireless/rsi/rsi_main.h | 8 +++++ 2 files changed, 45 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index d094b0ab05ba..c91d6efa7c84 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1163,6 +1163,21 @@ static int rsi_mac80211_get_antenna(struct ieee80211_hw *hw, return 0; } +static int rsi_map_region_code(enum nl80211_dfs_regions region_code) +{ + switch (region_code) { + case NL80211_DFS_FCC: + return RSI_REGION_FCC; + case NL80211_DFS_ETSI: + return RSI_REGION_ETSI; + case NL80211_DFS_JP: + return RSI_REGION_TELEC; + case NL80211_DFS_UNSET: + return RSI_REGION_WORLD; + } + return RSI_REGION_WORLD; +} + static void rsi_reg_notify(struct wiphy *wiphy, struct regulatory_request *request) { @@ -1170,23 +1185,33 @@ static void rsi_reg_notify(struct wiphy *wiphy, struct ieee80211_channel *ch; struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct rsi_hw * adapter = hw->priv; + struct rsi_common *common = adapter->priv; int i; - - sband = wiphy->bands[NL80211_BAND_5GHZ]; - for (i = 0; i < sband->n_channels; i++) { - ch = &sband->channels[i]; - if (ch->flags & IEEE80211_CHAN_DISABLED) - continue; + mutex_lock(&common->mutex); + + rsi_dbg(INFO_ZONE, "country = %s dfs_region = %d\n", + request->alpha2, request->dfs_region); + + if (common->num_supp_bands > 1) { + sband = wiphy->bands[NL80211_BAND_5GHZ]; - if (ch->flags & IEEE80211_CHAN_RADAR) - ch->flags |= IEEE80211_CHAN_NO_IR; + for (i = 0; i < sband->n_channels; i++) { + ch = &sband->channels[i]; + if (ch->flags & IEEE80211_CHAN_DISABLED) + continue; + + if (ch->flags & IEEE80211_CHAN_RADAR) + ch->flags |= IEEE80211_CHAN_NO_IR; + } } + adapter->dfs_region = rsi_map_region_code(request->dfs_region); + rsi_dbg(INFO_ZONE, "RSI region code = %d\n", adapter->dfs_region); - rsi_dbg(INFO_ZONE, - "country = %s dfs_region = %d\n", - request->alpha2, request->dfs_region); - adapter->dfs_region = request->dfs_region; + adapter->country[0] = request->alpha2[0]; + adapter->country[1] = request->alpha2[1]; + + mutex_unlock(&common->mutex); } static struct ieee80211_ops mac80211_ops = { diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 485b97ab5779..6a8e8e7ed1fb 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -170,6 +170,13 @@ struct xtended_desc { u16 reserved; }; +enum rsi_dfs_regions { + RSI_REGION_FCC = 0, + RSI_REGION_ETSI, + RSI_REGION_TELEC, + RSI_REGION_WORLD +}; + struct rsi_hw; struct rsi_common { @@ -287,6 +294,7 @@ struct rsi_hw { struct eepromrw_info eeprom; u32 interrupt_status; u8 dfs_region; + char country[2]; void *rsi_dev; struct rsi_host_intf_ops *host_intf_ops; int (*check_hw_queue_status)(struct rsi_hw *adapter, u8 q_num); -- cgit v1.2.3-55-g7522 From 523b724a769b69963ef46efa14b8b730b40419d5 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 11 Jul 2017 19:57:51 +0530 Subject: rsi: use macro for allocating USB buffer 4 bytes is fixed size for reading or writing USB register. We will use a macro instead of hardcoding this. 
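A hedged sketch in plain C (hypothetical names) of the pattern such a macro enables: one named constant used both for the allocation and for bounds checks, so the two cannot drift apart. The check-before-allocate ordering here is a choice made for the illustration, not a claim about the driver code.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define CTRL_BUF_SIZE 4               /* vendor register transfers are 4 bytes */

static int reg_write(const void *src, size_t len)
{
        unsigned char *buf;

        if (len > CTRL_BUF_SIZE)      /* reject bad lengths before allocating */
                return -EINVAL;

        buf = malloc(CTRL_BUF_SIZE);
        if (!buf)
                return -ENOMEM;

        memcpy(buf, src, len);
        /* ... hand buf to the control transfer here ... */
        free(buf);
        return 0;
}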
Fixes: b97e9b94ad75c ("rsi: Add new host interface operations") Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_usb.c | 4 ++-- drivers/net/wireless/rsi/rsi_usb.h | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 3febf24d619b..99a520afc7c5 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -162,7 +162,7 @@ static int rsi_usb_reg_read(struct usb_device *usbdev, u8 *buf; int status = -ENOMEM; - buf = kmalloc(0x04, GFP_KERNEL); + buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!buf) return status; @@ -204,7 +204,7 @@ static int rsi_usb_reg_write(struct usb_device *usbdev, u8 *usb_reg_buf; int status = -ENOMEM; - usb_reg_buf = kmalloc(0x04, GFP_KERNEL); + usb_reg_buf = kmalloc(RSI_USB_CTRL_BUF_SIZE, GFP_KERNEL); if (!usb_reg_buf) return status; diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h index 3babf81f5a39..891daea2d932 100644 --- a/drivers/net/wireless/rsi/rsi_usb.h +++ b/drivers/net/wireless/rsi/rsi_usb.h @@ -37,6 +37,7 @@ #define BT_EP 2 #define RSI_USB_BUF_SIZE 4096 +#define RSI_USB_CTRL_BUF_SIZE 0x04 struct rsi_91x_usbdev { struct rsi_thread rx_thread; -- cgit v1.2.3-55-g7522 From 59f73e2ae185a6ab5c40f63657a83192bf054fb4 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 11 Jul 2017 19:57:52 +0530 Subject: rsi: check length before USB read/write register These checks are required. Otherwise we may end up getting memory corruption if invalid length is passed. Fixes: b97e9b94ad75c ("rsi: Add new host interface operations") Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_usb.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 99a520afc7c5..3d33ce9ca2ba 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -166,6 +166,9 @@ static int rsi_usb_reg_read(struct usb_device *usbdev, if (!buf) return status; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + status = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), USB_VENDOR_REGISTER_READ, @@ -208,6 +211,9 @@ static int rsi_usb_reg_write(struct usb_device *usbdev, if (!usb_reg_buf) return status; + if (len > RSI_USB_CTRL_BUF_SIZE) + return -EINVAL; + usb_reg_buf[0] = (value & 0x00ff); usb_reg_buf[1] = (value & 0xff00) >> 8; usb_reg_buf[2] = 0x0; -- cgit v1.2.3-55-g7522 From e6249e15fea2f915ddfcbee8efd9120cb64ed93d Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 11 Jul 2017 19:57:53 +0530 Subject: rsi: fix static checker warning u32 pointer is changed to u16 and filled the value. Problem is solved by using local temporary variable. Below static checker warning was reported. drivers/net/wireless/rsi/rsi_91x_usb.c:400 rsi_usb_master_reg_read() warn: passing casted pointer 'value' to 'rsi_usb_reg_read()' 32 vs 16. 
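The underlying issue, restated with a self-contained sketch (standard C, illustrative names): writing a 16-bit quantity through a u32 pointer that has been cast to a u16 pointer fills only half of the destination and leaves the other half unchanged, often uninitialized; copying through a local 16-bit variable and widening it explicitly is well defined.

#include <stdint.h>

static void read_reg16(uint16_t *out)
{
        *out = 0xBEEF;                /* stand-in for the 2-byte register read */
}

static int read_reg_into_u32(uint32_t *value)
{
        uint16_t tmp;

        read_reg16(&tmp);             /* read into a variable of the right size */
        *value = tmp;                 /* widen explicitly; upper 16 bits are 0 */
        return 0;
}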
Fixes: b97e9b94ad75c ("rsi: Add new host interface operations") Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_usb.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c index 3d33ce9ca2ba..9097f7e6229e 100644 --- a/drivers/net/wireless/rsi/rsi_91x_usb.c +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c @@ -404,8 +404,15 @@ static int rsi_usb_master_reg_read(struct rsi_hw *adapter, u32 reg, { struct usb_device *usbdev = ((struct rsi_91x_usbdev *)adapter->rsi_dev)->usbdev; + u16 temp; + int ret; - return rsi_usb_reg_read(usbdev, reg, (u16 *)value, len); + ret = rsi_usb_reg_read(usbdev, reg, &temp, len); + if (ret < 0) + return ret; + *value = temp; + + return 0; } static int rsi_usb_master_reg_write(struct rsi_hw *adapter, -- cgit v1.2.3-55-g7522 From b6658b66d8a6eaaf216382c19d016116931a0f63 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:17 -0700 Subject: mwifiex: reunite copy-and-pasted remove/reset code When PCIe FLR code was added, it explicitly copy-and-pasted much of mwifiex_remove_card() into mwifiex_shutdown_sw(). This is unnecessary, as almost all of the code should be reused. Let's reunite what we can for now. The only functional changes for now: * call netif_device_detach() in the remove() code path -- this wasn't done before, but it really should be a no-op, when the device is getting totally unregistered soon anyway * call the ->down_dev() driver callback only after we've finished all SW teardown -- this should have no significant effect, since the only user (pcie.c) does very minimal work there, and it doesn't matter that we reorder this Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 104 ++++++++-------------------- 1 file changed, 28 insertions(+), 76 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index f2600b827e81..8615099468da 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1352,26 +1352,12 @@ static void mwifiex_main_work_queue(struct work_struct *work) mwifiex_main_process(adapter); } -/* - * This function gets called during PCIe function level reset. Required - * code is extracted from mwifiex_remove_card() - */ -int -mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) +/* Common teardown code used for both device removal and reset */ +static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; int i; - if (!adapter) - goto exit_return; - - wait_for_completion(adapter->fw_done); - /* Caller should ensure we aren't suspending while this happens */ - reinit_completion(adapter->fw_done); - - priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); - mwifiex_deauthenticate(priv, NULL); - /* We can no longer handle interrupts once we start doing the teardown * below. 
*/ @@ -1393,12 +1379,9 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) } mwifiex_dbg(adapter, CMD, "cmd: calling mwifiex_shutdown_drv...\n"); - mwifiex_shutdown_drv(adapter); - if (adapter->if_ops.down_dev) - adapter->if_ops.down_dev(adapter); - mwifiex_dbg(adapter, CMD, "cmd: mwifiex_shutdown_drv done\n"); + if (atomic_read(&adapter->rx_pending) || atomic_read(&adapter->tx_pending) || atomic_read(&adapter->cmd_pending)) { @@ -1421,9 +1404,30 @@ mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) rtnl_unlock(); } vfree(adapter->chan_stats); +} + +/* + * This function gets called during PCIe function level reset. + */ +int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter) +{ + struct mwifiex_private *priv; + + if (!adapter) + return 0; + + wait_for_completion(adapter->fw_done); + /* Caller should ensure we aren't suspending while this happens */ + reinit_completion(adapter->fw_done); + + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); + mwifiex_deauthenticate(priv, NULL); + + mwifiex_uninit_sw(adapter); + + if (adapter->if_ops.down_dev) + adapter->if_ops.down_dev(adapter); - mwifiex_dbg(adapter, INFO, "%s, successful\n", __func__); -exit_return: return 0; } EXPORT_SYMBOL_GPL(mwifiex_shutdown_sw); @@ -1676,61 +1680,10 @@ EXPORT_SYMBOL_GPL(mwifiex_add_card); */ int mwifiex_remove_card(struct mwifiex_adapter *adapter) { - struct mwifiex_private *priv = NULL; - int i; - if (!adapter) - goto exit_remove; - - /* We can no longer handle interrupts once we start doing the teardown - * below. */ - if (adapter->if_ops.disable_int) - adapter->if_ops.disable_int(adapter); - - adapter->surprise_removed = true; - - mwifiex_terminate_workqueue(adapter); - - /* Stop data */ - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - if (priv && priv->netdev) { - mwifiex_stop_net_dev_queue(priv->netdev, adapter); - if (netif_carrier_ok(priv->netdev)) - netif_carrier_off(priv->netdev); - } - } - - mwifiex_dbg(adapter, CMD, - "cmd: calling mwifiex_shutdown_drv...\n"); - - mwifiex_shutdown_drv(adapter); - mwifiex_dbg(adapter, CMD, - "cmd: mwifiex_shutdown_drv done\n"); - if (atomic_read(&adapter->rx_pending) || - atomic_read(&adapter->tx_pending) || - atomic_read(&adapter->cmd_pending)) { - mwifiex_dbg(adapter, ERROR, - "rx_pending=%d, tx_pending=%d,\t" - "cmd_pending=%d\n", - atomic_read(&adapter->rx_pending), - atomic_read(&adapter->tx_pending), - atomic_read(&adapter->cmd_pending)); - } - - for (i = 0; i < adapter->priv_num; i++) { - priv = adapter->priv[i]; - - if (!priv) - continue; + return 0; - rtnl_lock(); - if (priv->netdev && - priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) - mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev); - rtnl_unlock(); - } - vfree(adapter->chan_stats); + mwifiex_uninit_sw(adapter); wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); @@ -1748,7 +1701,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) "info: free adapter\n"); mwifiex_free_adapter(adapter); -exit_remove: return 0; } EXPORT_SYMBOL_GPL(mwifiex_remove_card); -- cgit v1.2.3-55-g7522 From 4b1f5a0d2eeb988d3e77dce7210e3ad3136b0912 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:18 -0700 Subject: mwifiex: reset interrupt status across device reset When resetting the device, we might have queued up interrupts that didn't get a chance to finish processing. We really don't need to handle them at this point; we just want to make sure they don't cause us to try to process old commands from before the device was reset. 
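The idea in one tiny sketch (names are illustrative only): the IRQ path latches work into a status word, and the reset path clears that word after the worker is stopped so nothing stale is replayed against the re-initialised device.

#include <stdint.h>

struct adapter_state {
        uint32_t int_status;          /* bits latched by the interrupt handler */
};

static void reset_teardown(struct adapter_state *a)
{
        /* worker already stopped at this point (elided) */
        a->int_status = 0;            /* drop events from the pre-reset device */
}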
Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 8615099468da..275cf8dc4f2a 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1366,6 +1366,7 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) adapter->surprise_removed = true; mwifiex_terminate_workqueue(adapter); + adapter->int_status = 0; /* Stop data */ for (i = 0; i < adapter->priv_num; i++) { -- cgit v1.2.3-55-g7522 From 7dc4a6b5ca942a7196e17ecb3827b406cf79bc60 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:19 -0700 Subject: mwifiex: pcie: don't allow cmd buffer reuse after reset In rogue cases (due to other bugs) it's possible we try to process an old command response *after* resetting the device. This could trigger a double-free (or the SKB can get reallocated elsewhere...causing other memory corruptions) in mwifiex_pcie_process_cmd_complete(). For safety (and symmetry) let's always NULL out the command buffer as we free it up. We're already doing this for the command response buffer. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 21f2201405d1..f54053e6fb60 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -1043,12 +1043,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(card->cmdrsp_buf); + card->cmdrsp_buf = NULL; } if (card && card->cmd_buf) { mwifiex_unmap_pci_memory(adapter, card->cmd_buf, PCI_DMA_TODEVICE); dev_kfree_skb_any(card->cmd_buf); + card->cmd_buf = NULL; } return 0; } @@ -2934,7 +2936,6 @@ static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter) mwifiex_pcie_delete_evtbd_ring(adapter); mwifiex_pcie_delete_rxbd_ring(adapter); mwifiex_pcie_delete_txbd_ring(adapter); - card->cmdrsp_buf = NULL; } /* -- cgit v1.2.3-55-g7522 From 643acea6297f9aa7241cfb17d5b5606186f54137 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:20 -0700 Subject: mwifiex: re-register wiphy across reset In general, it's helpful to use the same code for device removal as for device reset, as this tends to have fewer bugs. Let's move the wiphy unregistration code into the common reset and removal code. In particular, it's very hard to properly handle the reset sequence when something fails. Currently, if mwifiex_reinit_sw() fails, we've failed to unregister the associated wiphy, and so running something as simple as "iw phy" can trigger an OOPS, as the wiphy still has hooks back into freed mwifiex data structures. For example, KASAN complained: [... see reset fail for other reasons ...] 
[ 1184.821158] mwifiex_pcie 0000:01:00.0: info: dnld wifi firmware from 174948 bytes [ 1186.870914] mwifiex_pcie 0000:01:00.0: info: FW download over, size 608396 bytes [ 1187.685990] mwifiex_pcie 0000:01:00.0: WLAN FW is active [ 1187.692673] mwifiex_pcie 0000:01:00.0: cmd_wait_q terminated: -512 [ 1187.699075] mwifiex_pcie 0000:01:00.0: info: _mwifiex_fw_dpc: unregister device [ 1187.713476] mwifiex: Failed to bring up adapter: -5 [ 1187.718644] mwifiex_pcie 0000:01:00.0: reinit failed: -5 [... run `iw phy` ...] [ 1212.902419] ================================================================== [ 1212.909806] BUG: KASAN: use-after-free in mwifiex_cfg80211_get_antenna+0x54/0xfc [mwifiex] at addr ffffffc0ad1a8028 [ 1212.920246] Read of size 1 by task iw/3127 [...] [ 1212.934946] page dumped because: kasan: bad access detected [...] [ 1212.950665] Call trace: [ 1212.953148] [] dump_backtrace+0x0/0x190 [ 1212.958572] [] show_stack+0x20/0x28 [ 1212.963648] [] dump_stack+0xa4/0xcc [ 1212.968723] [] kasan_report+0x378/0x500 [ 1212.974140] [] __asan_load1+0x44/0x4c [ 1212.979462] [] mwifiex_cfg80211_get_antenna+0x54/0xfc [mwifiex] [ 1212.987131] [] nl80211_send_wiphy+0x75c/0x2de0 [cfg80211] [ 1212.994246] [] nl80211_dump_wiphy+0x32c/0x438 [cfg80211] [ 1213.001149] [] genl_lock_dumpit+0x48/0x64 [ 1213.006746] [] netlink_dump+0x178/0x398 [ 1213.012171] [] __netlink_dump_start+0x1bc/0x260 [...] This all goes away if we just tear down the wiphy on the way down, and set it back up if/when we bring the device back up. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 275cf8dc4f2a..9c8f7bcfef8b 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1405,6 +1405,10 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) rtnl_unlock(); } vfree(adapter->chan_stats); + + wiphy_unregister(adapter->wiphy); + wiphy_free(adapter->wiphy); + adapter->wiphy = NULL; } /* @@ -1686,9 +1690,6 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) mwifiex_uninit_sw(adapter); - wiphy_unregister(adapter->wiphy); - wiphy_free(adapter->wiphy); - if (adapter->irq_wakeup >= 0) device_init_wakeup(adapter->dev, false); -- cgit v1.2.3-55-g7522 From ce32d1d83702064e7746453622fc2d6bdcd8baa4 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:21 -0700 Subject: mwifiex: unregister wiphy before freeing resources It's possible for some control interfaces (e.g., scans, set freq) to be active after we've stopped our main work queue and the netif TX queues. These don't get completely shut out until we've unregistered the wdevs and wiphy. So let's only free command buffers and poison our lists after wiphy_unregister(). This resolves various use-after-free issues seen when resetting the device. 
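The ordering rule the patch below enforces, reduced to a minimal stand-alone illustration (the struct and functions are stand-ins, not mwifiex API): shut off the externally visible entry points first, and only then free the state those entry points would have touched.

#include <stdlib.h>

struct dev_ctx {
        int   registered;             /* stands in for the registered wiphy */
        void *cmd_buffers;            /* stands in for the command buffers  */
};

static void dev_teardown(struct dev_ctx *c)
{
        c->registered = 0;            /* step 1: no new control calls arrive */
        free(c->cmd_buffers);         /* step 2: freeing cannot race them now */
        c->cmd_buffers = NULL;
}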
Cc: Johannes Berg Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/init.c | 3 +++ drivers/net/wireless/marvell/mwifiex/main.c | 7 ++++++- drivers/net/wireless/marvell/mwifiex/main.h | 1 + 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 3ecb59f7405b..de96675e43d5 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -418,7 +418,10 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) mwifiex_cancel_all_pending_cmd(adapter); wake_up_interruptible(&adapter->cmd_wait_q.wait); wake_up_interruptible(&adapter->hs_activate_wait_q); +} +void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter) +{ /* Free lock variables */ mwifiex_free_lock_list(adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 9c8f7bcfef8b..77e491720664 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -653,6 +653,7 @@ err_dnld_fw: if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); + mwifiex_free_cmd_buffers(adapter); } init_failed = true; @@ -1404,11 +1405,13 @@ static void mwifiex_uninit_sw(struct mwifiex_adapter *adapter) mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev); rtnl_unlock(); } - vfree(adapter->chan_stats); wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); adapter->wiphy = NULL; + + vfree(adapter->chan_stats); + mwifiex_free_cmd_buffers(adapter); } /* @@ -1515,6 +1518,7 @@ err_kmalloc: mwifiex_dbg(adapter, ERROR, "info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); + mwifiex_free_cmd_buffers(adapter); } complete_all(adapter->fw_done); @@ -1662,6 +1666,7 @@ err_registerdev: if (adapter->hw_status == MWIFIEX_HW_STATUS_READY) { pr_debug("info: %s: shutdown mwifiex\n", __func__); mwifiex_shutdown_drv(adapter); + mwifiex_free_cmd_buffers(adapter); } err_kmalloc: mwifiex_free_adapter(adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index f8cf3079ac7d..62ce4e81f695 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1078,6 +1078,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *, int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter); int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); +void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter); void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter); -- cgit v1.2.3-55-g7522 From 6417dba33538a856cf1b7d605942c707422213c9 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:22 -0700 Subject: mwifiex: don't short-circuit netdev notifiers on interface deletion When we leave the delete interface function, there are still netdev hooks that might try to process the device. We're short-circuiting some of that by changing the interface type and clearing ieee80211_ptr. This means we skip NETDEV_UNREGISTER_FINAL in cfg80211. Fortunately, that is currently a no-op. 
We don't need most of the cleanup here anyway: * the connection state will get (un)set as part of the disconnect process (which cfg80211 already initiates for us) * the interface type doesn't actually need to be cleared at all (it'll trigger a WARN_ON() in cfg80211 if we do) * the iee80211_ptr isn't really "ours" to clear anyway So stop resetting those 3 things. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index fdfdf2371986..2be78170ec67 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3123,11 +3123,7 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) priv->dfs_chan_sw_workqueue = NULL; } /* Clear the priv in adapter */ - priv->netdev->ieee80211_ptr = NULL; priv->netdev = NULL; - priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; - - priv->media_connected = false; switch (priv->bss_mode) { case NL80211_IFTYPE_UNSPECIFIED: -- cgit v1.2.3-55-g7522 From c253a62da9b456e4cd1db49f65c8f605c4f399ea Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:23 -0700 Subject: mwifiex: fixup init_channel_scan_gap error case In reading through _mwifiex_fw_dpc(), I noticed that after we've registered our wiphy, we still have error paths that don't free it back up. Let's do that. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 77e491720664..0448dcc07139 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -588,7 +588,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) if (mwifiex_init_channel_scan_gap(adapter)) { mwifiex_dbg(adapter, ERROR, "could not init channel stats table\n"); - goto err_init_fw; + goto err_init_chan_scan; } if (driver_mode) { @@ -636,6 +636,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context) err_add_intf: vfree(adapter->chan_stats); +err_init_chan_scan: wiphy_unregister(adapter->wiphy); wiphy_free(adapter->wiphy); err_init_fw: -- cgit v1.2.3-55-g7522 From 9557d9f2e62b0c929510e4dda32013aa62453558 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:24 -0700 Subject: mwifiex: ensure "disable auto DS" struct is initialized The .idle_time field *should* be unused, but technically, we're allowing unitialized stack garbage to pass all the way through to the firmware host command. Let's zero it out instead. 
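Background on why the one-line change below is enough, as a small stand-alone C example (hypothetical struct): a designated initializer zero-initialises every member that is not named explicitly, so the unused field can no longer carry stack garbage.

#include <stdio.h>

struct auto_ds_cfg {
        int auto_ds;                  /* set explicitly below                 */
        int idle_time;                /* left unnamed, so initialised to zero */
};

int main(void)
{
        struct auto_ds_cfg cfg = { .auto_ds = 0 /* e.g. DEEP_SLEEP_OFF */ };

        printf("auto_ds=%d idle_time=%d\n", cfg.auto_ds, cfg.idle_time);
        return 0;
}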
Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 42997e05d90f..43ecd621d1ef 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -654,9 +654,9 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv, */ int mwifiex_disable_auto_ds(struct mwifiex_private *priv) { - struct mwifiex_ds_auto_ds auto_ds; - - auto_ds.auto_ds = DEEP_SLEEP_OFF; + struct mwifiex_ds_auto_ds auto_ds = { + .auto_ds = DEEP_SLEEP_OFF, + }; return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_PS_MODE_ENH, DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds, true); -- cgit v1.2.3-55-g7522 From 5e6588b9d4ab0f97ff7ea58195f4c6fd6380bd91 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:25 -0700 Subject: mwifiex: fix misnomers in mwifiex_free_lock_list() Despite the name (and meticulous comments), this function frees no memory and does not touch any locks. All it does is "delete" the list heads -- which just means they'll be dangling, and we'll need to re-init them if we use them again. It seems like this code would work OK as a sort of canary for using the list after we've torn everything down, so it's fine to keep the code; let's just get the name and comments to match what's actually happening. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/init.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index de96675e43d5..de974e8bb9c6 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -373,15 +373,13 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev, } /* - * This function releases the lock variables and frees the locks and - * associated locks. + * This function invalidates the list heads. */ -static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter) +static void mwifiex_invalidate_lists(struct mwifiex_adapter *adapter) { struct mwifiex_private *priv; s32 i, j; - /* Free lists */ list_del(&adapter->cmd_free_q); list_del(&adapter->cmd_pending_q); list_del(&adapter->scan_pending_q); @@ -422,8 +420,7 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter) void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter) { - /* Free lock variables */ - mwifiex_free_lock_list(adapter); + mwifiex_invalidate_lists(adapter); /* Free command buffer */ mwifiex_dbg(adapter, INFO, "info: free cmd buffer\n"); -- cgit v1.2.3-55-g7522 From f7d7e4b689ca7889cf7733549844eb913c9665c1 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:26 -0700 Subject: mwifiex: make mwifiex_free_cmd_buffer() return void It doesn't fail. 
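Why "deleting" the heads still acts as a canary, sketched in userspace (the poison values merely mimic the kernel's LIST_POISON constants; this is not the kernel implementation): after unlinking, the head's pointers no longer form a usable list, so accidental reuse tends to crash loudly rather than corrupt memory silently.

struct list_node {
        struct list_node *next, *prev;
};

#define POISON_NEXT ((struct list_node *)0x100)    /* mimics LIST_POISON1 */
#define POISON_PREV ((struct list_node *)0x122)    /* mimics LIST_POISON2 */

static void fake_list_del(struct list_node *entry)
{
        entry->next->prev = entry->prev;            /* unlink from neighbours */
        entry->prev->next = entry->next;
        entry->next = POISON_NEXT;                  /* later traversal faults here */
        entry->prev = POISON_PREV;
}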
Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 6 ++---- drivers/net/wireless/marvell/mwifiex/main.h | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 8dad52886034..6ff8e84b01e0 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -427,7 +427,7 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter) * The function calls the completion callback for all the command * buffers that still have response buffers associated with them. */ -int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) +void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_array; u32 i; @@ -436,7 +436,7 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) if (!adapter->cmd_pool) { mwifiex_dbg(adapter, FATAL, "info: FREE_CMD_BUF: cmd_pool is null\n"); - return 0; + return; } cmd_array = adapter->cmd_pool; @@ -464,8 +464,6 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter) kfree(adapter->cmd_pool); adapter->cmd_pool = NULL; } - - return 0; } /* diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 62ce4e81f695..2bee5cdf1fc8 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1077,7 +1077,7 @@ int mwifiex_get_debug_info(struct mwifiex_private *, struct mwifiex_debug_info *); int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter); -int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); +void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter); void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); -- cgit v1.2.3-55-g7522 From fe8d730adaee7988c46e6e4204f4f49f9dba5cc2 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:27 -0700 Subject: mwifiex: utilize netif_tx_{wake,stop}_all_queues() We're open-coding these. Just use the helpers. 
Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/init.c | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index de974e8bb9c6..e11919db7818 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -337,17 +337,9 @@ void mwifiex_wake_up_net_dev_queue(struct net_device *netdev, struct mwifiex_adapter *adapter) { unsigned long dev_queue_flags; - unsigned int i; spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); - - for (i = 0; i < netdev->num_tx_queues; i++) { - struct netdev_queue *txq = netdev_get_tx_queue(netdev, i); - - if (netif_tx_queue_stopped(txq)) - netif_tx_wake_queue(txq); - } - + netif_tx_wake_all_queues(netdev); spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); } @@ -358,17 +350,9 @@ void mwifiex_stop_net_dev_queue(struct net_device *netdev, struct mwifiex_adapter *adapter) { unsigned long dev_queue_flags; - unsigned int i; spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags); - - for (i = 0; i < netdev->num_tx_queues; i++) { - struct netdev_queue *txq = netdev_get_tx_queue(netdev, i); - - if (!netif_tx_queue_stopped(txq)) - netif_tx_stop_queue(txq); - } - + netif_tx_stop_all_queues(netdev); spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags); } -- cgit v1.2.3-55-g7522 From 8395fd9b194cecbd9748d1b9b6ea87e3f8a6067c Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:28 -0700 Subject: mwifiex: don't open-code ARRAY_SIZE() Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfp.c | 4 +--- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 8 ++------ drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 5 ++--- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c index 6e2994308526..bfe84e55df77 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfp.c +++ b/drivers/net/wireless/marvell/mwifiex/cfp.c @@ -180,11 +180,9 @@ static struct region_code_mapping region_code_mapping_t[] = { u8 *mwifiex_11d_code_2_region(u8 code) { u8 i; - u8 size = sizeof(region_code_mapping_t)/ - sizeof(struct region_code_mapping); /* Look for code in mapping table */ - for (i = 0; i < size; i++) + for (i = 0; i < ARRAY_SIZE(region_code_mapping_t); i++) if (region_code_mapping_t[i].code == code) return region_code_mapping_t[i].region; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 534d94a206a5..b71ad4de5e54 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -189,9 +189,7 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv, if (pbitmap_rates != NULL) { rate_scope->hr_dsss_rate_bitmap = cpu_to_le16(pbitmap_rates[0]); rate_scope->ofdm_rate_bitmap = cpu_to_le16(pbitmap_rates[1]); - for (i = 0; - i < sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16); - i++) + for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++) rate_scope->ht_mcs_rate_bitmap[i] = cpu_to_le16(pbitmap_rates[2 + i]); if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) { @@ -206,9 +204,7 @@ static int mwifiex_cmd_tx_rate_cfg(struct mwifiex_private *priv, cpu_to_le16(priv->bitmap_rates[0]); rate_scope->ofdm_rate_bitmap = cpu_to_le16(priv->bitmap_rates[1]); - for (i = 0; - i < 
sizeof(rate_scope->ht_mcs_rate_bitmap) / sizeof(u16); - i++) + for (i = 0; i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); i++) rate_scope->ht_mcs_rate_bitmap[i] = cpu_to_le16(priv->bitmap_rates[2 + i]); if (priv->adapter->fw_api_ver == MWIFIEX_FW_V15) { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 2945775e83c5..0fba5b10ef2d 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -298,9 +298,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv, priv->bitmap_rates[1] = le16_to_cpu(rate_scope->ofdm_rate_bitmap); for (i = 0; - i < - sizeof(rate_scope->ht_mcs_rate_bitmap) / - sizeof(u16); i++) + i < ARRAY_SIZE(rate_scope->ht_mcs_rate_bitmap); + i++) priv->bitmap_rates[2 + i] = le16_to_cpu(rate_scope-> ht_mcs_rate_bitmap[i]); -- cgit v1.2.3-55-g7522 From 463df4719084c96e13af5ddefca11b4bcd563074 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:29 -0700 Subject: mwifiex: drop 'add_tail' param from mwifiex_insert_cmd_to_pending_q() It's always called with 'true' -- we only determine it 'false' locally within this function. So drop the parameter. Also, this should be 'bool' (since we use true/false), not 'u32'. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 5 +++-- drivers/net/wireless/marvell/mwifiex/main.h | 3 +-- drivers/net/wireless/marvell/mwifiex/scan.c | 5 ++--- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 6ff8e84b01e0..3f5e822673bf 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -664,7 +664,7 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, cmd_no == HostCmd_CMD_802_11_SCAN_EXT) { mwifiex_queue_scan_cmd(priv, cmd_node); } else { - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node); queue_work(adapter->workqueue, &adapter->main_work); if (cmd_node->wait_q_enabled) ret = mwifiex_wait_queue_complete(adapter, cmd_node); @@ -682,11 +682,12 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no, */ void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_node, u32 add_tail) + struct cmd_ctrl_node *cmd_node) { struct host_cmd_ds_command *host_cmd = NULL; u16 command; unsigned long flags; + bool add_tail = true; host_cmd = (struct host_cmd_ds_command *) (cmd_node->cmd_skb->data); if (!host_cmd) { diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 2bee5cdf1fc8..909bd1ad3838 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1088,8 +1088,7 @@ void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_node); void mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter, - struct cmd_ctrl_node *cmd_node, - u32 addtail); + struct cmd_ctrl_node *cmd_node); int mwifiex_exec_next_cmd(struct mwifiex_adapter *adapter); int mwifiex_process_cmdresp(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 9900855746ac..d8e8b857ddfb 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ 
b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1534,8 +1534,7 @@ int mwifiex_scan_networks(struct mwifiex_private *priv, list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, - true); + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node); queue_work(adapter->workqueue, &adapter->main_work); /* Perform internal scan synchronously */ @@ -2033,7 +2032,7 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv) struct cmd_ctrl_node, list); list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); - mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); + mwifiex_insert_cmd_to_pending_q(adapter, cmd_node); } return; -- cgit v1.2.3-55-g7522 From 605db27f7405eb410a2c1d332c9f35d2631ffa14 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:30 -0700 Subject: mwifiex: pcie: remove unnecessary masks After removing the interrupt loop in commit 5d5ddb5e0d9b ("mwifiex: pcie: don't loop/retry interrupt status checks"), we don't need to keep track of the cleared interrupts (actually, we didn't need to do that before, but we *really* don't need to now). Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index f54053e6fb60..13722961def2 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2473,28 +2473,24 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) } if (pcie_ireg & HOST_INTR_DNLD_DONE) { - pcie_ireg &= ~HOST_INTR_DNLD_DONE; mwifiex_dbg(adapter, INTR, "info: TX DNLD Done\n"); ret = mwifiex_pcie_send_data_complete(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_UPLD_RDY) { - pcie_ireg &= ~HOST_INTR_UPLD_RDY; mwifiex_dbg(adapter, INTR, "info: Rx DATA\n"); ret = mwifiex_pcie_process_recv_data(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_EVENT_RDY) { - pcie_ireg &= ~HOST_INTR_EVENT_RDY; mwifiex_dbg(adapter, INTR, "info: Rx EVENT\n"); ret = mwifiex_pcie_process_event_ready(adapter); if (ret) return ret; } if (pcie_ireg & HOST_INTR_CMD_DONE) { - pcie_ireg &= ~HOST_INTR_CMD_DONE; if (adapter->cmd_sent) { mwifiex_dbg(adapter, INTR, "info: CMD sent Interrupt\n"); -- cgit v1.2.3-55-g7522 From 87a602126aaf1ecd52c2b70754a5a4b948b27903 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:31 -0700 Subject: mwifiex: pcie: unify MSI-X / non-MSI-X interrupt process After removing the interrupt loop in commit 5d5ddb5e0d9b ("mwifiex: pcie: don't loop/retry interrupt status checks"), there is practically zero difference between mwifiex_process_pcie_int() (which handled legacy PCI interrupts and MSI interrupts) and mwifiex_process_msix_int() (which handled MSI-X interrupts). Let's add the one relevant line to mwifiex_process_pcie_int() and kill the copy-and-paste. 
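The common shape both interrupt paths reduce to, as a compact stand-alone sketch (the bit names and handlers are made up for the example): read the latched status word once, then dispatch each set bit, bailing out on the first error.

#include <stdint.h>

#define INT_TX_DONE   (1u << 0)
#define INT_RX_READY  (1u << 1)
#define INT_CMD_DONE  (1u << 2)

static int handle_tx_done(void)  { return 0; }
static int handle_rx_ready(void) { return 0; }
static int handle_cmd_done(void) { return 0; }

static int process_int_status(uint32_t status)
{
        int ret;

        if (status & INT_TX_DONE) {
                ret = handle_tx_done();
                if (ret)
                        return ret;
        }
        if (status & INT_RX_READY) {
                ret = handle_rx_ready();
                if (ret)
                        return ret;
        }
        if (status & INT_CMD_DONE) {
                ret = handle_cmd_done();
                if (ret)
                        return ret;
        }
        return 0;
}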
Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 68 ++--------------------------- 1 file changed, 3 insertions(+), 65 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 13722961def2..bffe46a29287 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2430,7 +2430,7 @@ exit: * In case of Rx packets received, the packets are uploaded from card to * host and processed accordingly. */ -static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) +static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) { int ret; u32 pcie_ireg = 0; @@ -2505,75 +2505,13 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INTR, "info: cmd_sent=%d data_sent=%d\n", adapter->cmd_sent, adapter->data_sent); - if (!card->msi_enable && adapter->ps_state != PS_STATE_SLEEP) + if (!card->msi_enable && !card->msix_enable && + adapter->ps_state != PS_STATE_SLEEP) mwifiex_pcie_enable_host_int(adapter); return 0; } -static int mwifiex_process_msix_int(struct mwifiex_adapter *adapter) -{ - int ret; - u32 pcie_ireg; - unsigned long flags; - - spin_lock_irqsave(&adapter->int_lock, flags); - /* Clear out unused interrupts */ - pcie_ireg = adapter->int_status; - adapter->int_status = 0; - spin_unlock_irqrestore(&adapter->int_lock, flags); - - if (pcie_ireg & HOST_INTR_DNLD_DONE) { - mwifiex_dbg(adapter, INTR, - "info: TX DNLD Done\n"); - ret = mwifiex_pcie_send_data_complete(adapter); - if (ret) - return ret; - } - if (pcie_ireg & HOST_INTR_UPLD_RDY) { - mwifiex_dbg(adapter, INTR, - "info: Rx DATA\n"); - ret = mwifiex_pcie_process_recv_data(adapter); - if (ret) - return ret; - } - if (pcie_ireg & HOST_INTR_EVENT_RDY) { - mwifiex_dbg(adapter, INTR, - "info: Rx EVENT\n"); - ret = mwifiex_pcie_process_event_ready(adapter); - if (ret) - return ret; - } - - if (pcie_ireg & HOST_INTR_CMD_DONE) { - if (adapter->cmd_sent) { - mwifiex_dbg(adapter, INTR, - "info: CMD sent Interrupt\n"); - adapter->cmd_sent = false; - } - /* Handle command response */ - ret = mwifiex_pcie_process_cmd_complete(adapter); - if (ret) - return ret; - } - - mwifiex_dbg(adapter, INTR, - "info: cmd_sent=%d data_sent=%d\n", - adapter->cmd_sent, adapter->data_sent); - - return 0; -} - -static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) -{ - struct pcie_service_card *card = adapter->card; - - if (card->msix_enable) - return mwifiex_process_msix_int(adapter); - else - return mwifiex_process_pcie_int(adapter); -} - /* * This function downloads data from driver to card. * -- cgit v1.2.3-55-g7522 From 37680819c6e1f5f22c171cd76d1ac093528fae56 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:32 -0700 Subject: mwifiex: debugfs: allow card_reset() to cancel things The card_reset() implementation should be setting our state flags and cancelling commands for us (i.e., in mwifiex_shutdown_drv()), so let's not do it here. Also, this debugfs file is useful for testing and debugging the reset feature, so we shouldn't do extra preparatory steps here, as that might cause different reset behavior, which could either cause new bugs or paper over existing ones that this debug feature should otherwise help us catch. 
Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/debugfs.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index f6f105a7d3ff..6f4239be609d 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -940,8 +940,6 @@ mwifiex_reset_write(struct file *file, if (adapter->if_ops.card_reset) { dev_info(adapter->dev, "Resetting per request\n"); - adapter->hw_status = MWIFIEX_HW_STATUS_RESET; - mwifiex_cancel_all_pending_cmd(adapter); adapter->if_ops.card_reset(adapter); } -- cgit v1.2.3-55-g7522 From 2f47150ab3efd338ccdf75bae9f91ec147d6d98b Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:33 -0700 Subject: mwifiex: pcie: disable device DMA before unmapping/freeing buffers In testing the mwifiex reset code path, I've noticed KASAN complaining about some "overwritten poison values" in our RX buffer descriptors. Because KASAN didn't notice this at the time of a CPU write, this seems to suggest that the device is writing to this memory. This makes a little sense, because when resetting, we don't necessarily expect the device to be responsive, so we don't have a chance to disable everything cleanly. We can at least take the precaution of disabling DMA for the device though, and in my testing that seems to clear up this particular issue. This patch reorders the removal path so that we disable the device *before* releasing our last PCIe buffers, and it clears/sets the bus master feature from the PCI device when resetting. Along the way, remove the insufficient (and confusing) error path in mwifiex_pcie_up_dev() (it doesn't unwind things well enough, and it doesn't propagate its errors upward anyway). Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index bffe46a29287..fc42697d7ec8 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2971,15 +2971,17 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) "Failed to write driver not-ready signature\n"); } - mwifiex_pcie_free_buffers(adapter); - if (pdev) { + pci_disable_device(pdev); + pci_iounmap(pdev, card->pci_mmap); pci_iounmap(pdev, card->pci_mmap1); pci_disable_device(pdev); pci_release_region(pdev, 2); pci_release_region(pdev, 0); } + + mwifiex_pcie_free_buffers(adapter); } static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) @@ -3155,7 +3157,6 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; - int ret; struct pci_dev *pdev = card->dev; /* tx_buf_size might be changed to 3584 by firmware during @@ -3163,11 +3164,9 @@ static void mwifiex_pcie_up_dev(struct mwifiex_adapter *adapter) */ adapter->tx_buf_size = card->pcie.tx_buf_size; - ret = mwifiex_pcie_alloc_buffers(adapter); - if (!ret) - return; + mwifiex_pcie_alloc_buffers(adapter); - pci_iounmap(pdev, card->pci_mmap1); + pci_set_master(pdev); } /* This function cleans up the PCI-E host memory space. 
*/ @@ -3175,10 +3174,13 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + struct pci_dev *pdev = card->dev; if (mwifiex_write_reg(adapter, reg->drv_rdy, 0x00000000)) mwifiex_dbg(adapter, ERROR, "Failed to write driver not-ready signature\n"); + pci_clear_master(pdev); + adapter->seq_num = 0; mwifiex_pcie_free_buffers(adapter); -- cgit v1.2.3-55-g7522 From 43a0c9aea64d50d126bc2d7babecc8a2a2b21e59 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:34 -0700 Subject: mwifiex: pcie: remove unnecessary 'pdev' check 'card->dev' is initialized once and is never cleared. Drop the unnecessary "safety" check, as it simply obscures things, and we don't do this check everywhere (and therefore it's not really "safe"). Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index fc42697d7ec8..3da1eeb730eb 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2971,15 +2971,12 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) "Failed to write driver not-ready signature\n"); } - if (pdev) { - pci_disable_device(pdev); + pci_disable_device(pdev); - pci_iounmap(pdev, card->pci_mmap); - pci_iounmap(pdev, card->pci_mmap1); - pci_disable_device(pdev); - pci_release_region(pdev, 2); - pci_release_region(pdev, 0); - } + pci_iounmap(pdev, card->pci_mmap); + pci_iounmap(pdev, card->pci_mmap1); + pci_release_region(pdev, 2); + pci_release_region(pdev, 0); mwifiex_pcie_free_buffers(adapter); } -- cgit v1.2.3-55-g7522 From 2d98cfd17e928ec3c89ed7cffc9d340fb703389b Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:35 -0700 Subject: mwifiex: keep mwifiex_cancel_pending_ioctl() static It has some scary comments about "only being called" from the timeout handler, so let's help keep it that way. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 4 +++- drivers/net/wireless/marvell/mwifiex/main.h | 1 - 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index 3f5e822673bf..0edc5d621304 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -26,6 +26,8 @@ #include "11n.h" #include "11ac.h" +static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); + /* * This function initializes a command node. * @@ -1074,7 +1076,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter) * In case of scan commands, all pending commands in scan pending queue * are cancelled. 
*/ -void +static void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter) { struct cmd_ctrl_node *cmd_node = NULL; diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 909bd1ad3838..537a0ad795ff 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1080,7 +1080,6 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter); void mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter); void mwifiex_free_cmd_buffers(struct mwifiex_adapter *adapter); void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter); -void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter); void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter); void mwifiex_cancel_scan(struct mwifiex_adapter *adapter); -- cgit v1.2.3-55-g7522 From 0bc03cfd824789b0efacf3cc204034db1f3e6f0a Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 24 Jul 2017 18:13:36 -0700 Subject: mwifiex: drop num CPU notice This print isn't very useful. It's also different between mwifiex_add_card() and mwifiex_reinit_sw(), and I'd like to consolidate them eventually. Signed-off-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 0448dcc07139..13fc7b6ed11d 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1619,10 +1619,8 @@ mwifiex_add_card(void *card, struct completion *fw_done, adapter->cmd_wait_q.status = 0; adapter->scan_wait_q_woken = false; - if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) { + if ((num_possible_cpus() > 1) || adapter->iface_type == MWIFIEX_USB) adapter->rx_work_enabled = true; - pr_notice("rx work enabled, cpus %d\n", num_possible_cpus()); - } adapter->workqueue = alloc_workqueue("MWIFIEX_WORK_QUEUE", -- cgit v1.2.3-55-g7522 From f46a5b0156b166fcfdf05bca02d13bf482d95bf1 Mon Sep 17 00:00:00 2001 From: Shawn Lin Date: Thu, 6 Jul 2017 15:50:33 +0800 Subject: mwifiex: fix compile warning of unused variable We got a compile warning shows below: drivers/net/wireless/marvell/mwifiex/sdio.c: In function 'mwifiex_sdio_remove': drivers/net/wireless/marvell/mwifiex/sdio.c:377:6: warning: variable 'ret' set but not used [-Wunused-but-set-variable] Per the code, it didn't check if mwifiex_sdio_read_fw_status finish successfully. We should at least check the return of mwifiex_sdio_read_fw_status, otherwise the following check of firmware_stat and adapter->mfg_mode is pointless as the device is probably dead. 
Signed-off-by: Shawn Lin Reviewed-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sdio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index f81a006668f3..fd5183c10c4e 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -390,7 +390,8 @@ mwifiex_sdio_remove(struct sdio_func *func) mwifiex_dbg(adapter, INFO, "info: SDIO func num=%d\n", func->num); ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); - if (firmware_stat == FIRMWARE_READY_SDIO && !adapter->mfg_mode) { + if (!ret && firmware_stat == FIRMWARE_READY_SDIO && + !adapter->mfg_mode) { mwifiex_deauthenticate_all(adapter); priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); -- cgit v1.2.3-55-g7522 From f101d9649c42daecaa9650b362be8068f8f8c933 Mon Sep 17 00:00:00 2001 From: Jeffy Chen Date: Thu, 6 Jul 2017 15:55:28 +0800 Subject: mwifiex: uninit wakeup info in the error handling We inited wakeup info at the beginning of mwifiex_add_card, so we need to uninit it in the error handling. It's much the same as what we did in: 36908c4 mwifiex: uninit wakeup info when removing device Signed-off-by: Jeffy Chen Reviewed-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 13fc7b6ed11d..386993aa2ac1 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -667,8 +667,11 @@ done: release_firmware(adapter->firmware); adapter->firmware = NULL; } - if (init_failed) + if (init_failed) { + if (adapter->irq_wakeup >= 0) + device_init_wakeup(adapter->dev, false); mwifiex_free_adapter(adapter); + } /* Tell all current and future waiters we're finished */ complete_all(fw_done); @@ -1668,6 +1671,8 @@ err_registerdev: mwifiex_free_cmd_buffers(adapter); } err_kmalloc: + if (adapter->irq_wakeup >= 0) + device_init_wakeup(adapter->dev, false); mwifiex_free_adapter(adapter); err_init_sw: -- cgit v1.2.3-55-g7522 From fe0c94be77a0b69456d6a35c838ee23d68204ba2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 10 Jul 2017 10:21:15 +0300 Subject: mwifiex: usb: unlock on error in mwifiex_usb_tx_aggr_tmo() We need to unlock if mwifiex_usb_prepare_tx_aggr_skb() fails. 
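For illustration, the bug is an early return that skips the matching spin_unlock_irqrestore(); the sketch below shows the lock/error-path shape the fix restores. prepare_aggr_skb() is a hypothetical stand-in for mwifiex_usb_prepare_tx_aggr_skb(), whose exact argument list is abbreviated here:

    unsigned long flags;
    int err;

    spin_lock_irqsave(&port->tx_aggr_lock, flags);
    err = prepare_aggr_skb(adapter, port, &skb_send);  /* stand-in call */
    if (err)
            goto unlock;            /* never bail out with the lock held */
    /* ... hand skb_send to the USB layer ... */
unlock:
    spin_unlock_irqrestore(&port->tx_aggr_lock, flags);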
Fixes: a2ca85ad721d ("mwifiex: usb: add timer to flush aggregation packets") Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/usb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index cb1753e43ef4..880ef1cb4088 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -1112,7 +1112,7 @@ static void mwifiex_usb_tx_aggr_tmo(unsigned long context) if (err) { mwifiex_dbg(adapter, ERROR, "prepare tx aggr skb failed, err=%d\n", err); - return; + goto unlock; } if (atomic_read(&port->tx_data_urb_pending) >= @@ -1133,6 +1133,7 @@ static void mwifiex_usb_tx_aggr_tmo(unsigned long context) done: if (err == -1) mwifiex_write_data_complete(adapter, skb_send, 0, -1); +unlock: spin_unlock_irqrestore(&port->tx_aggr_lock, flags); } -- cgit v1.2.3-55-g7522 From 9e467c52ae4c4dde3f3aa1427264578f7141eda5 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Fri, 21 Jul 2017 09:57:37 +0000 Subject: mwifiex: disable uapsd in tdls config Tdls uapsd support capability is default disabled during tdls setup, correspondingly it should also been disabled in tdls config. Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Zhiyuan Yang Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index b71ad4de5e54..fb090144a6d8 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -1751,7 +1751,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, struct mwifiex_ie_types_vhtcap *vht_capab; struct mwifiex_ie_types_aid *aid; struct mwifiex_ie_types_tdls_idle_timeout *timeout; - u8 *pos, qos_info; + u8 *pos; u16 config_len = 0; struct station_parameters *params = priv->sta_params; @@ -1785,12 +1785,11 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv, put_unaligned_le16(params->capability, pos); config_len += sizeof(params->capability); - qos_info = params->uapsd_queues | (params->max_sp << 5); - wmm_qos_info = (struct mwifiex_ie_types_qos_info *)(pos + - config_len); + wmm_qos_info = (void *)(pos + config_len); wmm_qos_info->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA); - wmm_qos_info->header.len = cpu_to_le16(sizeof(qos_info)); - wmm_qos_info->qos_info = qos_info; + wmm_qos_info->header.len = + cpu_to_le16(sizeof(wmm_qos_info->qos_info)); + wmm_qos_info->qos_info = 0; config_len += sizeof(struct mwifiex_ie_types_qos_info); if (params->ht_capa) { -- cgit v1.2.3-55-g7522 From c55971726c40b5d861f3bb457478545173a4e136 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 24 Jul 2017 23:26:23 +0100 Subject: mwifiex: usb: fix spelling mistake: "aggreataon"-> "aggregation" Trivial fix to spelling mistake in aggr_ctrl module parameter message text. 
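For readers unfamiliar with the knob being touched, aggr_ctrl is an ordinary boolean module parameter; a generic sketch of such a declaration (simplified, with the variable made static for self-containment):

    #include <linux/module.h>

    static bool aggr_ctrl;
    module_param(aggr_ctrl, bool, 0000);
    MODULE_PARM_DESC(aggr_ctrl, "usb tx aggregation enable:1, disable:0");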
Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 386993aa2ac1..d67d70002ea9 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -46,7 +46,7 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0"); bool aggr_ctrl; module_param(aggr_ctrl, bool, 0000); -MODULE_PARM_DESC(aggr_ctrl, "usb tx aggreataon enable:1, disable:0"); +MODULE_PARM_DESC(aggr_ctrl, "usb tx aggregation enable:1, disable:0"); /* * This function registers the device and performs all the necessary -- cgit v1.2.3-55-g7522 From 17830147c40a26b7c323fbab525e6dd63bd45d49 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 27 Jul 2017 23:06:22 +0100 Subject: mwifiex: fix spelling mistake: "Insuffient" -> "Insufficient" Trivial fix to spelling mistake in mwifiex_dbg debug message Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/tdls.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 39cd677d4159..e76af2866a19 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -130,7 +130,7 @@ mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv, if (skb_tailroom(skb) < rates_size + 4) { mwifiex_dbg(priv->adapter, ERROR, - "Insuffient space while adding rates\n"); + "Insufficient space while adding rates\n"); return -ENOMEM; } -- cgit v1.2.3-55-g7522 From 185ffc194800a914f793643adf0ce09592124ef6 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 12 Jul 2017 11:47:02 -0500 Subject: ipw2100: don't return positive values to PCI probe on error Causes the PCI stack to complain, and then eventually call the PCI remove function, which ipw2100 is not expecting. It then tries to unregister an already-released netdev and other nasty things, leading to a panic. Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1185518 Signed-off-by: Dan Williams Signed-off-by: Kalle Valo --- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index ccbe74589eec..77f3f92b3c85 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -1724,7 +1724,7 @@ static const struct libipw_geo ipw_geos[] = { static int ipw2100_up(struct ipw2100_priv *priv, int deferred) { unsigned long flags; - int rc = 0; + int err = 0; u32 lock; u32 ord_len = sizeof(lock); @@ -1757,33 +1757,33 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) if (priv->status & STATUS_POWERED || (priv->status & STATUS_RESET_PENDING)) { /* Power cycle the card ... */ - if (ipw2100_power_cycle_adapter(priv)) { + err = ipw2100_power_cycle_adapter(priv); + if (err) { printk(KERN_WARNING DRV_NAME ": %s: Could not cycle adapter.\n", priv->net_dev->name); - rc = 1; goto exit; } } else priv->status |= STATUS_POWERED; /* Load the firmware, start the clocks, etc. 
*/ - if (ipw2100_start_adapter(priv)) { + err = ipw2100_start_adapter(priv); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to start the firmware.\n", priv->net_dev->name); - rc = 1; goto exit; } ipw2100_initialize_ordinals(priv); /* Determine capabilities of this particular HW configuration */ - if (ipw2100_get_hw_features(priv)) { + err = ipw2100_get_hw_features(priv); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to determine HW features.\n", priv->net_dev->name); - rc = 1; goto exit; } @@ -1792,11 +1792,11 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) priv->ieee->freq_band = LIBIPW_24GHZ_BAND; lock = LOCK_NONE; - if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) { + err = ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to clear ordinal lock.\n", priv->net_dev->name); - rc = 1; goto exit; } @@ -1820,21 +1820,21 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) /* Send all of the commands that must be sent prior to * HOST_COMPLETE */ - if (ipw2100_adapter_setup(priv)) { + err = ipw2100_adapter_setup(priv); + if (err) { printk(KERN_ERR DRV_NAME ": %s: Failed to start the card.\n", priv->net_dev->name); - rc = 1; goto exit; } if (!deferred) { /* Enable the adapter - sends HOST_COMPLETE */ - if (ipw2100_enable_adapter(priv)) { + err = ipw2100_enable_adapter(priv); + if (err) { printk(KERN_ERR DRV_NAME ": " "%s: failed in call to enable adapter.\n", priv->net_dev->name); ipw2100_hw_stop_adapter(priv); - rc = 1; goto exit; } @@ -1844,7 +1844,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred) } exit: - return rc; + return err; } static void ipw2100_down(struct ipw2100_priv *priv) -- cgit v1.2.3-55-g7522 From af643fe9bbe0d4835ca271063db8bbb7232f2200 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 13 Jul 2017 23:48:29 +0100 Subject: zd1211rw: fix spelling mistake 'hybernate' -> 'hibernate' Trivial fix to spelling mistake in PDEBUG debug message. Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c index a93f657a41c7..d4e512f50945 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c @@ -61,7 +61,7 @@ static void dump_regwrite(u32 rw) switch (reg) { case 0: - PDEBUG("reg0 CFG1 ref_sel %d hybernate %d rf_vco_reg_en %d" + PDEBUG("reg0 CFG1 ref_sel %d hibernate %d rf_vco_reg_en %d" " if_vco_reg_en %d if_vga_en %d", bits(rw, 14, 15), bit(rw, 3), bit(rw, 2), bit(rw, 1), bit(rw, 0)); -- cgit v1.2.3-55-g7522 From e66d70b789d1db8e152d79dd070459ae9e566acc Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:46:42 +0530 Subject: brcmfmac: constify pci_device_id pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. 
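For illustration, the rationale is that the PCI core only reads the ID table, so a const table can live in read-only data; a generic, hedged sketch of the pattern (vendor/device values are placeholders, and the real brcmfmac table uses its own BRCMF_PCIE_DEVICE() wrapper):

    #include <linux/module.h>
    #include <linux/pci.h>

    static const struct pci_device_id example_pcie_ids[] = {
            { PCI_DEVICE(0x14e4, 0x43a3) }, /* placeholder IDs */
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(pci, example_pcie_ids);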
Signed-off-by: Arvind Yadav Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index f878706613e6..e6e9b00b79d7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1951,7 +1951,7 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } -static struct pci_device_id brcmf_pcie_devid_table[] = { +static const struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), -- cgit v1.2.3-55-g7522 From fcc870d76a2cb4451fb5685dfebe1f37a6a17b93 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 27 Jul 2017 23:09:54 +0100 Subject: wl3501_cs: fix spelling mistake: "Insupported" -> "Unsupported" Trivial fix to spelling mistake in printk message Signed-off-by: Colin Ian King Signed-off-by: Kalle Valo --- drivers/net/wireless/wl3501_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index acec0d9ec422..da62220b9c01 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -965,7 +965,7 @@ static inline void wl3501_md_ind_interrupt(struct net_device *dev, &addr4, sizeof(addr4)); if (!(addr4[0] == 0xAA && addr4[1] == 0xAA && addr4[2] == 0x03 && addr4[4] == 0x00)) { - printk(KERN_INFO "Insupported packet type!\n"); + printk(KERN_INFO "Unsupported packet type!\n"); return; } pkt_len = sig.size + 12 - 24 - 4 - 6; -- cgit v1.2.3-55-g7522 From 11f35c9504669c9c3ff8108c3993a46197edddd3 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Sun, 2 Jul 2017 13:12:30 -0500 Subject: rtlwifi: Fill in_4way field by driver Because it isn't always correct to use EAPOL to check 4-way, we add a timer to handle exception. Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Cc: Yan-Hsuan Chuang Cc: Birming Chiu Cc: Shaofu Cc: Steven Ting Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 11 +++++++++++ drivers/net/wireless/realtek/rtlwifi/core.c | 2 ++ drivers/net/wireless/realtek/rtlwifi/wifi.h | 3 +++ 3 files changed, 16 insertions(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 208f56297a75..1231ca5879d6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1408,6 +1408,11 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx, return true; } else if (ETH_P_PAE == ether_type) { + /* EAPOL is seens as in-4way */ + rtlpriv->btcoexist.btc_info.in_4way = true; + rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies; + rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies; + RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG, "802.1X %s EAPOL pkt!!\n", (is_tx) ? 
"Tx" : "Rx"); @@ -1959,6 +1964,12 @@ label_lps_done: if (rtlpriv->cfg->ops->get_btc_status()) rtlpriv->btcoexist.btc_ops->btc_periodical(rtlpriv); + if (rtlpriv->btcoexist.btc_info.in_4way) { + if (time_after(jiffies, rtlpriv->btcoexist.btc_info.in_4way_ts + + msecs_to_jiffies(IN_4WAY_TIMEOUT_TIME))) + rtlpriv->btcoexist.btc_info.in_4way = false; + } + rtlpriv->link_info.bcn_rx_inperiod = 0; /* <6> scan list */ diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index b0ad061048c5..c53cbf3d52bd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1505,6 +1505,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, u8 mac_addr[ETH_ALEN]; u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + rtlpriv->btcoexist.btc_info.in_4way = false; + if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "not open hw encryption\n"); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index fb1ebb01133f..7ec0d502a0d9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2477,6 +2477,8 @@ struct rtl_global_var { spinlock_t glb_list_lock; }; +#define IN_4WAY_TIMEOUT_TIME (30 * MSEC_PER_SEC) /* 30 seconds */ + struct rtl_btc_info { u8 bt_type; u8 btcoexist; @@ -2485,6 +2487,7 @@ struct rtl_btc_info { u8 ap_num; bool in_4way; + unsigned long in_4way_ts; }; struct bt_coexist_info { -- cgit v1.2.3-55-g7522 From 6aad6075ccd540910438fb0eaa0264c886f36304 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Sun, 2 Jul 2017 13:12:31 -0500 Subject: rtlwifi: Add BT_MP_INFO to c2h handler. We use H2C to ask BT's status, and C2H will return the status. 
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Cc: Yan-Hsuan Chuang Cc: Birming Chiu Cc: Shaofu Cc: Steven Ting Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 17 ++++++++++++- .../wireless/realtek/rtlwifi/btcoexist/rtl_btc.c | 28 ++++++++++++++++++++++ .../wireless/realtek/rtlwifi/btcoexist/rtl_btc.h | 1 + .../net/wireless/realtek/rtlwifi/rtl8192ee/fw.c | 9 +++++-- .../net/wireless/realtek/rtlwifi/rtl8723be/fw.c | 9 +++++-- .../net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 13 +++++++--- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 ++ 7 files changed, 71 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index e6024b013ca5..c1eacd8352a2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -327,7 +327,22 @@ static void halbtc_aggregation_check(struct btc_coexist *btcoexist) static u32 halbtc_get_bt_patch_version(struct btc_coexist *btcoexist) { - return 0; + struct rtl_priv *rtlpriv = btcoexist->adapter; + u8 cmd_buffer[4] = {0}; + u8 oper_ver = 0; + u8 req_num = 0x0E; + + if (btcoexist->bt_info.bt_real_fw_ver) + goto label_done; + + cmd_buffer[0] |= (oper_ver & 0x0f); /* Set OperVer */ + cmd_buffer[0] |= ((req_num << 4) & 0xf0); /* Set ReqNum */ + cmd_buffer[1] = 0; /* BT_OP_GET_BT_VERSION = 0 */ + rtlpriv->cfg->ops->fill_h2c_cmd(rtlpriv->mac80211.hw, 0x67, 4, + &cmd_buffer[0]); + +label_done: + return btcoexist->bt_info.bt_real_fw_ver; } u32 halbtc_get_wifi_link_status(struct btc_coexist *btcoexist) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c index 4366c9817e1e..7d296a401b6f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c @@ -41,6 +41,7 @@ static struct rtl_btc_ops rtl_btc_operation = { .btc_periodical = rtl_btc_periodical, .btc_halt_notify = rtl_btc_halt_notify, .btc_btinfo_notify = rtl_btc_btinfo_notify, + .btc_btmpinfo_notify = rtl_btc_btmpinfo_notify, .btc_is_limited_dig = rtl_btc_is_limited_dig, .btc_is_disable_edca_turbo = rtl_btc_is_disable_edca_turbo, .btc_is_bt_disabled = rtl_btc_is_bt_disabled, @@ -165,6 +166,33 @@ void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length) exhalbtc_bt_info_notify(&gl_bt_coexist, tmp_buf, length); } +void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length) +{ + u8 extid, seq, len; + u16 bt_real_fw_ver; + u8 bt_fw_ver; + + if ((length < 4) || (!tmp_buf)) + return; + + extid = tmp_buf[0]; + /* not response from BT FW then exit*/ + if (extid != 1) /* C2H_TRIG_BY_BT_FW = 1 */ + return; + + len = tmp_buf[1] >> 4; + seq = tmp_buf[2] >> 4; + + /* BT Firmware version response */ + if (seq == 0x0E) { + bt_real_fw_ver = tmp_buf[3] | (tmp_buf[4] << 8); + bt_fw_ver = tmp_buf[5]; + + gl_bt_coexist.bt_info.bt_real_fw_ver = bt_real_fw_ver; + gl_bt_coexist.bt_info.bt_fw_ver = bt_fw_ver; + } +} + bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv) { return gl_bt_coexist.bt_info.limited_dig; diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h index 6fe521cbe7f0..ac1253c46f44 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h @@ -39,6 +39,7 @@ void 
rtl_btc_mediastatus_notify(struct rtl_priv *rtlpriv, void rtl_btc_periodical(struct rtl_priv *rtlpriv); void rtl_btc_halt_notify(void); void rtl_btc_btinfo_notify(struct rtl_priv *rtlpriv, u8 *tmpbuf, u8 length); +void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length); bool rtl_btc_is_limited_dig(struct rtl_priv *rtlpriv); bool rtl_btc_is_disable_edca_turbo(struct rtl_priv *rtlpriv); bool rtl_btc_is_bt_disabled(struct rtl_priv *rtlpriv); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c index f5d4df985c37..7eae27f8e173 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c @@ -887,6 +887,7 @@ void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, u8 c2h_cmd_len, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { case C2H_8192E_DBG: @@ -905,12 +906,16 @@ void rtl92ee_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_id, case C2H_8192E_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_INFO!!\n"); - rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; case C2H_8192E_BT_MP: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; case C2H_8192E_RA_RPT: _rtl92ee_c2h_ra_report_handler(hw, tmp_buf, c2h_cmd_len); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c index dd6f95cfaec9..4b963fd27d64 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c @@ -709,6 +709,7 @@ void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, u8 c2h_cmd_len, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { case C2H_8723B_DBG: @@ -723,12 +724,16 @@ void rtl8723be_c2h_content_parsing(struct ieee80211_hw *hw, case C2H_8723B_BT_INFO: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_INFO!!\n"); - rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, - c2h_cmd_len); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; case C2H_8723B_BT_MP: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "[C2H], C2H_8723BE_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; default: RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index 03259aa150fd..b84b4fa7b71c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -1923,6 +1923,7 @@ void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, u8 *tmp_buf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; switch (c2h_cmd_id) { case C2H_8812_DBG: @@ -1938,9 +1939,15 @@ void rtl8821ae_c2h_content_parsing(struct ieee80211_hw *hw, RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_8812_BT_INFO!!\n"); if (rtlpriv->cfg->ops->get_btc_status()) - 
rtlpriv->btcoexist.btc_ops->btc_btinfo_notify(rtlpriv, - tmp_buf, - c2h_cmd_len); + btc_ops->btc_btinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); + break; + case C2H_8812_BT_MP: + RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, + "[C2H], C2H_8812_BT_MP!!\n"); + if (rtlpriv->cfg->ops->get_btc_status()) + btc_ops->btc_btmpinfo_notify(rtlpriv, tmp_buf, + c2h_cmd_len); break; default: break; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 7ec0d502a0d9..77c3b186900e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2562,6 +2562,8 @@ struct rtl_btc_ops { void (*btc_halt_notify) (void); void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length); + void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv, + u8 *tmp_buf, u8 length); bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv); bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv); bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv); -- cgit v1.2.3-55-g7522 From 881d53ab1f433d50f9e8bbdbf199a005815f84b9 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Sun, 2 Jul 2017 13:12:32 -0500 Subject: rtlwifi: Add board type for 8723be and 8192ee With correct board_type, the phy praser can choose correct parameters. Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Cc: Yan-Hsuan Chuang Cc: Birming Chiu Cc: Shaofu Cc: Steven Ting Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 5 +++++ drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 7 +++++++ 2 files changed, 12 insertions(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index d84ac7adfd82..ef9394be7016 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -2133,7 +2133,12 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw) if ((*(u8 *)&hwinfo[EEPROM_RF_BOARD_OPTION_92E]) == 0xFF) rtlefuse->board_type = 0; + if (rtlpriv->btcoexist.btc_info.btcoexist == 1) + rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */ + rtlhal->board_type = rtlefuse->board_type; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "board_type = 0x%x\n", rtlefuse->board_type); /*parse xtal*/ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_92E]; if (hwinfo[EEPROM_XTAL_92E] == 0xFF) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 2a7ad5ffe997..0b9366e7acbd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -2114,6 +2114,13 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw, rtlefuse->autoload_failflag, hwinfo); + if (rtlpriv->btcoexist.btc_info.btcoexist == 1) + rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */ + + rtlhal->board_type = rtlefuse->board_type; + RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, + "board_type = 0x%x\n", rtlefuse->board_type); + rtlhal->package_type = _rtl8723be_read_package_type(hw); /* set channel plan from efuse */ -- cgit v1.2.3-55-g7522 From ca0e657bda7e6f6768050e9f0e2221f2a28a0a30 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Sun, 2 Jul 2017 13:12:33 -0500 Subject: rtlwifi: add amplifier type for 8812ae With correct amplifier_type, the phy praser can choose correct parameters. 
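For illustration, the change is a small flag merge: when coexistence hardware is reported, bit 2 of the EEPROM-derived board type is forced on so the parameter parser selects the BT variant of the tables. A minimal sketch of that pattern, with names taken from the hw.c hunks below:

    if (rtlpriv->btcoexist.btc_info.btcoexist == 1)
            rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT: BT front end present */

    rtlhal->board_type = rtlefuse->board_type;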
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Cc: Yan-Hsuan Chuang Cc: Birming Chiu Cc: Shaofu Cc: Steven Ting Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 41 +++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 2bc6bace069c..8f4abb3d7669 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -2966,6 +2966,44 @@ static void _rtl8812ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, } } +static void _rtl8812ae_read_amplifier_type(struct ieee80211_hw *hw, u8 *hwinfo, + bool autoload_fail) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + u8 ext_type_pa_2g_a = (hwinfo[0xBD] & BIT(2)) >> 2; /* 0xBD[2] */ + u8 ext_type_pa_2g_b = (hwinfo[0xBD] & BIT(6)) >> 6; /* 0xBD[6] */ + u8 ext_type_pa_5g_a = (hwinfo[0xBF] & BIT(2)) >> 2; /* 0xBF[2] */ + u8 ext_type_pa_5g_b = (hwinfo[0xBF] & BIT(6)) >> 6; /* 0xBF[6] */ + /* 0xBD[1:0] */ + u8 ext_type_lna_2g_a = (hwinfo[0xBD] & (BIT(1) | BIT(0))) >> 0; + /* 0xBD[5:4] */ + u8 ext_type_lna_2g_b = (hwinfo[0xBD] & (BIT(5) | BIT(4))) >> 4; + /* 0xBF[1:0] */ + u8 ext_type_lna_5g_a = (hwinfo[0xBF] & (BIT(1) | BIT(0))) >> 0; + /* 0xBF[5:4] */ + u8 ext_type_lna_5g_b = (hwinfo[0xBF] & (BIT(5) | BIT(4))) >> 4; + + _rtl8812ae_read_pa_type(hw, hwinfo, autoload_fail); + + /* [2.4G] Path A and B are both extPA */ + if ((rtlhal->pa_type_2g & (BIT(5) | BIT(4))) == (BIT(5) | BIT(4))) + rtlhal->type_gpa = ext_type_pa_2g_b << 2 | ext_type_pa_2g_a; + + /* [5G] Path A and B are both extPA */ + if ((rtlhal->pa_type_5g & (BIT(1) | BIT(0))) == (BIT(1) | BIT(0))) + rtlhal->type_apa = ext_type_pa_5g_b << 2 | ext_type_pa_5g_a; + + /* [2.4G] Path A and B are both extLNA */ + if ((rtlhal->lna_type_2g & (BIT(7) | BIT(3))) == (BIT(7) | BIT(3))) + rtlhal->type_glna = ext_type_lna_2g_b << 2 | ext_type_lna_2g_a; + + /* [5G] Path A and B are both extLNA */ + if ((rtlhal->lna_type_5g & (BIT(7) | BIT(3))) == (BIT(7) | BIT(3))) + rtlhal->type_alna = ext_type_lna_5g_b << 2 | ext_type_lna_5g_a; +} + static void _rtl8821ae_read_pa_type(struct ieee80211_hw *hw, u8 *hwinfo, bool autoload_fail) { @@ -3114,7 +3152,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_ hwinfo); if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { - _rtl8812ae_read_pa_type(hw, hwinfo, rtlefuse->autoload_failflag); + _rtl8812ae_read_amplifier_type(hw, hwinfo, + rtlefuse->autoload_failflag); _rtl8812ae_read_bt_coexist_info_from_hwpg(hw, rtlefuse->autoload_failflag, hwinfo); } else { -- cgit v1.2.3-55-g7522 From 66970e38e544138e3656f4feda56a5d88618538f Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Sun, 2 Jul 2017 13:12:34 -0500 Subject: rtlwifi: Update 8723be new phy parameters and its parser. There are new PHY table values for the RTL8723BE. The changes require new parsing code. 
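For illustration, the new tables interleave plain (address, value) pairs with condition markers: a positive condition selects or skips the pairs that follow until an ELSE/ENDIF marker. A heavily simplified, hedged sketch of that walk, patterned on rtl8723be_phy_config_with_headerfile() in the phy.c hunk below; check_positive() and set_reg() stand in for the driver's real condition test and register-write callback, and array_table/arraylen are assumed inputs:

    bool matched = true, skipped = false;
    int i;

    for (i = 0; i + 1 < arraylen; i += 2) {
            u32 v1 = array_table[i], v2 = array_table[i + 1];

            if (v1 & BIT(31)) {                     /* positive condition marker */
                    u8 cond = (v1 >> 28) & 0x3;

                    if (cond == 3) {                /* ENDIF: leave conditional block */
                            matched = true;
                            skipped = false;
                    } else if (cond == 2) {         /* ELSE */
                            matched = !skipped;
                    } else if (!skipped && check_positive(hw, v1, v2)) {
                            matched = true;         /* IF / ELSE IF that matches */
                            skipped = true;
                    } else {
                            matched = false;
                    }
            } else if (!(v1 & BIT(30)) && matched) {
                    set_reg(hw, v1, v2);            /* ordinary register write */
            }
    }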
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Cc: Yan-Hsuan Chuang Cc: Birming Chiu Cc: Shaofu Cc: Steven Ting Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 365 +++++++++------------ .../net/wireless/realtek/rtlwifi/rtl8723be/table.c | 192 ++++++----- .../net/wireless/realtek/rtlwifi/rtl8723be/table.h | 10 +- 3 files changed, 281 insertions(+), 286 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c index 9752175cc466..9606641519e7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c @@ -152,33 +152,86 @@ bool rtl8723be_phy_rf_config(struct ieee80211_hw *hw) return rtl8723be_phy_rf6052_config(hw); } -static bool _rtl8723be_check_condition(struct ieee80211_hw *hw, - const u32 condition) +static bool _rtl8723be_check_positive(struct ieee80211_hw *hw, + const u32 condition1, + const u32 condition2) { - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); - u32 _board = rtlefuse->board_type; /*need efuse define*/ - u32 _interface = rtlhal->interface; - u32 _platform = 0x08;/*SupportPlatform */ - u32 cond = condition; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u32 cut_ver = ((rtlhal->version & CHIP_VER_RTL_MASK) + >> CHIP_VER_RTL_SHIFT); + u32 intf = (rtlhal->interface == INTF_USB ? BIT(1) : BIT(0)); + + u8 board_type = ((rtlhal->board_type & BIT(4)) >> 4) << 0 | /* _GLNA */ + ((rtlhal->board_type & BIT(3)) >> 3) << 1 | /* _GPA */ + ((rtlhal->board_type & BIT(7)) >> 7) << 2 | /* _ALNA */ + ((rtlhal->board_type & BIT(6)) >> 6) << 3 | /* _APA */ + ((rtlhal->board_type & BIT(2)) >> 2) << 4; /* _BT */ + + u32 cond1 = condition1, cond2 = condition2; + u32 driver1 = cut_ver << 24 | /* CUT ver */ + 0 << 20 | /* interface 2/2 */ + 0x04 << 16 | /* platform */ + rtlhal->package_type << 12 | + intf << 8 | /* interface 1/2 */ + board_type; + + u32 driver2 = rtlhal->type_glna << 0 | + rtlhal->type_gpa << 8 | + rtlhal->type_alna << 16 | + rtlhal->type_apa << 24; - if (condition == 0xCDCDCDCD) - return true; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n", + cond1, cond2); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n", + driver1, driver2); - cond = condition & 0xFF; - if ((_board & cond) == 0 && cond != 0x1F) - return false; + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf); + RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, + " (Board, Package) = (0x%X, 0x%X)\n", + rtlhal->board_type, rtlhal->package_type); - cond = condition & 0xFF00; - cond = cond >> 8; - if ((_interface & cond) == 0 && cond != 0x07) - return false; + /*============== Value Defined Check ===============*/ + /*QFN Type [15:12] and Cut Version [27:24] need to do value check*/ - cond = condition & 0xFF0000; - cond = cond >> 16; - if ((_platform & cond) == 0 && cond != 0x0F) + if (((cond1 & 0x0000F000) != 0) && ((cond1 & 0x0000F000) != + (driver1 & 0x0000F000))) return false; - return true; + if (((cond1 & 0x0F000000) != 0) && ((cond1 & 0x0F000000) != + (driver1 & 0x0F000000))) + return false; + + /*=============== Bit Defined Check ================*/ + /* We don't care [31:28] */ + + cond1 &= 0x00FF0FFF; + driver1 &= 0x00FF0FFF; + + if ((cond1 & driver1) == cond1) { + u32 mask = 0; + + if ((cond1 
& 0x0F) == 0) /* BoardType is DONTCARE*/ + return true; + + if ((cond1 & BIT(0)) != 0) /*GLNA*/ + mask |= 0x000000FF; + if ((cond1 & BIT(1)) != 0) /*GPA*/ + mask |= 0x0000FF00; + if ((cond1 & BIT(2)) != 0) /*ALNA*/ + mask |= 0x00FF0000; + if ((cond1 & BIT(3)) != 0) /*APA*/ + mask |= 0xFF000000; + + /* BoardType of each RF path is matched*/ + if ((cond2 & mask) == (driver2 & mask)) + return true; + else + return false; + } + return false; } static void _rtl8723be_config_rf_reg(struct ieee80211_hw *hw, u32 addr, @@ -464,6 +517,16 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); bool rtstatus; + /* switch ant to BT */ + if (rtlpriv->rtlhal.interface == INTF_USB) { + rtl_write_dword(rtlpriv, 0x948, 0x0); + } else { + if (rtlpriv->btcoexist.btc_info.single_ant_path == 0) + rtl_write_dword(rtlpriv, 0x948, 0x280); + else + rtl_write_dword(rtlpriv, 0x948, 0x0); + } + rtstatus = _rtl8723be_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_PHY_REG); if (!rtstatus) { @@ -493,142 +556,84 @@ static bool _rtl8723be_phy_bb8723b_config_parafile(struct ieee80211_hw *hw) return true; } +static bool rtl8723be_phy_config_with_headerfile(struct ieee80211_hw *hw, + u32 *array_table, + u16 arraylen, + void (*set_reg)(struct ieee80211_hw *hw, u32 regaddr, u32 data)) +{ + #define COND_ELSE 2 + #define COND_ENDIF 3 + + int i = 0; + u8 cond; + bool matched = true, skipped = false; + + while ((i + 1) < arraylen) { + u32 v1 = array_table[i]; + u32 v2 = array_table[i + 1]; + + if (v1 & (BIT(31) | BIT(30))) {/*positive & negative condition*/ + if (v1 & BIT(31)) {/* positive condition*/ + cond = (u8)((v1 & (BIT(29) | BIT(28))) >> 28); + if (cond == COND_ENDIF) { /*end*/ + matched = true; + skipped = false; + } else if (cond == COND_ELSE) { /*else*/ + matched = skipped ? 
false : true; + } else {/*if , else if*/ + if (skipped) { + matched = false; + } else { + if (_rtl8723be_check_positive( + hw, v1, v2)) { + matched = true; + skipped = true; + } else { + matched = false; + skipped = false; + } + } + } + } else if (v1 & BIT(30)) { /*negative condition*/ + /*do nothing*/ + } + } else { + if (matched) + set_reg(hw, v1, v2); + } + i = i + 2; + } + + return true; +} + static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 i; - u32 arraylength; - u32 *ptrarray; RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n"); - arraylength = RTL8723BEMAC_1T_ARRAYLEN; - ptrarray = RTL8723BEMAC_1T_ARRAY; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Img:RTL8723bEMAC_1T_ARRAY LEN %d\n", arraylength); - for (i = 0; i < arraylength; i = i + 2) - rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]); - return true; + + return rtl8723be_phy_config_with_headerfile(hw, + RTL8723BEMAC_1T_ARRAY, RTL8723BEMAC_1T_ARRAYLEN, + rtl_write_byte_with_val32); } static bool _rtl8723be_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, u8 configtype) { - #define READ_NEXT_PAIR(v1, v2, i) \ - do { \ - i += 2; \ - v1 = array_table[i];\ - v2 = array_table[i+1]; \ - } while (0) - - int i; - u32 *array_table; - u16 arraylen; - struct rtl_priv *rtlpriv = rtl_priv(hw); - u32 v1 = 0, v2 = 0; - - if (configtype == BASEBAND_CONFIG_PHY_REG) { - arraylen = RTL8723BEPHY_REG_1TARRAYLEN; - array_table = RTL8723BEPHY_REG_1TARRAY; - - for (i = 0; i < arraylen; i = i + 2) { - v1 = array_table[i]; - v2 = array_table[i+1]; - if (v1 < 0xcdcdcdcd) { - _rtl8723be_config_bb_reg(hw, v1, v2); - } else {/*This line is the start line of branch.*/ - /* to protect READ_NEXT_PAIR not overrun */ - if (i >= arraylen - 2) - break; - - if (!_rtl8723be_check_condition(hw, - array_table[i])) { - /*Discard the following - *(offset, data) pairs - */ - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - READ_NEXT_PAIR(v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - /*Configure matched pairs and - *skip to end of if-else. - */ - } else { - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - _rtl8723be_config_bb_reg(hw, - v1, v2); - READ_NEXT_PAIR(v1, v2, i); - } - while (v2 != 0xDEAD && i < arraylen - 2) - READ_NEXT_PAIR(v1, v2, i); - } - } - } - } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { - arraylen = RTL8723BEAGCTAB_1TARRAYLEN; - array_table = RTL8723BEAGCTAB_1TARRAY; - - for (i = 0; i < arraylen; i = i + 2) { - v1 = array_table[i]; - v2 = array_table[i+1]; - if (v1 < 0xCDCDCDCD) { - rtl_set_bbreg(hw, array_table[i], - MASKDWORD, - array_table[i + 1]); - udelay(1); - continue; - } else {/*This line is the start line of branch.*/ - /* to protect READ_NEXT_PAIR not overrun */ - if (i >= arraylen - 2) - break; - - if (!_rtl8723be_check_condition(hw, - array_table[i])) { - /*Discard the following - *(offset, data) pairs - */ - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - READ_NEXT_PAIR(v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - /*Configure matched pairs and - *skip to end of if-else. 
- */ - } else { - READ_NEXT_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < arraylen - 2) { - rtl_set_bbreg(hw, array_table[i], - MASKDWORD, - array_table[i + 1]); - udelay(1); - READ_NEXT_PAIR(v1, v2, i); - } + if (configtype == BASEBAND_CONFIG_PHY_REG) + return rtl8723be_phy_config_with_headerfile(hw, + RTL8723BEPHY_REG_1TARRAY, + RTL8723BEPHY_REG_1TARRAYLEN, + _rtl8723be_config_bb_reg); + else if (configtype == BASEBAND_CONFIG_AGC_TAB) + return rtl8723be_phy_config_with_headerfile(hw, + RTL8723BEAGCTAB_1TARRAY, + RTL8723BEAGCTAB_1TARRAYLEN, + rtl_set_bbreg_with_dwmask); - while (v2 != 0xDEAD && i < arraylen - 2) - READ_NEXT_PAIR(v1, v2, i); - } - } - RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, - "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n", - array_table[i], array_table[i + 1]); - } - } - return true; + return false; } static u8 _rtl8723be_get_rate_section_index(u32 regaddr) @@ -761,73 +766,17 @@ static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, enum radio_path rfpath) { - #define READ_NEXT_RF_PAIR(v1, v2, i) \ - do { \ - i += 2; \ - v1 = radioa_array_table[i]; \ - v2 = radioa_array_table[i+1]; \ - } while (0) - - int i; - bool rtstatus = true; - u32 *radioa_array_table; - u16 radioa_arraylen; struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); - u32 v1 = 0, v2 = 0; + bool ret = true; - radioa_arraylen = RTL8723BE_RADIOA_1TARRAYLEN; - radioa_array_table = RTL8723BE_RADIOA_1TARRAY; - RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, - "Radio_A:RTL8723BE_RADIOA_1TARRAY %d\n", radioa_arraylen); RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath); - rtstatus = true; switch (rfpath) { case RF90_PATH_A: - for (i = 0; i < radioa_arraylen; i = i + 2) { - v1 = radioa_array_table[i]; - v2 = radioa_array_table[i+1]; - if (v1 < 0xcdcdcdcd) { - _rtl8723be_config_rf_radio_a(hw, v1, v2); - } else {/*This line is the start line of branch.*/ - /* to protect READ_NEXT_PAIR not overrun */ - if (i >= radioa_arraylen - 2) - break; - - if (!_rtl8723be_check_condition(hw, - radioa_array_table[i])) { - /*Discard the following - *(offset, data) pairs - */ - READ_NEXT_RF_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < radioa_arraylen - 2) { - READ_NEXT_RF_PAIR(v1, v2, i); - } - i -= 2; /* prevent from for-loop += 2*/ - } else { - /*Configure matched pairs - *and skip to end of if-else. 
- */ - READ_NEXT_RF_PAIR(v1, v2, i); - while (v2 != 0xDEAD && - v2 != 0xCDEF && - v2 != 0xCDCD && - i < radioa_arraylen - 2) { - _rtl8723be_config_rf_radio_a(hw, - v1, v2); - READ_NEXT_RF_PAIR(v1, v2, i); - } - - while (v2 != 0xDEAD && - i < radioa_arraylen - 2) { - READ_NEXT_RF_PAIR(v1, v2, i); - } - } - } - } + ret = rtl8723be_phy_config_with_headerfile(hw, + RTL8723BE_RADIOA_1TARRAY, + RTL8723BE_RADIOA_1TARRAYLEN, + _rtl8723be_config_rf_radio_a); if (rtlhal->oem_id == RT_CID_819X_HP) _rtl8723be_config_rf_radio_a(hw, 0x52, 0x7E4BD); @@ -840,7 +789,7 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, "switch case %#x not processed\n", rfpath); break; } - return true; + return ret; } void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw) @@ -1350,7 +1299,7 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw) struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_phy *rtlphy = &rtlpriv->phy; - u32 delay; + u32 delay = 0; RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "switch to channel%d\n", rtlphy->current_channel); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c index a180761e8810..381c16b9b3a9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c @@ -26,6 +26,7 @@ *****************************************************************************/ #include "table.h" + u32 RTL8723BEPHY_REG_1TARRAY[] = { 0x800, 0x80040000, 0x804, 0x00000003, @@ -36,7 +37,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0x818, 0x02200385, 0x81C, 0x00000000, 0x820, 0x01000100, - 0x824, 0x00390204, + 0x824, 0x00190204, 0x828, 0x00000000, 0x82C, 0x00000000, 0x830, 0x00000000, @@ -73,9 +74,8 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0x90C, 0x81121111, 0x910, 0x00000002, 0x914, 0x00000201, - 0x948, 0x00000280, 0xA00, 0x00D047C8, - 0xA04, 0x80FF000C, + 0xA04, 0x80FF800C, 0xA08, 0x8C838300, 0xA0C, 0x2E7F120F, 0xA10, 0x9500BB78, @@ -114,7 +114,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0xC4C, 0x007F037F, 0xC50, 0x69553420, 0xC54, 0x43BC0094, - 0xC58, 0x00023169, + 0xC58, 0x00013147, 0xC5C, 0x00250492, 0xC60, 0x00000000, 0xC64, 0x7112848B, @@ -125,7 +125,7 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { 0xC78, 0x0000001F, 0xC7C, 0x00B91612, 0xC80, 0x390000E4, - 0xC84, 0x20F60000, + 0xC84, 0x21F60000, 0xC88, 0x40000100, 0xC8C, 0x20200000, 0xC90, 0x00020E1A, @@ -224,15 +224,21 @@ u32 RTL8723BEPHY_REG_1TARRAY[] = { }; +u32 RTL8723BEPHY_REG_1TARRAYLEN = + sizeof(RTL8723BEPHY_REG_1TARRAY) / sizeof(u32); + u32 RTL8723BEPHY_REG_ARRAY_PG[] = { - 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00004000, - 0, 0, 0, 0x0000086c, 0xffffff00, 0x34363800, - 0, 0, 0, 0x00000e00, 0xffffffff, 0x42444646, - 0, 0, 0, 0x00000e04, 0xffffffff, 0x30343840, + 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003800, + 0, 0, 0, 0x0000086c, 0xffffff00, 0x32343600, + 0, 0, 0, 0x00000e00, 0xffffffff, 0x40424444, + 0, 0, 0, 0x00000e04, 0xffffffff, 0x28323638, 0, 0, 0, 0x00000e10, 0xffffffff, 0x38404244, 0, 0, 0, 0x00000e14, 0xffffffff, 0x26303436 }; +u32 RTL8723BEPHY_REG_ARRAY_PGLEN = + sizeof(RTL8723BEPHY_REG_ARRAY_PG) / sizeof(u32); + u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x000, 0x00010000, 0x0B0, 0x000DFFE0, @@ -257,15 +263,37 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x01E, 0x00000000, 0x0DF, 0x00000780, 0x050, 0x00067435, + 0x80002000, 0x00000000, 0x40000000, 0x00000000, + 0x051, 0x0006F10E, + 0x052, 0x000007D3, + 0x90003000, 0x00000000, 0x40000000, 0x00000000, + 0x051, 
0x0006F10E, + 0x052, 0x000007D3, + 0x90004000, 0x00000000, 0x40000000, 0x00000000, + 0x051, 0x0006F10E, + 0x052, 0x000007D3, + 0xA0000000, 0x00000000, 0x051, 0x0006B04E, 0x052, 0x000007D2, + 0xB0000000, 0x00000000, 0x053, 0x00000000, 0x054, 0x00050400, 0x055, 0x0004026E, 0x0DD, 0x0000004C, 0x070, 0x00067435, + 0x80002000, 0x00000000, 0x40000000, 0x00000000, + 0x071, 0x0006F10E, + 0x072, 0x000007D3, + 0x90003000, 0x00000000, 0x40000000, 0x00000000, + 0x071, 0x0006F10E, + 0x072, 0x000007D3, + 0x90004000, 0x00000000, 0x40000000, 0x00000000, + 0x071, 0x0006F10E, + 0x072, 0x000007D3, + 0xA0000000, 0x00000000, 0x071, 0x0006B04E, 0x072, 0x000007D2, + 0xB0000000, 0x00000000, 0x073, 0x00000000, 0x074, 0x00050400, 0x075, 0x0004026E, @@ -308,6 +336,7 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x044, 0x00000051, 0x0EF, 0x00000000, 0x0ED, 0x00000000, + 0x07F, 0x00020080, 0x0EF, 0x00002000, 0x03B, 0x000380EF, 0x03B, 0x000302FE, @@ -336,14 +365,24 @@ u32 RTL8723BE_RADIOA_1TARRAY[] = { 0x0A3, 0x00008000, 0x0A4, 0x00048D80, 0x0A5, 0x00068000, - 0x000, 0x00033D80, + 0x0ED, 0x00000002, + 0x0EF, 0x00000002, + 0x056, 0x00000032, + 0x076, 0x00000032, + 0x001, 0x00000780, }; +u32 RTL8723BE_RADIOA_1TARRAYLEN = + sizeof(RTL8723BE_RADIOA_1TARRAY) / sizeof(u32); + u32 RTL8723BEMAC_1T_ARRAY[] = { 0x02F, 0x00000030, 0x035, 0x00000000, + 0x039, 0x00000008, + 0x064, 0x00000000, 0x067, 0x00000020, + 0x421, 0x0000000F, 0x428, 0x0000000A, 0x429, 0x00000010, 0x430, 0x00000000, @@ -439,9 +478,13 @@ u32 RTL8723BEMAC_1T_ARRAY[] = { 0x709, 0x00000043, 0x70A, 0x00000065, 0x70B, 0x00000087, + 0x765, 0x00000018, + 0x76E, 0x00000004, }; +u32 RTL8723BEMAC_1T_ARRAYLEN = sizeof(RTL8723BEMAC_1T_ARRAY) / sizeof(u32); + u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0xFD000001, 0xC78, 0xFC010001, @@ -466,21 +509,21 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0xE9140001, 0xC78, 0xE8150001, 0xC78, 0xE7160001, - 0xC78, 0xAA170001, - 0xC78, 0xA9180001, - 0xC78, 0xA8190001, - 0xC78, 0xA71A0001, - 0xC78, 0xA61B0001, - 0xC78, 0xA51C0001, - 0xC78, 0xA41D0001, - 0xC78, 0xA31E0001, - 0xC78, 0x671F0001, - 0xC78, 0x66200001, - 0xC78, 0x65210001, - 0xC78, 0x64220001, - 0xC78, 0x63230001, - 0xC78, 0x62240001, - 0xC78, 0x61250001, + 0xC78, 0xE6170001, + 0xC78, 0xE5180001, + 0xC78, 0xE4190001, + 0xC78, 0xE31A0001, + 0xC78, 0xA51B0001, + 0xC78, 0xA41C0001, + 0xC78, 0xA31D0001, + 0xC78, 0x671E0001, + 0xC78, 0x661F0001, + 0xC78, 0x65200001, + 0xC78, 0x64210001, + 0xC78, 0x63220001, + 0xC78, 0x4A230001, + 0xC78, 0x49240001, + 0xC78, 0x48250001, 0xC78, 0x47260001, 0xC78, 0x46270001, 0xC78, 0x45280001, @@ -491,22 +534,22 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0x282D0001, 0xC78, 0x272E0001, 0xC78, 0x262F0001, - 0xC78, 0x25300001, - 0xC78, 0x24310001, - 0xC78, 0x09320001, - 0xC78, 0x08330001, - 0xC78, 0x07340001, - 0xC78, 0x06350001, - 0xC78, 0x05360001, - 0xC78, 0x04370001, - 0xC78, 0x03380001, - 0xC78, 0x02390001, + 0xC78, 0x0A300001, + 0xC78, 0x09310001, + 0xC78, 0x08320001, + 0xC78, 0x07330001, + 0xC78, 0x06340001, + 0xC78, 0x05350001, + 0xC78, 0x04360001, + 0xC78, 0x03370001, + 0xC78, 0x02380001, + 0xC78, 0x01390001, 0xC78, 0x013A0001, - 0xC78, 0x003B0001, - 0xC78, 0x003C0001, - 0xC78, 0x003D0001, - 0xC78, 0x003E0001, - 0xC78, 0x003F0001, + 0xC78, 0x013B0001, + 0xC78, 0x013C0001, + 0xC78, 0x013D0001, + 0xC78, 0x013E0001, + 0xC78, 0x013F0001, 0xC78, 0xFC400001, 0xC78, 0xFB410001, 0xC78, 0xFA420001, @@ -531,47 +574,50 @@ u32 RTL8723BEAGCTAB_1TARRAY[] = { 0xC78, 0xE7550001, 0xC78, 0xE6560001, 0xC78, 0xE5570001, - 0xC78, 0xAA580001, - 0xC78, 0xA9590001, - 0xC78, 
0xA85A0001, - 0xC78, 0xA75B0001, - 0xC78, 0xA65C0001, - 0xC78, 0xA55D0001, - 0xC78, 0xA45E0001, - 0xC78, 0x675F0001, - 0xC78, 0x66600001, - 0xC78, 0x65610001, - 0xC78, 0x64620001, - 0xC78, 0x63630001, - 0xC78, 0x62640001, - 0xC78, 0x61650001, + 0xC78, 0xE4580001, + 0xC78, 0xE3590001, + 0xC78, 0xA65A0001, + 0xC78, 0xA55B0001, + 0xC78, 0xA45C0001, + 0xC78, 0xA35D0001, + 0xC78, 0x675E0001, + 0xC78, 0x665F0001, + 0xC78, 0x65600001, + 0xC78, 0x64610001, + 0xC78, 0x63620001, + 0xC78, 0x62630001, + 0xC78, 0x61640001, + 0xC78, 0x48650001, 0xC78, 0x47660001, 0xC78, 0x46670001, 0xC78, 0x45680001, 0xC78, 0x44690001, 0xC78, 0x436A0001, 0xC78, 0x426B0001, - 0xC78, 0x296C0001, - 0xC78, 0x286D0001, - 0xC78, 0x276E0001, - 0xC78, 0x266F0001, - 0xC78, 0x25700001, - 0xC78, 0x24710001, - 0xC78, 0x09720001, - 0xC78, 0x08730001, - 0xC78, 0x07740001, - 0xC78, 0x06750001, - 0xC78, 0x05760001, - 0xC78, 0x04770001, - 0xC78, 0x03780001, - 0xC78, 0x02790001, + 0xC78, 0x286C0001, + 0xC78, 0x276D0001, + 0xC78, 0x266E0001, + 0xC78, 0x256F0001, + 0xC78, 0x24700001, + 0xC78, 0x09710001, + 0xC78, 0x08720001, + 0xC78, 0x07730001, + 0xC78, 0x06740001, + 0xC78, 0x05750001, + 0xC78, 0x04760001, + 0xC78, 0x03770001, + 0xC78, 0x02780001, + 0xC78, 0x01790001, 0xC78, 0x017A0001, - 0xC78, 0x007B0001, - 0xC78, 0x007C0001, - 0xC78, 0x007D0001, - 0xC78, 0x007E0001, - 0xC78, 0x007F0001, + 0xC78, 0x017B0001, + 0xC78, 0x017C0001, + 0xC78, 0x017D0001, + 0xC78, 0x017E0001, + 0xC78, 0x017F0001, 0xC50, 0x69553422, 0xC50, 0x69553420, + 0x824, 0x00390204, }; + +u32 RTL8723BEAGCTAB_1TARRAYLEN = sizeof(RTL8723BEAGCTAB_1TARRAY) / sizeof(u32); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h index dc17001632f7..1deaffe22251 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h @@ -29,15 +29,15 @@ #define __RTL8723BE_TABLE__H_ #include -#define RTL8723BEPHY_REG_1TARRAYLEN 388 +extern u32 RTL8723BEPHY_REG_1TARRAYLEN; extern u32 RTL8723BEPHY_REG_1TARRAY[]; -#define RTL8723BEPHY_REG_ARRAY_PGLEN 36 +extern u32 RTL8723BEPHY_REG_ARRAY_PGLEN; extern u32 RTL8723BEPHY_REG_ARRAY_PG[]; -#define RTL8723BE_RADIOA_1TARRAYLEN 206 +extern u32 RTL8723BE_RADIOA_1TARRAYLEN; extern u32 RTL8723BE_RADIOA_1TARRAY[]; -#define RTL8723BEMAC_1T_ARRAYLEN 196 +extern u32 RTL8723BEMAC_1T_ARRAYLEN; extern u32 RTL8723BEMAC_1T_ARRAY[]; -#define RTL8723BEAGCTAB_1TARRAYLEN 260 +extern u32 RTL8723BEAGCTAB_1TARRAYLEN; extern u32 RTL8723BEAGCTAB_1TARRAY[]; #endif -- cgit v1.2.3-55-g7522 From 6ec9dfbc057cefe50e3ac55bdf6bfc24f24e4a1b Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Sun, 2 Jul 2017 13:12:35 -0500 Subject: rtlwifi: Rename rtl_desc92_rate to rtl_desc_rate This is a common enumeration, so we use a common name. 
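For illustration, nothing changes at call sites beyond the type name; a rate is still declared and compared against the same DESC_* values, e.g. (the pr_debug line is illustrative only):

    enum rtl_desc_rate rate = DESC_RATE5_5M;

    if (rate == DESC_RATE1M || rate == DESC_RATE2M || rate == DESC_RATE5_5M)
            pr_debug("low CCK rate selected\n");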
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Cc: Yan-Hsuan Chuang Cc: Birming Chiu Cc: Shaofu Cc: Steven Ting Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 77c3b186900e..4c6d01eb065d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -592,7 +592,7 @@ enum rtl_hal_state { _HAL_STATE_START = 1, }; -enum rtl_desc92_rate { +enum rtl_desc_rate { DESC_RATE1M = 0x00, DESC_RATE2M = 0x01, DESC_RATE5_5M = 0x02, -- cgit v1.2.3-55-g7522 From e7c528615e34a924c94d135e2f8e17bb9bf3b611 Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Sun, 2 Jul 2017 13:12:36 -0500 Subject: rtlwifi: Uses addr1 instead DA to determine broadcast and multicast addr. We should check addr1 to indicate a packet as broadcast or multicast in tx desc. An obvious example, a STA transmit an *unicast* ARP packet where addr1 and DA are the addresses of AP and broadcast respectively. Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Cc: Yan-Hsuan Chuang Cc: Birming Chiu Cc: Shaofu Cc: Steven Ting Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 1231ca5879d6..8b833e21b5cb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -1166,9 +1166,9 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw, } } - if (is_multicast_ether_addr(ieee80211_get_DA(hdr))) + if (is_multicast_ether_addr(hdr->addr1)) tcb_desc->multicast = 1; - else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr))) + else if (is_broadcast_ether_addr(hdr->addr1)) tcb_desc->broadcast = 1; _rtl_txrate_selectmode(hw, sta, tcb_desc); -- cgit v1.2.3-55-g7522 From 58438d9ae1724f6920adbb749f931a024bd00d5d Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Sun, 2 Jul 2017 13:12:37 -0500 Subject: rtlwifi: move IS_HARDWARE_TYPE_xxx checker to wifi.h Use rtlpriv instead of rtlhal as argument, so driver and btcoex use the same definitions. 
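For illustration, after this move both the core driver and the btcoex code can gate chip-specific paths on the same macros, passing the shared rtl_priv pointer instead of a bus-specific rtlhal. A hedged usage sketch (the setup_* helpers are hypothetical; the macro names are the ones defined in the wifi.h hunk below):

    struct rtl_priv *rtlpriv = rtl_priv(hw);

    if (IS_HARDWARE_TYPE_8723B(rtlpriv))
            setup_8723b_coex(rtlpriv);      /* hypothetical helper */
    else if (IS_HARDWARE_TYPE_8192E(rtlpriv))
            setup_8192e_coex(rtlpriv);      /* hypothetical helper */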
Signed-off-by: Ping-Ke Shih Signed-off-by: Larry Finger Cc: Yan-Hsuan Chuang Cc: Birming Chiu Cc: Shaofu Cc: Steven Ting Signed-off-by: Kalle Valo --- .../realtek/rtlwifi/btcoexist/halbt_precomp.h | 16 --------- .../net/wireless/realtek/rtlwifi/rtl8192cu/mac.c | 3 +- drivers/net/wireless/realtek/rtlwifi/wifi.h | 42 ++++++++++------------ 3 files changed, 19 insertions(+), 42 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h index 2ac989a4b2bb..02dff4c3f664 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h @@ -43,22 +43,6 @@ #define RT_SDIO_INTERFACE 3 #define DEV_BUS_TYPE RT_PCI_INTERFACE -/* IC type */ -#define RTL_HW_TYPE(adapter) (rtl_hal((struct rtl_priv *)adapter)->hw_type) - -#define IS_NEW_GENERATION_IC(adapter) \ - (RTL_HW_TYPE(adapter) >= HARDWARE_TYPE_RTL8192EE) -#define IS_HARDWARE_TYPE_8812(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8812AE) -#define IS_HARDWARE_TYPE_8821(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8821AE) -#define IS_HARDWARE_TYPE_8723A(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8723AE) -#define IS_HARDWARE_TYPE_8723B(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8723BE) -#define IS_HARDWARE_TYPE_8192E(adapter) \ - (RTL_HW_TYPE(adapter) == HARDWARE_TYPE_RTL8192EE) - #include "halbtc8192e2ant.h" #include "halbtc8723b1ant.h" #include "halbtc8723b2ant.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c index 1b124eade846..5657b1e34ad0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c @@ -352,11 +352,10 @@ u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw) void rtl92c_enable_interrupt(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); - if (IS_HARDWARE_TYPE_8192CE(rtlhal)) { + if (IS_HARDWARE_TYPE_8192CE(rtlpriv)) { rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF); rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 4c6d01eb065d..f77acd329e0e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -314,35 +314,29 @@ enum hardware_type { HARDWARE_TYPE_RTL8192EE, HARDWARE_TYPE_RTL8821AE, HARDWARE_TYPE_RTL8812AE, + HARDWARE_TYPE_RTL8822BE, /* keep it last */ HARDWARE_TYPE_NUM }; -#define IS_HARDWARE_TYPE_8192SU(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SU) -#define IS_HARDWARE_TYPE_8192SE(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) -#define IS_HARDWARE_TYPE_8192CE(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) -#define IS_HARDWARE_TYPE_8192CU(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) -#define IS_HARDWARE_TYPE_8192DE(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) -#define IS_HARDWARE_TYPE_8192DU(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DU) -#define IS_HARDWARE_TYPE_8723E(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8723E) -#define IS_HARDWARE_TYPE_8723U(rtlhal) \ - (rtlhal->hw_type == HARDWARE_TYPE_RTL8723U) -#define IS_HARDWARE_TYPE_8192S(rtlhal) \ 
-(IS_HARDWARE_TYPE_8192SE(rtlhal) || IS_HARDWARE_TYPE_8192SU(rtlhal)) -#define IS_HARDWARE_TYPE_8192C(rtlhal) \ -(IS_HARDWARE_TYPE_8192CE(rtlhal) || IS_HARDWARE_TYPE_8192CU(rtlhal)) -#define IS_HARDWARE_TYPE_8192D(rtlhal) \ -(IS_HARDWARE_TYPE_8192DE(rtlhal) || IS_HARDWARE_TYPE_8192DU(rtlhal)) -#define IS_HARDWARE_TYPE_8723(rtlhal) \ -(IS_HARDWARE_TYPE_8723E(rtlhal) || IS_HARDWARE_TYPE_8723U(rtlhal)) +#define RTL_HW_TYPE(rtlpriv) (rtl_hal((struct rtl_priv *)rtlpriv)->hw_type) +#define IS_NEW_GENERATION_IC(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) >= HARDWARE_TYPE_RTL8192EE) +#define IS_HARDWARE_TYPE_8192CE(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8192CE) +#define IS_HARDWARE_TYPE_8812(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8812AE) +#define IS_HARDWARE_TYPE_8821(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8821AE) +#define IS_HARDWARE_TYPE_8723A(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8723AE) +#define IS_HARDWARE_TYPE_8723B(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8723BE) +#define IS_HARDWARE_TYPE_8192E(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8192EE) +#define IS_HARDWARE_TYPE_8822B(rtlpriv) \ + (RTL_HW_TYPE(rtlpriv) == HARDWARE_TYPE_RTL8822BE) #define RX_HAL_IS_CCK_RATE(rxmcs) \ ((rxmcs) == DESC_RATE1M || \ -- cgit v1.2.3-55-g7522 From f81372ed8b1ea04d3cee5e8f7d3e0323a1e7b92e Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Wed, 28 Jun 2017 13:27:11 +0530 Subject: rtlwifi: remove dummy function call _rtl92cu_init_usb_aggregation() can be removed as it is dummy one Signed-off-by: Souptick Joarder Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c index f95a64507f17..530e80f0ef0b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c @@ -777,10 +777,6 @@ static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw, queue_sel); } -static void _rtl92cu_init_usb_aggregation(struct ieee80211_hw *hw) -{ -} - static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw) { u16 value16; @@ -870,7 +866,6 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw) rtl92c_init_edca(hw); rtl92c_init_rate_fallback(hw); rtl92c_init_retry_function(hw); - _rtl92cu_init_usb_aggregation(hw); rtlpriv->cfg->ops->set_bw_mode(hw, NL80211_CHAN_HT20); rtl92c_set_min_space(hw, IS_92C_SERIAL(rtlhal->version)); _rtl92cu_init_beacon_parameters(hw); -- cgit v1.2.3-55-g7522 From 4de95fc5fa1192b6c4d89545b6a788cc6de97429 Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Wed, 28 Jun 2017 18:02:33 +0530 Subject: rtlwifi: Remove unused dummy function Removing unused dummy function Signed-off-by: Souptick Joarder Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c | 12 ------------ drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h | 3 --- 3 files changed, 1 insertion(+), 16 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index 96c923b3feb4..62d9214b6fdc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -173,7 +173,7 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = { .rx_urb_num = RTL92C_NUM_RX_URBS, .rx_max_size = 
RTL92C_SIZE_MAX_RX_BUFFER, .usb_rx_hdl = rtl8192cu_rx_hdl, - .usb_rx_segregate_hdl = NULL, /* rtl8192c_rx_segregate_hdl; */ + .usb_rx_segregate_hdl = NULL, /* tx */ .usb_tx_cleanup = rtl8192c_tx_cleanup, .usb_tx_post_hdl = rtl8192c_tx_post_hdl, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c index de6c3428f7c6..ac4a82de40c7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c @@ -436,13 +436,6 @@ void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb) _rtl_rx_process(hw, skb); } -void rtl8192c_rx_segregate_hdl( - struct ieee80211_hw *hw, - struct sk_buff *skb, - struct sk_buff_head *skb_list) -{ -} - /*---------------------------------------------------------------------- * * Tx handler @@ -675,8 +668,3 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD, "H2C Tx Cmd Content", pdesc, RTL_TX_DESC_SIZE); } - -bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - return true; -} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h index 487eec89bc29..15a66c547287 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h @@ -385,8 +385,6 @@ bool rtl92cu_rx_query_desc(struct ieee80211_hw *hw, struct ieee80211_rx_status *rx_status, u8 *p_desc, struct sk_buff *skb); void rtl8192cu_rx_hdl(struct ieee80211_hw *hw, struct sk_buff * skb); -void rtl8192c_rx_segregate_hdl(struct ieee80211_hw *, struct sk_buff *, - struct sk_buff_head *); void rtl8192c_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb); int rtl8192c_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb, struct sk_buff *skb); @@ -404,6 +402,5 @@ void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 * pDesc, void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool b_firstseg, bool b_lastseg, struct sk_buff *skb); -bool rtl92cu_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); #endif -- cgit v1.2.3-55-g7522 From f2764f61fa10593204b0c5e4e9a68dba02112e50 Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Wed, 5 Jul 2017 19:55:06 +0530 Subject: rtlwifi: Fix memory leak when firmware request fails This patch will fix memory leak when firmware request fails Signed-off-by: Souptick Joarder Acked-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 2 ++ drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c | 2 ++ drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 4 ++++ drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 2 ++ drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 2 ++ drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 2 ++ drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 2 ++ drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 2 ++ drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 6 ++++++ 9 files changed, 24 insertions(+) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index 774e72058d24..bddd5a5ebe52 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -175,6 +175,8 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_info("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + 
rtlpriv->rtlhal.pfirmware = NULL; return 1; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index bcbb0c60f1f1..38f85bfdf0c7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -176,6 +176,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index 62d9214b6fdc..dfbbd35bb966 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -85,6 +85,10 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) err = request_firmware_nowait(THIS_MODULE, 1, fw_name, rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); + if (err) { + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + } return err; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 16132c66e5e1..e38d6f7370aa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -183,6 +183,8 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index eaa503b7c4b4..745e9c32655c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -177,6 +177,8 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 2006b09ea74f..1ec20efb9ce1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -216,6 +216,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) rtl92se_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index 7bf9f2557920..aab86667a7f3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -184,6 +184,8 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } return 0; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index f9d10f1e7cf8..56c05c4e1499 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -195,6 +195,8 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + 
rtlpriv->rtlhal.pfirmware = NULL; return 1; } } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index d71d2776ca03..ec2d577ba85b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -196,6 +196,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->rtlhal.wowlan_firmware = vzalloc(0x8000); if (!rtlpriv->rtlhal.wowlan_firmware) { pr_err("Can't alloc buffer for wowlan fw.\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; return 1; } @@ -222,6 +224,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtl_fw_cb); if (err) { pr_err("Failed to request normal firmware!\n"); + vfree(rtlpriv->rtlhal.wowlan_firmware); + vfree(rtlpriv->rtlhal.pfirmware); return 1; } } @@ -233,6 +237,8 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtl_wowlan_fw_cb); if (err) { pr_err("Failed to request wowlan firmware!\n"); + vfree(rtlpriv->rtlhal.wowlan_firmware); + vfree(rtlpriv->rtlhal.pfirmware); return 1; } return 0; -- cgit v1.2.3-55-g7522 From d28ac7be15c70343cb270648e252f54d770eca6a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 13 Jul 2017 10:43:23 +0300 Subject: rtlwifi: rtl8821ae: Fix HW_VAR_NAV_UPPER operation The cast here is wrong. We want to cast the pointer but we accidentally do a no-op cast of the value. We normally want to set us_nav_upper to WIFI_NAV_UPPER_US (30000) but because of this bug we instead set it to 184 on little endian systems and 0 on big endian ones. Fixes: 3c05bedb5fef ("Staging: rtl8812ae: Add Realtek 8821 PCI WIFI driver") Signed-off-by: Dan Carpenter Acked-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 8f4abb3d7669..4f73012978e9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -779,7 +779,7 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) _rtl8821ae_resume_tx_beacon(hw); break; } case HW_VAR_NAV_UPPER: { - u32 us_nav_upper = ((u32)*val); + u32 us_nav_upper = *(u32 *)val; if (us_nav_upper > HAL_92C_NAV_UPPER_UNIT * 0xFF) { RT_TRACE(rtlpriv, COMP_INIT , DBG_WARNING, -- cgit v1.2.3-55-g7522 From c3327bde514d5945f5de1e28bed3eaa0c4d622ec Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 28 Jul 2017 17:41:11 +0300 Subject: Bluetooth: btrtl: Fix a error code in rtl_load_config() We accidentally return success if the kmemdup() fails. It results in a NULL dereference in the caller. 
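To see why the missing check matters, here is a hedged sketch of a caller (the caller, destination buffer and config file name are invented for illustration; only rtl_load_config() and its return convention come from the patch): before the fix the function could return a positive length while *buff stayed NULL, so the copy below dereferences NULL.

#include <linux/slab.h>
#include <linux/string.h>
#include <net/bluetooth/hci_core.h>

/* Hypothetical caller, for illustration only. */
static int copy_config(struct hci_dev *hdev, u8 *dst, size_t dst_len)
{
	u8 *cfg = NULL;
	int len;

	len = rtl_load_config(hdev, "example_config.bin", &cfg);
	if (len < 0)
		return len;

	/* before the fix, len could be positive here with cfg == NULL */
	if ((size_t)len > dst_len)
		len = dst_len;
	memcpy(dst, cfg, len);	/* NULL dereference without the fix */
	kfree(cfg);
	return len;
}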
Fixes: 1110a2dbe698 ("Bluetooth: btrtl: Add RTL8822BE Bluetooth device") Signed-off-by: Dan Carpenter Signed-off-by: Johan Hedberg --- drivers/bluetooth/btrtl.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 8279094dd713..d9a99b4302ea 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -279,6 +279,8 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff) return ret; ret = fw->size; *buff = kmemdup(fw->data, ret, GFP_KERNEL); + if (!*buff) + ret = -ENOMEM; release_firmware(fw); -- cgit v1.2.3-55-g7522 From 3a3a4e3054137c5ff5d4d306ec834f6d25d7f95b Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 28 Jul 2017 22:18:57 +0200 Subject: ipv6: constify inet6_protocol structures The inet6_protocol structure is only passed as the first argument to inet6_add_protocol or inet6_del_protocol, both of which are declared as const. Thus the inet6_protocol structure itself can be const. Also drop __read_mostly where present on the newly const structures. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- net/ipv6/ip6_gre.c | 2 +- net/ipv6/tcp_ipv6.c | 2 +- net/ipv6/udp.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 67ff2aaf5dcb..33865d67bcb4 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1080,7 +1080,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev) } -static struct inet6_protocol ip6gre_protocol __read_mostly = { +static const struct inet6_protocol ip6gre_protocol = { .handler = gre_rcv, .err_handler = ip6gre_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 90a32576c3d0..2968a33cca7d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1945,7 +1945,7 @@ struct proto tcpv6_prot = { .diag_destroy = tcp_abort, }; -static struct inet6_protocol tcpv6_protocol = { +static const struct inet6_protocol tcpv6_protocol = { .early_demux = tcp_v6_early_demux, .early_demux_handler = tcp_v6_early_demux, .handler = tcp_v6_rcv, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4a3e65626e8b..5f8b8d766c63 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1448,7 +1448,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, } #endif -static struct inet6_protocol udpv6_protocol = { +static const struct inet6_protocol udpv6_protocol = { .early_demux = udp_v6_early_demux, .early_demux_handler = udp_v6_early_demux, .handler = udpv6_rcv, -- cgit v1.2.3-55-g7522 From d04916a48ad4a3db892b664fa9c3a2a693c378ad Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Fri, 28 Jul 2017 22:18:58 +0200 Subject: l2tp: constify inet6_protocol structures The inet6_protocol structure is only passed as the first argument to inet6_add_protocol or inet6_del_protocol, both of which are declared as const. Thus the inet6_protocol structure itself can be const. Also drop __read_mostly on the newly const structure. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ip6.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 88b397c30d86..d2efcd93e1e2 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -788,7 +788,7 @@ static struct inet_protosw l2tp_ip6_protosw = { .ops = &l2tp_ip6_ops, }; -static struct inet6_protocol l2tp_ip6_protocol __read_mostly = { +static const struct inet6_protocol l2tp_ip6_protocol = { .handler = l2tp_ip6_recv, }; -- cgit v1.2.3-55-g7522 From a311abdffb06b66b5160cb3305450a79a3918a8f Mon Sep 17 00:00:00 2001 From: Simon Wunderlich Date: Fri, 28 Jul 2017 13:39:52 +0200 Subject: batman-adv: Start new development cycle Signed-off-by: Simon Wunderlich --- net/batman-adv/main.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index 2be8f1f46529..05cc7637c064 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -24,7 +24,7 @@ #define BATADV_DRIVER_DEVICE "batman-adv" #ifndef BATADV_SOURCE_VERSION -#define BATADV_SOURCE_VERSION "2017.2" +#define BATADV_SOURCE_VERSION "2017.3" #endif /* B.A.T.M.A.N. parameters */ -- cgit v1.2.3-55-g7522 From cd0edf3ada13e9c268713a913387a88be30634dc Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 14 Jun 2017 02:33:52 -0700 Subject: batman-adv: Remove unnecessary length qualifier in %14pM It's misleading and unnecessary. Signed-off-by: Joe Perches Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/distributed-arp-table.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c index 6930d6b50f99..b6cfa78e9381 100644 --- a/net/batman-adv/distributed-arp-table.c +++ b/net/batman-adv/distributed-arp-table.c @@ -834,7 +834,7 @@ int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset) last_seen_msecs = last_seen_msecs % 60000; last_seen_secs = last_seen_msecs / 1000; - seq_printf(seq, " * %15pI4 %14pM %4i %6i:%02i\n", + seq_printf(seq, " * %15pI4 %pM %4i %6i:%02i\n", &dat_entry->ip, dat_entry->mac_addr, batadv_print_vid(dat_entry->vid), last_seen_mins, last_seen_secs); -- cgit v1.2.3-55-g7522 From 6a04be8d860e86ad84b99045395d9e46ed7304cf Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sun, 18 Jun 2017 10:12:30 +0200 Subject: batman-adv: Remove too short %pM printk field width The string representation for a mac address produced by %pM is 17 characters long. Left-aligning the output in a 15 character wide field width %-15pM is therefore misleading and unnecessary. 
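A quick illustrative check of the claim (not from the patch): %pM always expands to the full 17-character aa:bb:cc:dd:ee:ff form, so a minimum field width of 15, left-aligned or not, never pads or truncates it, and the two lines printed below differ only in their label.

#include <linux/kernel.h>
#include <linux/if_ether.h>

/* Illustrative only: field widths below 17 have no effect on %pM. */
static void show_mac_width(void)
{
	u8 mac[ETH_ALEN] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };

	pr_info("plain : <%pM>\n", mac);	/* <aa:bb:cc:dd:ee:ff> */
	pr_info("padded: <%-15pM>\n", mac);	/* same 17-char output  */
}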
Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/bat_iv_ogm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a3501173e200..c27001d38574 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1281,7 +1281,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, batadv_ogm_packet->tq = combined_tq; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n", + "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n", orig_node->orig, orig_neigh_node->orig, total_count, neigh_rq_count, tq_own, tq_asym_penalty, tq_iface_penalty, batadv_ogm_packet->tq, if_incoming->net_dev->name, -- cgit v1.2.3-55-g7522 From e04de4861cdebdfaa13507ad3d40e6542aeab857 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Sun, 18 Jun 2017 09:59:28 +0200 Subject: batman-adv: Remove variable deprecated by skb_put_data skb_put_data makes it unnecessary to store the skb_put return value to copy some data to the packet. The returned pointer of skb_put_data should therefore not stored by functions which previously only used it to copy some data. Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/bat_iv_ogm.c | 4 +--- net/batman-adv/bat_v_ogm.c | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index c27001d38574..83ba5483455a 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -729,11 +729,9 @@ static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr, const unsigned char *packet_buff, int packet_len, bool direct_link) { - unsigned char *skb_buff; unsigned long new_direct_link_flag; - skb_buff = skb_put_data(forw_packet_aggr->skb, packet_buff, - packet_len); + skb_put_data(forw_packet_aggr->skb, packet_buff, packet_len); forw_packet_aggr->packet_len += packet_len; forw_packet_aggr->num_packets++; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 1e3dc374bfde..894c96371e41 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -137,7 +137,7 @@ static void batadv_v_ogm_send(struct work_struct *work) struct batadv_priv *bat_priv; struct batadv_ogm2_packet *ogm_packet; struct sk_buff *skb, *skb_tmp; - unsigned char *ogm_buff, *pkt_buff; + unsigned char *ogm_buff; int ogm_buff_len; u16 tvlv_len = 0; int ret; @@ -166,7 +166,7 @@ static void batadv_v_ogm_send(struct work_struct *work) goto reschedule; skb_reserve(skb, ETH_HLEN); - pkt_buff = skb_put_data(skb, ogm_buff, ogm_buff_len); + skb_put_data(skb, ogm_buff, ogm_buff_len); ogm_packet = (struct batadv_ogm2_packet *)skb->data; ogm_packet->seqno = htonl(atomic_read(&bat_priv->bat_v.ogm_seqno)); -- cgit v1.2.3-55-g7522 From f25cbb2a6ac1ce702d8c39222bc13c24a69bb5ef Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 26 Jun 2017 11:26:44 +0100 Subject: batman-adv: fix various spelling mistakes Trivial fix to spelling mistakes in batadv_dbg debug messages and also in a comment and ensure comment line is not wider than 80 characters "ourselve" -> "ourselves" "surpressed" -> "suppressed" "troughput" -> "throughput" Signed-off-by: Colin 
Ian King Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- net/batman-adv/bat_v_ogm.c | 12 ++++++------ net/batman-adv/send.c | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 894c96371e41..8be61734fc43 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -200,7 +200,7 @@ static void batadv_v_ogm_send(struct work_struct *work) type = "unknown"; } - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 from ourselve on %s surpressed: %s\n", + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 from ourselves on %s suppressed: %s\n", hard_iface->net_dev->name, type); batadv_hardif_put(hard_iface); @@ -683,18 +683,18 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, ogm_throughput = ntohl(ogm_packet->throughput); batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "Received OGM2 packet via NB: %pM, IF: %s [%pM] (from OG: %pM, seqno %u, troughput %u, TTL %u, V %u, tvlv_len %u)\n", + "Received OGM2 packet via NB: %pM, IF: %s [%pM] (from OG: %pM, seqno %u, throughput %u, TTL %u, V %u, tvlv_len %u)\n", ethhdr->h_source, if_incoming->net_dev->name, if_incoming->net_dev->dev_addr, ogm_packet->orig, ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl, ogm_packet->version, ntohs(ogm_packet->tvlv_len)); - /* If the troughput metric is 0, immediately drop the packet. No need to - * create orig_node / neigh_node for an unusable route. + /* If the throughput metric is 0, immediately drop the packet. No need + * to create orig_node / neigh_node for an unusable route. */ if (ogm_throughput == 0) { batadv_dbg(BATADV_DBG_BATMAN, bat_priv, - "Drop packet: originator packet with troughput metric of 0\n"); + "Drop packet: originator packet with throughput metric of 0\n"); return; } @@ -762,7 +762,7 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset, type = "unknown"; } - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 packet from %pM on %s surpressed: %s\n", + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "OGM2 packet from %pM on %s suppressed: %s\n", ogm_packet->orig, hard_iface->net_dev->name, type); diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c index d239a9d72ac3..054a65e6eb68 100644 --- a/net/batman-adv/send.c +++ b/net/batman-adv/send.c @@ -911,7 +911,7 @@ static void batadv_send_outstanding_bcast_packet(struct work_struct *work) type = "unknown"; } - batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s surpressed: %s\n", + batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "BCAST packet from orig %pM on %s suppressed: %s\n", bcast_packet->orig, hard_iface->net_dev->name, type); -- cgit v1.2.3-55-g7522 From e45eba2467bd64fd196dc6f8b50ff5e59c0058da Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 12 Jul 2017 13:14:48 +0200 Subject: batman-adv: Convert batman-adv.txt to reStructuredText Converting the freeform text to parsable reStructuredText, allows the integration in the sphinx based documentation system of the kernel. 
It will therefore be accessible as hypertext under https://www.kernel.org/doc/html/latest/ Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich --- Documentation/networking/00-INDEX | 2 - Documentation/networking/batman-adv.rst | 220 ++++++++++++++++++++++++++++++++ Documentation/networking/batman-adv.txt | 215 ------------------------------- Documentation/networking/index.rst | 1 + MAINTAINERS | 2 +- 5 files changed, 222 insertions(+), 218 deletions(-) create mode 100644 Documentation/networking/batman-adv.rst delete mode 100644 Documentation/networking/batman-adv.txt diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX index c6beb5f1637f..7a79b3587dd3 100644 --- a/Documentation/networking/00-INDEX +++ b/Documentation/networking/00-INDEX @@ -30,8 +30,6 @@ atm.txt - info on where to get ATM programs and support for Linux. ax25.txt - info on using AX.25 and NET/ROM code for Linux -batman-adv.txt - - B.A.T.M.A.N routing protocol on top of layer 2 Ethernet Frames. baycom.txt - info on the driver for Baycom style amateur radio modems bonding.txt diff --git a/Documentation/networking/batman-adv.rst b/Documentation/networking/batman-adv.rst new file mode 100644 index 000000000000..a342b2cc3dc6 --- /dev/null +++ b/Documentation/networking/batman-adv.rst @@ -0,0 +1,220 @@ +========== +batman-adv +========== + +Batman advanced is a new approach to wireless networking which does no longer +operate on the IP basis. Unlike the batman daemon, which exchanges information +using UDP packets and sets routing tables, batman-advanced operates on ISO/OSI +Layer 2 only and uses and routes (or better: bridges) Ethernet Frames. It +emulates a virtual network switch of all nodes participating. Therefore all +nodes appear to be link local, thus all higher operating protocols won't be +affected by any changes within the network. You can run almost any protocol +above batman advanced, prominent examples are: IPv4, IPv6, DHCP, IPX. + +Batman advanced was implemented as a Linux kernel driver to reduce the overhead +to a minimum. It does not depend on any (other) network driver, and can be used +on wifi as well as ethernet lan, vpn, etc ... (anything with ethernet-style +layer 2). + + +Configuration +============= + +Load the batman-adv module into your kernel:: + + $ insmod batman-adv.ko + +The module is now waiting for activation. You must add some interfaces on which +batman can operate. After loading the module batman advanced will scan your +systems interfaces to search for compatible interfaces. Once found, it will +create subfolders in the ``/sys`` directories of each supported interface, +e.g.:: + + $ ls /sys/class/net/eth0/batman_adv/ + elp_interval iface_status mesh_iface throughput_override + +If an interface does not have the ``batman_adv`` subfolder, it probably is not +supported. Not supported interfaces are: loopback, non-ethernet and batman's +own interfaces. + +Note: After the module was loaded it will continuously watch for new +interfaces to verify the compatibility. There is no need to reload the module +if you plug your USB wifi adapter into your machine after batman advanced was +initially loaded. + +The batman-adv soft-interface can be created using the iproute2 tool ``ip``:: + + $ ip link add name bat0 type batadv + +To activate a given interface simply attach it to the ``bat0`` interface:: + + $ ip link set dev eth0 master bat0 + +Repeat this step for all interfaces you wish to add. Now batman starts +using/broadcasting on this/these interface(s). 
+ +By reading the "iface_status" file you can check its status:: + + $ cat /sys/class/net/eth0/batman_adv/iface_status + active + +To deactivate an interface you have to detach it from the "bat0" interface:: + + $ ip link set dev eth0 nomaster + + +All mesh wide settings can be found in batman's own interface folder:: + + $ ls /sys/class/net/bat0/mesh/ + aggregated_ogms fragmentation isolation_mark routing_algo + ap_isolation gw_bandwidth log_level vlan0 + bonding gw_mode multicast_mode + bridge_loop_avoidance gw_sel_class network_coding + distributed_arp_table hop_penalty orig_interval + +There is a special folder for debugging information:: + + $ ls /sys/kernel/debug/batman_adv/bat0/ + bla_backbone_table log neighbors transtable_local + bla_claim_table mcast_flags originators + dat_cache nc socket + gateways nc_nodes transtable_global + +Some of the files contain all sort of status information regarding the mesh +network. For example, you can view the table of originators (mesh +participants) with:: + + $ cat /sys/kernel/debug/batman_adv/bat0/originators + +Other files allow to change batman's behaviour to better fit your requirements. +For instance, you can check the current originator interval (value in +milliseconds which determines how often batman sends its broadcast packets):: + + $ cat /sys/class/net/bat0/mesh/orig_interval + 1000 + +and also change its value:: + + $ echo 3000 > /sys/class/net/bat0/mesh/orig_interval + +In very mobile scenarios, you might want to adjust the originator interval to a +lower value. This will make the mesh more responsive to topology changes, but +will also increase the overhead. + + +Usage +===== + +To make use of your newly created mesh, batman advanced provides a new +interface "bat0" which you should use from this point on. All interfaces added +to batman advanced are not relevant any longer because batman handles them for +you. Basically, one "hands over" the data by using the batman interface and +batman will make sure it reaches its destination. + +The "bat0" interface can be used like any other regular interface. It needs an +IP address which can be either statically configured or dynamically (by using +DHCP or similar services):: + + NodeA: ip link set up dev bat0 + NodeA: ip addr add 192.168.0.1/24 dev bat0 + + NodeB: ip link set up dev bat0 + NodeB: ip addr add 192.168.0.2/24 dev bat0 + NodeB: ping 192.168.0.1 + +Note: In order to avoid problems remove all IP addresses previously assigned to +interfaces now used by batman advanced, e.g.:: + + $ ip addr flush dev eth0 + + +Logging/Debugging +================= + +All error messages, warnings and information messages are sent to the kernel +log. Depending on your operating system distribution this can be read in one of +a number of ways. Try using the commands: ``dmesg``, ``logread``, or looking in +the files ``/var/log/kern.log`` or ``/var/log/syslog``. All batman-adv messages +are prefixed with "batman-adv:" So to see just these messages try:: + + $ dmesg | grep batman-adv + +When investigating problems with your mesh network, it is sometimes necessary to +see more detail debug messages. This must be enabled when compiling the +batman-adv module. When building batman-adv as part of kernel, use "make +menuconfig" and enable the option ``B.A.T.M.A.N. debugging`` +(``CONFIG_BATMAN_ADV_DEBUG=y``). + +Those additional debug messages can be accessed using a special file in +debugfs:: + + $ cat /sys/kernel/debug/batman_adv/bat0/log + +The additional debug output is by default disabled. 
It can be enabled during +run time. Following log_levels are defined: + +.. flat-table:: + + * - 0 + - All debug output disabled + * - 1 + - Enable messages related to routing / flooding / broadcasting + * - 2 + - Enable messages related to route added / changed / deleted + * - 4 + - Enable messages related to translation table operations + * - 8 + - Enable messages related to bridge loop avoidance + * - 16 + - Enable messages related to DAT, ARP snooping and parsing + * - 32 + - Enable messages related to network coding + * - 64 + - Enable messages related to multicast + * - 128 + - Enable messages related to throughput meter + * - 255 + - Enable all messages + +The debug output can be changed at runtime using the file +``/sys/class/net/bat0/mesh/log_level``. e.g.:: + + $ echo 6 > /sys/class/net/bat0/mesh/log_level + +will enable debug messages for when routes change. + +Counters for different types of packets entering and leaving the batman-adv +module are available through ethtool:: + + $ ethtool --statistics bat0 + + +batctl +====== + +As batman advanced operates on layer 2, all hosts participating in the virtual +switch are completely transparent for all protocols above layer 2. Therefore +the common diagnosis tools do not work as expected. To overcome these problems, +batctl was created. At the moment the batctl contains ping, traceroute, tcpdump +and interfaces to the kernel module settings. + +For more information, please see the manpage (``man batctl``). + +batctl is available on https://www.open-mesh.org/ + + +Contact +======= + +Please send us comments, experiences, questions, anything :) + +IRC: + #batman on irc.freenode.org +Mailing-list: + b.a.t.m.a.n@open-mesh.org (optional subscription at + https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n) + +You can also contact the Authors: + +* Marek Lindner +* Simon Wunderlich diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt deleted file mode 100644 index ccf94677b240..000000000000 --- a/Documentation/networking/batman-adv.txt +++ /dev/null @@ -1,215 +0,0 @@ -BATMAN-ADV ----------- - -Batman advanced is a new approach to wireless networking which -does no longer operate on the IP basis. Unlike the batman daemon, -which exchanges information using UDP packets and sets routing -tables, batman-advanced operates on ISO/OSI Layer 2 only and uses -and routes (or better: bridges) Ethernet Frames. It emulates a -virtual network switch of all nodes participating. Therefore all -nodes appear to be link local, thus all higher operating proto- -cols won't be affected by any changes within the network. You can -run almost any protocol above batman advanced, prominent examples -are: IPv4, IPv6, DHCP, IPX. - -Batman advanced was implemented as a Linux kernel driver to re- -duce the overhead to a minimum. It does not depend on any (other) -network driver, and can be used on wifi as well as ethernet lan, -vpn, etc ... (anything with ethernet-style layer 2). - - -CONFIGURATION -------------- - -Load the batman-adv module into your kernel: - -# insmod batman-adv.ko - -The module is now waiting for activation. You must add some in- -terfaces on which batman can operate. After loading the module -batman advanced will scan your systems interfaces to search for -compatible interfaces. Once found, it will create subfolders in -the /sys directories of each supported interface, e.g. 
- -# ls /sys/class/net/eth0/batman_adv/ -# elp_interval iface_status mesh_iface throughput_override - -If an interface does not have the "batman_adv" subfolder it prob- -ably is not supported. Not supported interfaces are: loopback, -non-ethernet and batman's own interfaces. - -Note: After the module was loaded it will continuously watch for -new interfaces to verify the compatibility. There is no need to -reload the module if you plug your USB wifi adapter into your ma- -chine after batman advanced was initially loaded. - -The batman-adv soft-interface can be created using the iproute2 -tool "ip" - -# ip link add name bat0 type batadv - -To activate a given interface simply attach it to the "bat0" -interface - -# ip link set dev eth0 master bat0 - -Repeat this step for all interfaces you wish to add. Now batman -starts using/broadcasting on this/these interface(s). - -By reading the "iface_status" file you can check its status: - -# cat /sys/class/net/eth0/batman_adv/iface_status -# active - -To deactivate an interface you have to detach it from the -"bat0" interface: - -# ip link set dev eth0 nomaster - - -All mesh wide settings can be found in batman's own interface -folder: - -# ls /sys/class/net/bat0/mesh/ -# aggregated_ogms fragmentation isolation_mark routing_algo -# ap_isolation gw_bandwidth log_level vlan0 -# bonding gw_mode multicast_mode -# bridge_loop_avoidance gw_sel_class network_coding -# distributed_arp_table hop_penalty orig_interval - -There is a special folder for debugging information: - -# ls /sys/kernel/debug/batman_adv/bat0/ -# bla_backbone_table log neighbors transtable_local -# bla_claim_table mcast_flags originators -# dat_cache nc socket -# gateways nc_nodes transtable_global - -Some of the files contain all sort of status information regard- -ing the mesh network. For example, you can view the table of -originators (mesh participants) with: - -# cat /sys/kernel/debug/batman_adv/bat0/originators - -Other files allow to change batman's behaviour to better fit your -requirements. For instance, you can check the current originator -interval (value in milliseconds which determines how often batman -sends its broadcast packets): - -# cat /sys/class/net/bat0/mesh/orig_interval -# 1000 - -and also change its value: - -# echo 3000 > /sys/class/net/bat0/mesh/orig_interval - -In very mobile scenarios, you might want to adjust the originator -interval to a lower value. This will make the mesh more respon- -sive to topology changes, but will also increase the overhead. - - -USAGE ------ - -To make use of your newly created mesh, batman advanced provides -a new interface "bat0" which you should use from this point on. -All interfaces added to batman advanced are not relevant any -longer because batman handles them for you. Basically, one "hands -over" the data by using the batman interface and batman will make -sure it reaches its destination. - -The "bat0" interface can be used like any other regular inter- -face. It needs an IP address which can be either statically con- -figured or dynamically (by using DHCP or similar services): - -# NodeA: ip link set up dev bat0 -# NodeA: ip addr add 192.168.0.1/24 dev bat0 - -# NodeB: ip link set up dev bat0 -# NodeB: ip addr add 192.168.0.2/24 dev bat0 -# NodeB: ping 192.168.0.1 - -Note: In order to avoid problems remove all IP addresses previ- -ously assigned to interfaces now used by batman advanced, e.g. 
- -# ip addr flush dev eth0 - - -LOGGING/DEBUGGING ------------------ - -All error messages, warnings and information messages are sent to -the kernel log. Depending on your operating system distribution -this can be read in one of a number of ways. Try using the com- -mands: dmesg, logread, or looking in the files /var/log/kern.log -or /var/log/syslog. All batman-adv messages are prefixed with -"batman-adv:" So to see just these messages try - -# dmesg | grep batman-adv - -When investigating problems with your mesh network it is some- -times necessary to see more detail debug messages. This must be -enabled when compiling the batman-adv module. When building bat- -man-adv as part of kernel, use "make menuconfig" and enable the -option "B.A.T.M.A.N. debugging". - -Those additional debug messages can be accessed using a special -file in debugfs - -# cat /sys/kernel/debug/batman_adv/bat0/log - -The additional debug output is by default disabled. It can be en- -abled during run time. Following log_levels are defined: - - 0 - All debug output disabled - 1 - Enable messages related to routing / flooding / broadcasting - 2 - Enable messages related to route added / changed / deleted - 4 - Enable messages related to translation table operations - 8 - Enable messages related to bridge loop avoidance - 16 - Enable messages related to DAT, ARP snooping and parsing - 32 - Enable messages related to network coding - 64 - Enable messages related to multicast -128 - Enable messages related to throughput meter -255 - Enable all messages - -The debug output can be changed at runtime using the file -/sys/class/net/bat0/mesh/log_level. e.g. - -# echo 6 > /sys/class/net/bat0/mesh/log_level - -will enable debug messages for when routes change. - -Counters for different types of packets entering and leaving the -batman-adv module are available through ethtool: - -# ethtool --statistics bat0 - - -BATCTL ------- - -As batman advanced operates on layer 2 all hosts participating in -the virtual switch are completely transparent for all protocols -above layer 2. Therefore the common diagnosis tools do not work -as expected. To overcome these problems batctl was created. At -the moment the batctl contains ping, traceroute, tcpdump and -interfaces to the kernel module settings. - -For more information, please see the manpage (man batctl). - -batctl is available on https://www.open-mesh.org/ - - -CONTACT -------- - -Please send us comments, experiences, questions, anything :) - -IRC: #batman on irc.freenode.org -Mailing-list: b.a.t.m.a.n@open-mesh.org (optional subscription - at https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n) - -You can also contact the Authors: - -Marek Lindner -Simon Wunderlich diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index b5bd87e01f52..66e620866245 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -6,6 +6,7 @@ Contents: .. 
toctree:: :maxdepth: 2 + batman-adv kapi z8530book diff --git a/MAINTAINERS b/MAINTAINERS index 205d3977ac46..c28a1325724c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2481,7 +2481,7 @@ Q: https://patchwork.open-mesh.org/project/batman/list/ S: Maintained F: Documentation/ABI/testing/sysfs-class-net-batman-adv F: Documentation/ABI/testing/sysfs-class-net-mesh -F: Documentation/networking/batman-adv.txt +F: Documentation/networking/batman-adv.rst F: include/uapi/linux/batman_adv.h F: net/batman-adv/ -- cgit v1.2.3-55-g7522 From 977bb32440e7342e28838d9c0260681637ac97c4 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 28 Jul 2017 13:56:13 +0200 Subject: net/smc: serialize connection creation in all cases If a link group for a new server connection exists already, the mutex serializing the determination of link groups is given up early. The coming registration of memory regions benefits from the serialization as well, if the mutex is held till connection creation is finished. This patch postpones the unlocking of the link group creation mutex. Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/af_smc.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 6793d7348cc8..120a7b9b4d8e 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -779,11 +779,6 @@ static void smc_listen_work(struct work_struct *work) mutex_lock(&smc_create_lgr_pending); local_contact = smc_conn_create(new_smc, peeraddr.sin_addr.s_addr, smcibdev, ibport, &pclc.lcl, 0); - if (local_contact == SMC_REUSE_CONTACT) - /* lock no longer needed, free it due to following - * smc_clc_wait_msg() call - */ - mutex_unlock(&smc_create_lgr_pending); if (local_contact < 0) { rc = local_contact; if (rc == -ENOMEM) @@ -853,8 +848,7 @@ out_connected: if (newsmcsk->sk_state == SMC_INIT) newsmcsk->sk_state = SMC_ACTIVE; enqueue: - if (local_contact == SMC_FIRST_CONTACT) - mutex_unlock(&smc_create_lgr_pending); + mutex_unlock(&smc_create_lgr_pending); lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING); if (lsmc->sk.sk_state == SMC_LISTEN) { smc_accept_enqueue(&lsmc->sk, newsmcsk); -- cgit v1.2.3-55-g7522 From c45abf31e72f7640c5e84adda4683b72cb32a222 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 28 Jul 2017 13:56:14 +0200 Subject: net/smc: shorten local bufsize variables Initiate the coming rework of SMC buffer handling with this small code cleanup. No functional changes here. Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/smc_core.c | 54 +++++++++++++++++++++++++----------------------------- 1 file changed, 25 insertions(+), 29 deletions(-) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 3ac09a629ea1..6159488cb94b 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -508,25 +508,25 @@ int smc_sndbuf_create(struct smc_sock *smc) { struct smc_connection *conn = &smc->conn; struct smc_link_group *lgr = conn->lgr; - int tmp_bufsize, tmp_bufsize_short; struct smc_buf_desc *sndbuf_desc; + int bufsize, bufsize_short; int rc; /* use socket send buffer size (w/o overhead) as start value */ - for (tmp_bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2); - tmp_bufsize_short >= 0; tmp_bufsize_short--) { - tmp_bufsize = smc_uncompress_bufsize(tmp_bufsize_short); + for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2); + bufsize_short >= 0; bufsize_short--) { + bufsize = smc_uncompress_bufsize(bufsize_short); /* check for reusable sndbuf_slot in the link group */ - sndbuf_desc = smc_sndbuf_get_slot(lgr, tmp_bufsize_short); + sndbuf_desc = smc_sndbuf_get_slot(lgr, bufsize_short); if (sndbuf_desc) { - memset(sndbuf_desc->cpu_addr, 0, tmp_bufsize); + memset(sndbuf_desc->cpu_addr, 0, bufsize); break; /* found reusable slot */ } /* try to alloc a new send buffer */ sndbuf_desc = kzalloc(sizeof(*sndbuf_desc), GFP_KERNEL); if (!sndbuf_desc) break; /* give up with -ENOMEM */ - sndbuf_desc->cpu_addr = kzalloc(tmp_bufsize, + sndbuf_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_NORETRY); @@ -539,8 +539,7 @@ int smc_sndbuf_create(struct smc_sock *smc) continue; } rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev, - tmp_bufsize, sndbuf_desc, - DMA_TO_DEVICE); + bufsize, sndbuf_desc, DMA_TO_DEVICE); if (rc) { kfree(sndbuf_desc->cpu_addr); kfree(sndbuf_desc); @@ -549,16 +548,15 @@ int smc_sndbuf_create(struct smc_sock *smc) } sndbuf_desc->used = 1; write_lock_bh(&lgr->sndbufs_lock); - list_add(&sndbuf_desc->list, - &lgr->sndbufs[tmp_bufsize_short]); + list_add(&sndbuf_desc->list, &lgr->sndbufs[bufsize_short]); write_unlock_bh(&lgr->sndbufs_lock); break; } if (sndbuf_desc && sndbuf_desc->cpu_addr) { conn->sndbuf_desc = sndbuf_desc; - conn->sndbuf_size = tmp_bufsize; - smc->sk.sk_sndbuf = tmp_bufsize * 2; - atomic_set(&conn->sndbuf_space, tmp_bufsize); + conn->sndbuf_size = bufsize; + smc->sk.sk_sndbuf = bufsize * 2; + atomic_set(&conn->sndbuf_space, bufsize); return 0; } else { return -ENOMEM; @@ -574,25 +572,25 @@ int smc_rmb_create(struct smc_sock *smc) { struct smc_connection *conn = &smc->conn; struct smc_link_group *lgr = conn->lgr; - int tmp_bufsize, tmp_bufsize_short; struct smc_buf_desc *rmb_desc; + int bufsize, bufsize_short; int rc; /* use socket recv buffer size (w/o overhead) as start value */ - for (tmp_bufsize_short = smc_compress_bufsize(smc->sk.sk_rcvbuf / 2); - tmp_bufsize_short >= 0; tmp_bufsize_short--) { - tmp_bufsize = smc_uncompress_bufsize(tmp_bufsize_short); + for (bufsize_short = smc_compress_bufsize(smc->sk.sk_rcvbuf / 2); + bufsize_short >= 0; bufsize_short--) { + bufsize = smc_uncompress_bufsize(bufsize_short); /* check for reusable rmb_slot in the link group */ - rmb_desc = smc_rmb_get_slot(lgr, tmp_bufsize_short); + rmb_desc = smc_rmb_get_slot(lgr, bufsize_short); if (rmb_desc) { - memset(rmb_desc->cpu_addr, 0, tmp_bufsize); + memset(rmb_desc->cpu_addr, 0, bufsize); break; /* found reusable slot */ } /* try to alloc a new RMB */ rmb_desc = kzalloc(sizeof(*rmb_desc), GFP_KERNEL); if (!rmb_desc) break; /* 
give up with -ENOMEM */ - rmb_desc->cpu_addr = kzalloc(tmp_bufsize, + rmb_desc->cpu_addr = kzalloc(bufsize, GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_NORETRY); @@ -605,8 +603,7 @@ int smc_rmb_create(struct smc_sock *smc) continue; } rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev, - tmp_bufsize, rmb_desc, - DMA_FROM_DEVICE); + bufsize, rmb_desc, DMA_FROM_DEVICE); if (rc) { kfree(rmb_desc->cpu_addr); kfree(rmb_desc); @@ -617,18 +614,17 @@ int smc_rmb_create(struct smc_sock *smc) lgr->lnk[SMC_SINGLE_LINK].roce_pd->unsafe_global_rkey; rmb_desc->used = 1; write_lock_bh(&lgr->rmbs_lock); - list_add(&rmb_desc->list, - &lgr->rmbs[tmp_bufsize_short]); + list_add(&rmb_desc->list, &lgr->rmbs[bufsize_short]); write_unlock_bh(&lgr->rmbs_lock); break; } if (rmb_desc && rmb_desc->cpu_addr) { conn->rmb_desc = rmb_desc; - conn->rmbe_size = tmp_bufsize; - conn->rmbe_size_short = tmp_bufsize_short; - smc->sk.sk_rcvbuf = tmp_bufsize * 2; + conn->rmbe_size = bufsize; + conn->rmbe_size_short = bufsize_short; + smc->sk.sk_rcvbuf = bufsize * 2; atomic_set(&conn->bytes_to_rcv, 0); - conn->rmbe_update_limit = smc_rmb_wnd_update_limit(tmp_bufsize); + conn->rmbe_update_limit = smc_rmb_wnd_update_limit(bufsize); return 0; } else { return -ENOMEM; -- cgit v1.2.3-55-g7522 From a3fe3d01bd0d7cd6ee7a5e3eebc0926c47954fe7 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 28 Jul 2017 13:56:15 +0200 Subject: net/smc: introduce sg-logic for RMBs The follow-on patch makes use of ib_map_mr_sg() when introducing separate memory regions for RMBs. This function is based on scatterlists; thus this patch introduces scatterlists for RMBs. Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/smc_clc.c | 12 ++++++------ net/smc/smc_core.c | 47 +++++++++++++++++++++++++++++++---------------- net/smc/smc_core.h | 6 ++---- net/smc/smc_ib.c | 31 +++++++++++++++++++++++++++++++ net/smc/smc_ib.h | 6 ++++++ 5 files changed, 76 insertions(+), 26 deletions(-) diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 03ec058d18df..15cb76019009 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -204,13 +204,13 @@ int smc_clc_send_confirm(struct smc_sock *smc) memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); hton24(cclc.qpn, link->roce_qp->qp_num); cclc.rmb_rkey = - htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]); + htonl(link->roce_pd->unsafe_global_rkey); cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ cclc.rmbe_alert_token = htonl(conn->alert_token_local); cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); cclc.rmbe_size = conn->rmbe_size_short; - cclc.rmb_dma_addr = - cpu_to_be64((u64)conn->rmb_desc->dma_addr[SMC_SINGLE_LINK]); + cclc.rmb_dma_addr = cpu_to_be64( + (u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl)); hton24(cclc.psn, link->psn_initial); memcpy(cclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); @@ -256,13 +256,13 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact) memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); hton24(aclc.qpn, link->roce_qp->qp_num); aclc.rmb_rkey = - htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]); + htonl(link->roce_pd->unsafe_global_rkey); aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ aclc.rmbe_alert_token = htonl(conn->alert_token_local); aclc.qp_mtu = link->path_mtu; aclc.rmbe_size = conn->rmbe_size_short, - aclc.rmb_dma_addr = - cpu_to_be64((u64)conn->rmb_desc->dma_addr[SMC_SINGLE_LINK]); + aclc.rmb_dma_addr = cpu_to_be64( + 
(u64)sg_dma_address(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl)); hton24(aclc.psn, link->psn_initial); memcpy(aclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)); diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 6159488cb94b..bfdbda795f67 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -266,17 +266,16 @@ static void smc_lgr_free_sndbufs(struct smc_link_group *lgr) static void smc_lgr_free_rmbs(struct smc_link_group *lgr) { - struct smc_buf_desc *rmb_desc, *bf_desc; struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; + struct smc_buf_desc *rmb_desc, *bf_desc; int i; for (i = 0; i < SMC_RMBE_SIZES; i++) { list_for_each_entry_safe(rmb_desc, bf_desc, &lgr->rmbs[i], list) { list_del(&rmb_desc->list); - smc_ib_buf_unmap(lnk->smcibdev, - smc_uncompress_bufsize(i), - rmb_desc, DMA_FROM_DEVICE); + smc_ib_buf_unmap_sg(lnk->smcibdev, rmb_desc, + DMA_FROM_DEVICE); kfree(rmb_desc->cpu_addr); kfree(rmb_desc); } @@ -580,38 +579,54 @@ int smc_rmb_create(struct smc_sock *smc) for (bufsize_short = smc_compress_bufsize(smc->sk.sk_rcvbuf / 2); bufsize_short >= 0; bufsize_short--) { bufsize = smc_uncompress_bufsize(bufsize_short); + if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC) + continue; + /* check for reusable rmb_slot in the link group */ rmb_desc = smc_rmb_get_slot(lgr, bufsize_short); if (rmb_desc) { memset(rmb_desc->cpu_addr, 0, bufsize); break; /* found reusable slot */ } + /* try to alloc a new RMB */ rmb_desc = kzalloc(sizeof(*rmb_desc), GFP_KERNEL); if (!rmb_desc) break; /* give up with -ENOMEM */ - rmb_desc->cpu_addr = kzalloc(bufsize, - GFP_KERNEL | __GFP_NOWARN | - __GFP_NOMEMALLOC | - __GFP_NORETRY); + rmb_desc->cpu_addr = + (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | + __GFP_NOMEMALLOC | + __GFP_NORETRY | __GFP_ZERO, + get_order(bufsize)); if (!rmb_desc->cpu_addr) { kfree(rmb_desc); rmb_desc = NULL; - /* if RMB allocation has failed, - * try a smaller one - */ continue; } - rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev, - bufsize, rmb_desc, DMA_FROM_DEVICE); + rmb_desc->order = get_order(bufsize); + + rc = sg_alloc_table(&rmb_desc->sgt[SMC_SINGLE_LINK], 1, + GFP_KERNEL); if (rc) { - kfree(rmb_desc->cpu_addr); + free_pages((unsigned long)rmb_desc->cpu_addr, + rmb_desc->order); + kfree(rmb_desc); + rmb_desc = NULL; + continue; + } + sg_set_buf(rmb_desc->sgt[SMC_SINGLE_LINK].sgl, + rmb_desc->cpu_addr, bufsize); + + rc = smc_ib_buf_map_sg(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + rmb_desc, DMA_FROM_DEVICE); + if (rc != 1) { + sg_free_table(&rmb_desc->sgt[SMC_SINGLE_LINK]); + free_pages((unsigned long)rmb_desc->cpu_addr, + rmb_desc->order); kfree(rmb_desc); rmb_desc = NULL; continue; /* if mapping failed, try smaller one */ } - rmb_desc->rkey[SMC_SINGLE_LINK] = - lgr->lnk[SMC_SINGLE_LINK].roce_pd->unsafe_global_rkey; rmb_desc->used = 1; write_lock_bh(&lgr->rmbs_lock); list_add(&rmb_desc->list, &lgr->rmbs[bufsize_short]); diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index b013cb43a327..0ee450d69907 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -93,10 +93,8 @@ struct smc_buf_desc { u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; /* mapped address of buffer */ void *cpu_addr; /* virtual address of buffer */ - u32 rkey[SMC_LINKS_PER_LGR_MAX]; - /* for rmb only: - * rkey provided to peer - */ + struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */ + u32 order; /* allocation order */ u32 used; /* currently used / unused */ }; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index b31715505a35..fcfeb89b05d9 100644 --- a/net/smc/smc_ib.c 
+++ b/net/smc/smc_ib.c @@ -283,6 +283,37 @@ void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int buf_size, buf_slot->dma_addr[SMC_SINGLE_LINK] = 0; } +/* Map a new TX or RX buffer SG-table to DMA */ +int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction) +{ + int mapped_nents; + + mapped_nents = ib_dma_map_sg(smcibdev->ibdev, + buf_slot->sgt[SMC_SINGLE_LINK].sgl, + buf_slot->sgt[SMC_SINGLE_LINK].orig_nents, + data_direction); + if (!mapped_nents) + return -ENOMEM; + + return mapped_nents; +} + +void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction) +{ + if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address) + return; /* already unmapped */ + + ib_dma_unmap_sg(smcibdev->ibdev, + buf_slot->sgt[SMC_SINGLE_LINK].sgl, + buf_slot->sgt[SMC_SINGLE_LINK].orig_nents, + data_direction); + buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0; +} + static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport) { struct net_device *ndev; diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index b567152a526d..b30e387854b6 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -57,6 +57,12 @@ int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size, void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int bufsize, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction); +int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction); +void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction); void smc_ib_dealloc_protection_domain(struct smc_link *lnk); int smc_ib_create_protection_domain(struct smc_link *lnk); void smc_ib_destroy_queue_pair(struct smc_link *lnk); -- cgit v1.2.3-55-g7522 From 897e1c245773d93f26f125a99674f585a3aeef5d Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 28 Jul 2017 13:56:16 +0200 Subject: net/smc: use separate memory regions for RMBs SMC currently uses the unsafe_global_rkey of the protection domain, which exposes all memory for remote reads and writes once a connection is established. This patch introduces separate memory regions with separate rkeys for every RMB. Now the unsafe_global_rkey of the protection domain is no longer needed. Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/smc_clc.c | 4 ++-- net/smc/smc_core.c | 18 ++++++++++++++++++ net/smc/smc_core.h | 6 +++++- net/smc/smc_ib.c | 45 +++++++++++++++++++++++++++++++++++++++++++-- net/smc/smc_ib.h | 5 +++-- 5 files changed, 71 insertions(+), 7 deletions(-) diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 15cb76019009..3934913ab835 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -204,7 +204,7 @@ int smc_clc_send_confirm(struct smc_sock *smc) memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); hton24(cclc.qpn, link->roce_qp->qp_num); cclc.rmb_rkey = - htonl(link->roce_pd->unsafe_global_rkey); + htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ cclc.rmbe_alert_token = htonl(conn->alert_token_local); cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); @@ -256,7 +256,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact) memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); hton24(aclc.qpn, link->roce_qp->qp_num); aclc.rmb_rkey = - htonl(link->roce_pd->unsafe_global_rkey); + htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ aclc.rmbe_alert_token = htonl(conn->alert_token_local); aclc.qp_mtu = link->path_mtu; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index bfdbda795f67..f1dd4e1cd3e1 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -218,6 +218,7 @@ static void smc_sndbuf_unuse(struct smc_connection *conn) static void smc_rmb_unuse(struct smc_connection *conn) { if (conn->rmb_desc) { + conn->rmb_desc->reused = true; conn->rmb_desc->used = 0; conn->rmbe_size = 0; } @@ -274,6 +275,8 @@ static void smc_lgr_free_rmbs(struct smc_link_group *lgr) list_for_each_entry_safe(rmb_desc, bf_desc, &lgr->rmbs[i], list) { list_del(&rmb_desc->list); + smc_ib_put_memory_region( + rmb_desc->mr_rx[SMC_SINGLE_LINK]); smc_ib_buf_unmap_sg(lnk->smcibdev, rmb_desc, DMA_FROM_DEVICE); kfree(rmb_desc->cpu_addr); @@ -627,6 +630,21 @@ int smc_rmb_create(struct smc_sock *smc) rmb_desc = NULL; continue; /* if mapping failed, try smaller one */ } + rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd, + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_LOCAL_WRITE, + rmb_desc); + if (rc) { + smc_ib_buf_unmap_sg(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + rmb_desc, DMA_FROM_DEVICE); + sg_free_table(&rmb_desc->sgt[SMC_SINGLE_LINK]); + free_pages((unsigned long)rmb_desc->cpu_addr, + rmb_desc->order); + kfree(rmb_desc); + rmb_desc = NULL; + continue; + } + rmb_desc->used = 1; write_lock_bh(&lgr->rmbs_lock); list_add(&rmb_desc->list, &lgr->rmbs[bufsize_short]); diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 0ee450d69907..17b5fea09901 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -94,8 +94,13 @@ struct smc_buf_desc { /* mapped address of buffer */ void *cpu_addr; /* virtual address of buffer */ struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */ + struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; + /* for rmb only: memory region + * incl. 
rkey provided to peer + */ u32 order; /* allocation order */ u32 used; /* currently used / unused */ + bool reused; /* new created / reused */ }; struct smc_rtoken { /* address/key of remote RMB */ @@ -175,5 +180,4 @@ int smc_sndbuf_create(struct smc_sock *smc); int smc_rmb_create(struct smc_sock *smc); int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_clc_msg_accept_confirm *clc); - #endif diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index fcfeb89b05d9..08233492ec45 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -192,8 +192,7 @@ int smc_ib_create_protection_domain(struct smc_link *lnk) { int rc; - lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, - IB_PD_UNSAFE_GLOBAL_RKEY); + lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0); rc = PTR_ERR_OR_ZERO(lnk->roce_pd); if (IS_ERR(lnk->roce_pd)) lnk->roce_pd = NULL; @@ -254,6 +253,48 @@ int smc_ib_create_queue_pair(struct smc_link *lnk) return rc; } +void smc_ib_put_memory_region(struct ib_mr *mr) +{ + ib_dereg_mr(mr); +} + +static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot) +{ + unsigned int offset = 0; + int sg_num; + + /* map the largest prefix of a dma mapped SG list */ + sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK], + buf_slot->sgt[SMC_SINGLE_LINK].sgl, + buf_slot->sgt[SMC_SINGLE_LINK].orig_nents, + &offset, PAGE_SIZE); + + return sg_num; +} + +/* Allocate a memory region and map the dma mapped SG list of buf_slot */ +int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, + struct smc_buf_desc *buf_slot) +{ + if (buf_slot->mr_rx[SMC_SINGLE_LINK]) + return 0; /* already done */ + + buf_slot->mr_rx[SMC_SINGLE_LINK] = + ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order); + if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) { + int rc; + + rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]); + buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL; + return rc; + } + + if (smc_ib_map_mr_sg(buf_slot) != 1) + return -EINVAL; + + return 0; +} + /* map a new TX or RX buffer to DMA */ int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size, struct smc_buf_desc *buf_slot, diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index b30e387854b6..b57d29f29042 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -71,6 +71,7 @@ int smc_ib_ready_link(struct smc_link *lnk); int smc_ib_modify_qp_rts(struct smc_link *lnk); int smc_ib_modify_qp_reset(struct smc_link *lnk); long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev); - - +int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, + struct smc_buf_desc *buf_slot); +void smc_ib_put_memory_region(struct ib_mr *mr); #endif -- cgit v1.2.3-55-g7522 From 652a1e41eca7dfaacc47a79badb4a51aea570d35 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 28 Jul 2017 13:56:17 +0200 Subject: net/smc: register RMB-related memory region A memory region created for a new RMB must be registered explicitly, before the peer can make use of it for remote DMA transfer. Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/af_smc.c | 38 ++++++++++++++++++++++++++++++++ net/smc/smc_core.c | 1 - net/smc/smc_core.h | 12 +++++++++++ net/smc/smc_ib.c | 2 +- net/smc/smc_wr.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ net/smc/smc_wr.h | 1 + 6 files changed, 115 insertions(+), 2 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 120a7b9b4d8e..e0a95d50bf87 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -338,6 +338,12 @@ static int smc_clnt_conf_first_link(struct smc_sock *smc, union ib_gid *gid) return SMC_CLC_DECL_INTERR; smc_wr_remember_qp_attr(link); + + rc = smc_wr_reg_send(link, + smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]); + if (rc) + return SMC_CLC_DECL_INTERR; + /* send CONFIRM LINK response over RoCE fabric */ rc = smc_llc_send_confirm_link(link, link->smcibdev->mac[link->ibport - 1], @@ -459,6 +465,18 @@ static int smc_connect_rdma(struct smc_sock *smc) reason_code = SMC_CLC_DECL_INTERR; goto decline_rdma_unlock; } + } else { + struct smc_buf_desc *buf_desc = smc->conn.rmb_desc; + + if (!buf_desc->reused) { + /* register memory region for new rmb */ + rc = smc_wr_reg_send(link, + buf_desc->mr_rx[SMC_SINGLE_LINK]); + if (rc) { + reason_code = SMC_CLC_DECL_INTERR; + goto decline_rdma_unlock; + } + } } rc = smc_clc_send_confirm(smc); @@ -692,6 +710,12 @@ static int smc_serv_conf_first_link(struct smc_sock *smc) int rc; link = &lgr->lnk[SMC_SINGLE_LINK]; + + rc = smc_wr_reg_send(link, + smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]); + if (rc) + return SMC_CLC_DECL_INTERR; + /* send CONFIRM LINK request to client over the RoCE fabric */ rc = smc_llc_send_confirm_link(link, link->smcibdev->mac[link->ibport - 1], @@ -803,6 +827,20 @@ static void smc_listen_work(struct work_struct *work) smc_close_init(new_smc); smc_rx_init(new_smc); + if (local_contact != SMC_FIRST_CONTACT) { + struct smc_buf_desc *buf_desc = new_smc->conn.rmb_desc; + + if (!buf_desc->reused) { + /* register memory region for new rmb */ + rc = smc_wr_reg_send(link, + buf_desc->mr_rx[SMC_SINGLE_LINK]); + if (rc) { + reason_code = SMC_CLC_DECL_INTERR; + goto decline_rdma; + } + } + } + rc = smc_clc_send_accept(new_smc, local_contact); if (rc) goto out_err; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index f1dd4e1cd3e1..87bb3e4771a8 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -175,7 +175,6 @@ static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr, rc = smc_wr_alloc_link_mem(lnk); if (rc) goto free_lgr; - init_waitqueue_head(&lnk->wr_tx_wait); rc = smc_ib_create_protection_domain(lnk); if (rc) goto free_link_mem; diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 17b5fea09901..f7b40bdbf24c 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -37,6 +37,14 @@ struct smc_wr_buf { u8 raw[SMC_WR_BUF_SIZE]; }; +#define SMC_WR_REG_MR_WAIT_TIME (5 * HZ)/* wait time for ib_wr_reg_mr result */ + +enum smc_wr_reg_state { + POSTED, /* ib_wr_reg_mr request posted */ + CONFIRMED, /* ib_wr_reg_mr response: successful */ + FAILED /* ib_wr_reg_mr response: failure */ +}; + struct smc_link { struct smc_ib_device *smcibdev; /* ib-device */ u8 ibport; /* port - values 1 | 2 */ @@ -65,6 +73,10 @@ struct smc_link { u64 wr_rx_id; /* seq # of last recv WR */ u32 wr_rx_cnt; /* number of WR recv buffers */ + struct ib_reg_wr wr_reg; /* WR register memory region */ + wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */ + enum smc_wr_reg_state wr_reg_state; /* state of wr_reg request */ + union ib_gid gid; /* gid matching used vlan id */ u32 peer_qpn; /* QP 
number of peer */ enum ib_mtu path_mtu; /* used mtu */ diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 08233492ec45..85e1831f591e 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -231,10 +231,10 @@ int smc_ib_create_queue_pair(struct smc_link *lnk) .recv_cq = lnk->smcibdev->roce_cq_recv, .srq = NULL, .cap = { - .max_send_wr = SMC_WR_BUF_CNT, /* include unsolicited rdma_writes as well, * there are max. 2 RDMA_WRITE per 1 WR_SEND */ + .max_send_wr = SMC_WR_BUF_CNT * 3, .max_recv_wr = SMC_WR_BUF_CNT * 3, .max_send_sge = SMC_IB_MAX_SEND_SGE, .max_recv_sge = 1, diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 874ee9f9d796..ab56bda66783 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c @@ -68,6 +68,16 @@ static inline void smc_wr_tx_process_cqe(struct ib_wc *wc) int i; link = wc->qp->qp_context; + + if (wc->opcode == IB_WC_REG_MR) { + if (wc->status) + link->wr_reg_state = FAILED; + else + link->wr_reg_state = CONFIRMED; + wake_up(&link->wr_reg_wait); + return; + } + pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id); if (pnd_snd_idx == link->wr_tx_cnt) return; @@ -243,6 +253,52 @@ int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv) return rc; } +/* Register a memory region and wait for result. */ +int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr) +{ + struct ib_send_wr *failed_wr = NULL; + int rc; + + ib_req_notify_cq(link->smcibdev->roce_cq_send, + IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS); + link->wr_reg_state = POSTED; + link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr; + link->wr_reg.mr = mr; + link->wr_reg.key = mr->rkey; + failed_wr = &link->wr_reg.wr; + rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, &failed_wr); + WARN_ON(failed_wr != &link->wr_reg.wr); + if (rc) + return rc; + + rc = wait_event_interruptible_timeout(link->wr_reg_wait, + (link->wr_reg_state != POSTED), + SMC_WR_REG_MR_WAIT_TIME); + if (!rc) { + /* timeout - terminate connections */ + struct smc_link_group *lgr; + + lgr = container_of(link, struct smc_link_group, + lnk[SMC_SINGLE_LINK]); + smc_lgr_terminate(lgr); + return -EPIPE; + } + if (rc == -ERESTARTSYS) + return -EINTR; + switch (link->wr_reg_state) { + case CONFIRMED: + rc = 0; + break; + case FAILED: + rc = -EIO; + break; + case POSTED: + rc = -EPIPE; + break; + } + return rc; +} + void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_rx_hdr_type, smc_wr_tx_filter filter, smc_wr_tx_dismisser dismisser, @@ -458,6 +514,11 @@ static void smc_wr_init_sge(struct smc_link *lnk) lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i]; lnk->wr_rx_ibs[i].num_sge = 1; } + lnk->wr_reg.wr.next = NULL; + lnk->wr_reg.wr.num_sge = 0; + lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED; + lnk->wr_reg.wr.opcode = IB_WR_REG_MR; + lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; } void smc_wr_free_link(struct smc_link *lnk) @@ -602,6 +663,8 @@ int smc_wr_create_link(struct smc_link *lnk) smc_wr_init_sge(lnk); memset(lnk->wr_tx_mask, 0, BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask)); + init_waitqueue_head(&lnk->wr_tx_wait); + init_waitqueue_head(&lnk->wr_reg_wait); return rc; dma_unmap: diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index 0b9beeda6053..45eb53833052 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h @@ -102,5 +102,6 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type, int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler); int smc_wr_rx_post_init(struct smc_link *link); void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context); +int 
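/*
 * Summary of the declaration that follows (condensed from the smc_wr.c
 * implementation in this patch; illustrative, not authoritative): the helper
 * posts an IB_WR_REG_MR work request for @mr on @link and sleeps on
 * link->wr_reg_wait until the completion handler flips wr_reg_state from
 * POSTED to CONFIRMED or FAILED, or until SMC_WR_REG_MR_WAIT_TIME expires,
 * in which case the link group is terminated. Returns 0 on success, -EIO if
 * the registration failed, -EPIPE on timeout, -EINTR when interrupted.
 * Typical caller, as in af_smc.c of this series:
 *
 *	rc = smc_wr_reg_send(link, smc->conn.rmb_desc->mr_rx[SMC_SINGLE_LINK]);
 *	if (rc)
 *		return SMC_CLC_DECL_INTERR;
 */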
smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr); #endif /* SMC_WR_H */ -- cgit v1.2.3-55-g7522 From d5b361b0dc3aa319522ef593b9bbf19ac3db23c5 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 28 Jul 2017 13:56:18 +0200 Subject: net/smc: remove Kconfig warning Now separate memory regions are created and registered for separate RMBs. The unsafe_global_rkey of the protection domain is no longer used. Thus the exposing memory warning can be removed. Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/Kconfig | 4 ---- 1 file changed, 4 deletions(-) diff --git a/net/smc/Kconfig b/net/smc/Kconfig index 33954852f3f8..c717ef0896aa 100644 --- a/net/smc/Kconfig +++ b/net/smc/Kconfig @@ -8,10 +8,6 @@ config SMC The Linux implementation of the SMC-R solution is designed as a separate socket family SMC. - Warning: SMC will expose all memory for remote reads and writes - once a connection is established. Don't enable this option except - for tightly controlled lab environment. - Select this option if you want to run SMC socket applications config SMC_DIAG -- cgit v1.2.3-55-g7522 From 9d8fb6173477ad61364eeab652a87c2a295fa601 Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 28 Jul 2017 13:56:19 +0200 Subject: net/smc: introduce sg-logic for send buffers SMC send buffers are processed the same way as RMBs. Since RMBs have been converted to sg-logic, do the same for send buffers. Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/smc_core.c | 46 +++++++++++++++++++++++++++++++++++----------- net/smc/smc_core.h | 2 -- net/smc/smc_ib.c | 29 ----------------------------- net/smc/smc_ib.h | 6 ------ net/smc/smc_tx.c | 6 +++--- 5 files changed, 38 insertions(+), 51 deletions(-) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 87bb3e4771a8..8795c7ed9ce4 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -248,6 +248,7 @@ static void smc_link_clear(struct smc_link *lnk) static void smc_lgr_free_sndbufs(struct smc_link_group *lgr) { + struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; struct smc_buf_desc *sndbuf_desc, *bf_desc; int i; @@ -255,10 +256,11 @@ static void smc_lgr_free_sndbufs(struct smc_link_group *lgr) list_for_each_entry_safe(sndbuf_desc, bf_desc, &lgr->sndbufs[i], list) { list_del(&sndbuf_desc->list); - smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev, - smc_uncompress_bufsize(i), - sndbuf_desc, DMA_TO_DEVICE); - kfree(sndbuf_desc->cpu_addr); + smc_ib_buf_unmap_sg(lnk->smcibdev, sndbuf_desc, + DMA_TO_DEVICE); + sg_free_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK]); + free_pages((unsigned long)sndbuf_desc->cpu_addr, + sndbuf_desc->order); kfree(sndbuf_desc); } } @@ -517,6 +519,9 @@ int smc_sndbuf_create(struct smc_sock *smc) for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2); bufsize_short >= 0; bufsize_short--) { bufsize = smc_uncompress_bufsize(bufsize_short); + if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC) + continue; + /* check for reusable sndbuf_slot in the link group */ sndbuf_desc = smc_sndbuf_get_slot(lgr, bufsize_short); if (sndbuf_desc) { @@ -527,10 +532,12 @@ int smc_sndbuf_create(struct smc_sock *smc) sndbuf_desc = kzalloc(sizeof(*sndbuf_desc), GFP_KERNEL); if (!sndbuf_desc) break; /* give up with -ENOMEM */ - sndbuf_desc->cpu_addr = kzalloc(bufsize, - GFP_KERNEL | __GFP_NOWARN | - __GFP_NOMEMALLOC | - __GFP_NORETRY); + + sndbuf_desc->cpu_addr = + (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | + __GFP_NOMEMALLOC | + __GFP_NORETRY | __GFP_ZERO, + get_order(bufsize)); if 
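/* The flags on the allocation above (__GFP_NOWARN | __GFP_NOMEMALLOC |
 * __GFP_NORETRY) keep a failed high-order allocation cheap and quiet: no
 * emergency reserves, no aggressive retries and no warning; the check below
 * simply falls back to the next smaller buffer size and tries again.
 */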
(!sndbuf_desc->cpu_addr) { kfree(sndbuf_desc); sndbuf_desc = NULL; @@ -539,14 +546,31 @@ int smc_sndbuf_create(struct smc_sock *smc) */ continue; } - rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev, - bufsize, sndbuf_desc, DMA_TO_DEVICE); + sndbuf_desc->order = get_order(bufsize); + + rc = sg_alloc_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK], 1, + GFP_KERNEL); if (rc) { - kfree(sndbuf_desc->cpu_addr); + free_pages((unsigned long)sndbuf_desc->cpu_addr, + sndbuf_desc->order); + kfree(sndbuf_desc); + sndbuf_desc = NULL; + continue; + } + sg_set_buf(sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl, + sndbuf_desc->cpu_addr, bufsize); + + rc = smc_ib_buf_map_sg(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + sndbuf_desc, DMA_TO_DEVICE); + if (rc != 1) { + sg_free_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK]); + free_pages((unsigned long)sndbuf_desc->cpu_addr, + sndbuf_desc->order); kfree(sndbuf_desc); sndbuf_desc = NULL; continue; /* if mapping failed, try smaller one */ } + sndbuf_desc->used = 1; write_lock_bh(&lgr->sndbufs_lock); list_add(&sndbuf_desc->list, &lgr->sndbufs[bufsize_short]); diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index f7b40bdbf24c..72c25cb3eb89 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -102,8 +102,6 @@ struct smc_link { /* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */ struct smc_buf_desc { struct list_head list; - u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; - /* mapped address of buffer */ void *cpu_addr; /* virtual address of buffer */ struct sg_table sgt[SMC_LINKS_PER_LGR_MAX];/* virtual buffer */ struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 85e1831f591e..021f061609f5 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -295,35 +295,6 @@ int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, return 0; } -/* map a new TX or RX buffer to DMA */ -int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size, - struct smc_buf_desc *buf_slot, - enum dma_data_direction data_direction) -{ - int rc = 0; - - if (buf_slot->dma_addr[SMC_SINGLE_LINK]) - return rc; /* already mapped */ - buf_slot->dma_addr[SMC_SINGLE_LINK] = - ib_dma_map_single(smcibdev->ibdev, buf_slot->cpu_addr, - buf_size, data_direction); - if (ib_dma_mapping_error(smcibdev->ibdev, - buf_slot->dma_addr[SMC_SINGLE_LINK])) - rc = -EIO; - return rc; -} - -void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int buf_size, - struct smc_buf_desc *buf_slot, - enum dma_data_direction data_direction) -{ - if (!buf_slot->dma_addr[SMC_SINGLE_LINK]) - return; /* already unmapped */ - ib_dma_unmap_single(smcibdev->ibdev, *buf_slot->dma_addr, buf_size, - data_direction); - buf_slot->dma_addr[SMC_SINGLE_LINK] = 0; -} - /* Map a new TX or RX buffer SG-table to DMA */ int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev, struct smc_buf_desc *buf_slot, diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index b57d29f29042..72acb19ffc67 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -51,12 +51,6 @@ int smc_ib_register_client(void) __init; void smc_ib_unregister_client(void); bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport); int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport); -int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size, - struct smc_buf_desc *buf_slot, - enum dma_data_direction data_direction); -void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int bufsize, - struct smc_buf_desc *buf_slot, - enum dma_data_direction data_direction); int smc_ib_buf_map_sg(struct smc_ib_device 
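/*
 * Condensed sketch of the buffer-setup pattern this series converges on,
 * assuming the types and includes of smc_core.c; the helper name is
 * hypothetical and error unwinding is simplified (the real code cleans up
 * via smc_buf_free() and retries smaller sizes in __smc_buf_create()).
 * The steps: back the buffer with contiguous pages, describe it with a
 * one-entry scatterlist, DMA-map it, and for an RMB attach a per-buffer
 * memory region instead of the PD-wide unsafe_global_rkey.
 */
static int smc_buf_setup_sketch(struct smc_link *lnk,
				struct smc_buf_desc *desc,
				int bufsize, bool is_rmb)
{
	desc->order = get_order(bufsize);
	desc->cpu_addr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO |
						  __GFP_NOWARN | __GFP_NORETRY,
						  desc->order);
	if (!desc->cpu_addr)
		return -ENOMEM;	/* caller would retry with a smaller size */

	/* one-entry sg table describing the whole buffer */
	if (sg_alloc_table(&desc->sgt[SMC_SINGLE_LINK], 1, GFP_KERNEL))
		return -ENOMEM;
	sg_set_buf(desc->sgt[SMC_SINGLE_LINK].sgl, desc->cpu_addr, bufsize);

	/* SMC relies on the buffer mapping to exactly one DMA address */
	if (ib_dma_map_sg(lnk->smcibdev->ibdev,
			  desc->sgt[SMC_SINGLE_LINK].sgl,
			  desc->sgt[SMC_SINGLE_LINK].orig_nents,
			  is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE) != 1)
		return -EAGAIN;

	if (is_rmb)	/* separate rkey per RMB, advertised via CLC */
		return smc_ib_get_memory_region(lnk->roce_pd,
						IB_ACCESS_REMOTE_WRITE |
						IB_ACCESS_LOCAL_WRITE, desc);
	return 0;
}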
*smcibdev, struct smc_buf_desc *buf_slot, enum dma_data_direction data_direction); diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index 21ec1832ab51..f4d58e2dd559 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -277,6 +277,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) struct smc_link_group *lgr = conn->lgr; int to_send, rmbespace; struct smc_link *link; + dma_addr_t dma_addr; int num_sges; int rc; @@ -334,12 +335,11 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) src_len = conn->sndbuf_size - sent.count; } src_len_sum = src_len; + dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); for (dstchunk = 0; dstchunk < 2; dstchunk++) { num_sges = 0; for (srcchunk = 0; srcchunk < 2; srcchunk++) { - sges[srcchunk].addr = - conn->sndbuf_desc->dma_addr[SMC_SINGLE_LINK] + - src_off; + sges[srcchunk].addr = dma_addr + src_off; sges[srcchunk].length = src_len; sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; num_sges++; -- cgit v1.2.3-55-g7522 From 3e034725c0d814370a75b249c0ba794262cacd0f Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 28 Jul 2017 13:56:20 +0200 Subject: net/smc: common functions for RMBs and send buffers Creation and deletion of SMC receive and send buffers shares a high amount of common code . This patch introduces common functions to get rid of duplicate code. Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/af_smc.c | 16 +-- net/smc/smc_core.c | 348 ++++++++++++++++++++++------------------------------- net/smc/smc_core.h | 3 +- 3 files changed, 148 insertions(+), 219 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index e0a95d50bf87..75518879b68a 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -436,12 +436,8 @@ static int smc_connect_rdma(struct smc_sock *smc) smc_conn_save_peer_info(smc, &aclc); - rc = smc_sndbuf_create(smc); - if (rc) { - reason_code = SMC_CLC_DECL_MEM; - goto decline_rdma_unlock; - } - rc = smc_rmb_create(smc); + /* create send buffer and rmb */ + rc = smc_buf_create(smc); if (rc) { reason_code = SMC_CLC_DECL_MEM; goto decline_rdma_unlock; @@ -813,12 +809,8 @@ static void smc_listen_work(struct work_struct *work) } link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK]; - rc = smc_sndbuf_create(new_smc); - if (rc) { - reason_code = SMC_CLC_DECL_MEM; - goto decline_rdma; - } - rc = smc_rmb_create(new_smc); + /* create send buffer and rmb */ + rc = smc_buf_create(new_smc); if (rc) { reason_code = SMC_CLC_DECL_MEM; goto decline_rdma; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 8795c7ed9ce4..ab8cdac5edb3 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -206,16 +206,12 @@ out: return rc; } -static void smc_sndbuf_unuse(struct smc_connection *conn) +static void smc_buf_unuse(struct smc_connection *conn) { if (conn->sndbuf_desc) { conn->sndbuf_desc->used = 0; conn->sndbuf_size = 0; } -} - -static void smc_rmb_unuse(struct smc_connection *conn) -{ if (conn->rmb_desc) { conn->rmb_desc->reused = true; conn->rmb_desc->used = 0; @@ -232,8 +228,7 @@ void smc_conn_free(struct smc_connection *conn) return; smc_cdc_tx_dismiss_slots(conn); smc_lgr_unregister_conn(conn); - smc_rmb_unuse(conn); - smc_sndbuf_unuse(conn); + smc_buf_unuse(conn); } static void smc_link_clear(struct smc_link *lnk) @@ -246,51 +241,57 @@ static void smc_link_clear(struct smc_link *lnk) smc_wr_free_link_mem(lnk); } -static void smc_lgr_free_sndbufs(struct smc_link_group *lgr) +static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk, + bool is_rmb) { - 
struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; - struct smc_buf_desc *sndbuf_desc, *bf_desc; - int i; - - for (i = 0; i < SMC_RMBE_SIZES; i++) { - list_for_each_entry_safe(sndbuf_desc, bf_desc, &lgr->sndbufs[i], - list) { - list_del(&sndbuf_desc->list); - smc_ib_buf_unmap_sg(lnk->smcibdev, sndbuf_desc, - DMA_TO_DEVICE); - sg_free_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK]); - free_pages((unsigned long)sndbuf_desc->cpu_addr, - sndbuf_desc->order); - kfree(sndbuf_desc); - } + if (is_rmb) { + if (buf_desc->mr_rx[SMC_SINGLE_LINK]) + smc_ib_put_memory_region( + buf_desc->mr_rx[SMC_SINGLE_LINK]); + smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc, + DMA_FROM_DEVICE); + } else { + smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc, + DMA_TO_DEVICE); } + sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]); + if (buf_desc->cpu_addr) + free_pages((unsigned long)buf_desc->cpu_addr, buf_desc->order); + kfree(buf_desc); } -static void smc_lgr_free_rmbs(struct smc_link_group *lgr) +static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb) { struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; - struct smc_buf_desc *rmb_desc, *bf_desc; + struct smc_buf_desc *buf_desc, *bf_desc; + struct list_head *buf_list; int i; for (i = 0; i < SMC_RMBE_SIZES; i++) { - list_for_each_entry_safe(rmb_desc, bf_desc, &lgr->rmbs[i], + if (is_rmb) + buf_list = &lgr->rmbs[i]; + else + buf_list = &lgr->sndbufs[i]; + list_for_each_entry_safe(buf_desc, bf_desc, buf_list, list) { - list_del(&rmb_desc->list); - smc_ib_put_memory_region( - rmb_desc->mr_rx[SMC_SINGLE_LINK]); - smc_ib_buf_unmap_sg(lnk->smcibdev, rmb_desc, - DMA_FROM_DEVICE); - kfree(rmb_desc->cpu_addr); - kfree(rmb_desc); + list_del(&buf_desc->list); + smc_buf_free(buf_desc, lnk, is_rmb); } } } +static void smc_lgr_free_bufs(struct smc_link_group *lgr) +{ + /* free send buffers */ + __smc_lgr_free_bufs(lgr, false); + /* free rmbs */ + __smc_lgr_free_bufs(lgr, true); +} + /* remove a link group */ void smc_lgr_free(struct smc_link_group *lgr) { - smc_lgr_free_rmbs(lgr); - smc_lgr_free_sndbufs(lgr); + smc_lgr_free_bufs(lgr); smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]); kfree(lgr); } @@ -455,45 +456,25 @@ out: return rc ? 
rc : local_contact; } -/* try to reuse a sndbuf description slot of the sndbufs list for a certain - * buf_size; if not available, return NULL +/* try to reuse a sndbuf or rmb description slot for a certain + * buffer size; if not available, return NULL */ static inline -struct smc_buf_desc *smc_sndbuf_get_slot(struct smc_link_group *lgr, - int compressed_bufsize) +struct smc_buf_desc *smc_buf_get_slot(struct smc_link_group *lgr, + int compressed_bufsize, + rwlock_t *lock, + struct list_head *buf_list) { - struct smc_buf_desc *sndbuf_slot; - - read_lock_bh(&lgr->sndbufs_lock); - list_for_each_entry(sndbuf_slot, &lgr->sndbufs[compressed_bufsize], - list) { - if (cmpxchg(&sndbuf_slot->used, 0, 1) == 0) { - read_unlock_bh(&lgr->sndbufs_lock); - return sndbuf_slot; - } - } - read_unlock_bh(&lgr->sndbufs_lock); - return NULL; -} + struct smc_buf_desc *buf_slot; -/* try to reuse an rmb description slot of the rmbs list for a certain - * rmbe_size; if not available, return NULL - */ -static inline -struct smc_buf_desc *smc_rmb_get_slot(struct smc_link_group *lgr, - int compressed_bufsize) -{ - struct smc_buf_desc *rmb_slot; - - read_lock_bh(&lgr->rmbs_lock); - list_for_each_entry(rmb_slot, &lgr->rmbs[compressed_bufsize], - list) { - if (cmpxchg(&rmb_slot->used, 0, 1) == 0) { - read_unlock_bh(&lgr->rmbs_lock); - return rmb_slot; + read_lock_bh(lock); + list_for_each_entry(buf_slot, buf_list, list) { + if (cmpxchg(&buf_slot->used, 0, 1) == 0) { + read_unlock_bh(lock); + return buf_slot; } } - read_unlock_bh(&lgr->rmbs_lock); + read_unlock_bh(lock); return NULL; } @@ -506,185 +487,142 @@ static inline int smc_rmb_wnd_update_limit(int rmbe_size) return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); } -/* create the tx buffer for an SMC socket */ -int smc_sndbuf_create(struct smc_sock *smc) +static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) { struct smc_connection *conn = &smc->conn; struct smc_link_group *lgr = conn->lgr; - struct smc_buf_desc *sndbuf_desc; + struct smc_buf_desc *buf_desc = NULL; + struct list_head *buf_list; int bufsize, bufsize_short; + struct smc_link *lnk; + int sk_buf_size; + rwlock_t *lock; int rc; - /* use socket send buffer size (w/o overhead) as start value */ + lnk = &lgr->lnk[SMC_SINGLE_LINK]; + if (is_rmb) + /* use socket recv buffer size (w/o overhead) as start value */ + sk_buf_size = smc->sk.sk_rcvbuf / 2; + else + /* use socket send buffer size (w/o overhead) as start value */ + sk_buf_size = smc->sk.sk_sndbuf / 2; + for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2); bufsize_short >= 0; bufsize_short--) { - bufsize = smc_uncompress_bufsize(bufsize_short); - if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC) - continue; - - /* check for reusable sndbuf_slot in the link group */ - sndbuf_desc = smc_sndbuf_get_slot(lgr, bufsize_short); - if (sndbuf_desc) { - memset(sndbuf_desc->cpu_addr, 0, bufsize); - break; /* found reusable slot */ - } - /* try to alloc a new send buffer */ - sndbuf_desc = kzalloc(sizeof(*sndbuf_desc), GFP_KERNEL); - if (!sndbuf_desc) - break; /* give up with -ENOMEM */ - sndbuf_desc->cpu_addr = - (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | - __GFP_NOMEMALLOC | - __GFP_NORETRY | __GFP_ZERO, - get_order(bufsize)); - if (!sndbuf_desc->cpu_addr) { - kfree(sndbuf_desc); - sndbuf_desc = NULL; - /* if send buffer allocation has failed, - * try a smaller one - */ - continue; - } - sndbuf_desc->order = get_order(bufsize); - - rc = sg_alloc_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK], 1, - GFP_KERNEL); - if (rc) { - 
free_pages((unsigned long)sndbuf_desc->cpu_addr, - sndbuf_desc->order); - kfree(sndbuf_desc); - sndbuf_desc = NULL; - continue; + if (is_rmb) { + lock = &lgr->rmbs_lock; + buf_list = &lgr->rmbs[bufsize_short]; + } else { + lock = &lgr->sndbufs_lock; + buf_list = &lgr->sndbufs[bufsize_short]; } - sg_set_buf(sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl, - sndbuf_desc->cpu_addr, bufsize); - - rc = smc_ib_buf_map_sg(lgr->lnk[SMC_SINGLE_LINK].smcibdev, - sndbuf_desc, DMA_TO_DEVICE); - if (rc != 1) { - sg_free_table(&sndbuf_desc->sgt[SMC_SINGLE_LINK]); - free_pages((unsigned long)sndbuf_desc->cpu_addr, - sndbuf_desc->order); - kfree(sndbuf_desc); - sndbuf_desc = NULL; - continue; /* if mapping failed, try smaller one */ - } - - sndbuf_desc->used = 1; - write_lock_bh(&lgr->sndbufs_lock); - list_add(&sndbuf_desc->list, &lgr->sndbufs[bufsize_short]); - write_unlock_bh(&lgr->sndbufs_lock); - break; - } - if (sndbuf_desc && sndbuf_desc->cpu_addr) { - conn->sndbuf_desc = sndbuf_desc; - conn->sndbuf_size = bufsize; - smc->sk.sk_sndbuf = bufsize * 2; - atomic_set(&conn->sndbuf_space, bufsize); - return 0; - } else { - return -ENOMEM; - } -} - -/* create the RMB for an SMC socket (even though the SMC protocol - * allows more than one RMB-element per RMB, the Linux implementation - * uses just one RMB-element per RMB, i.e. uses an extra RMB for every - * connection in a link group - */ -int smc_rmb_create(struct smc_sock *smc) -{ - struct smc_connection *conn = &smc->conn; - struct smc_link_group *lgr = conn->lgr; - struct smc_buf_desc *rmb_desc; - int bufsize, bufsize_short; - int rc; - - /* use socket recv buffer size (w/o overhead) as start value */ - for (bufsize_short = smc_compress_bufsize(smc->sk.sk_rcvbuf / 2); - bufsize_short >= 0; bufsize_short--) { bufsize = smc_uncompress_bufsize(bufsize_short); if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC) continue; - /* check for reusable rmb_slot in the link group */ - rmb_desc = smc_rmb_get_slot(lgr, bufsize_short); - if (rmb_desc) { - memset(rmb_desc->cpu_addr, 0, bufsize); + /* check for reusable slot in the link group */ + buf_desc = smc_buf_get_slot(lgr, bufsize_short, lock, buf_list); + if (buf_desc) { + memset(buf_desc->cpu_addr, 0, bufsize); break; /* found reusable slot */ } - /* try to alloc a new RMB */ - rmb_desc = kzalloc(sizeof(*rmb_desc), GFP_KERNEL); - if (!rmb_desc) + /* try to allocate the determined number of pages */ + buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); + if (!buf_desc) break; /* give up with -ENOMEM */ - rmb_desc->cpu_addr = + + buf_desc->cpu_addr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_ZERO, get_order(bufsize)); - if (!rmb_desc->cpu_addr) { - kfree(rmb_desc); - rmb_desc = NULL; + if (!buf_desc->cpu_addr) { + kfree(buf_desc); + buf_desc = NULL; continue; } - rmb_desc->order = get_order(bufsize); - rc = sg_alloc_table(&rmb_desc->sgt[SMC_SINGLE_LINK], 1, + rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1, GFP_KERNEL); if (rc) { - free_pages((unsigned long)rmb_desc->cpu_addr, - rmb_desc->order); - kfree(rmb_desc); - rmb_desc = NULL; + smc_buf_free(buf_desc, lnk, is_rmb); + buf_desc = NULL; continue; } - sg_set_buf(rmb_desc->sgt[SMC_SINGLE_LINK].sgl, - rmb_desc->cpu_addr, bufsize); + sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl, + buf_desc->cpu_addr, bufsize); - rc = smc_ib_buf_map_sg(lgr->lnk[SMC_SINGLE_LINK].smcibdev, - rmb_desc, DMA_FROM_DEVICE); + /* map sg table to DMA address */ + rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc, is_rmb ? 
+ DMA_FROM_DEVICE : DMA_TO_DEVICE); + /* SMC protocol depends on mapping to one DMA address only */ if (rc != 1) { - sg_free_table(&rmb_desc->sgt[SMC_SINGLE_LINK]); - free_pages((unsigned long)rmb_desc->cpu_addr, - rmb_desc->order); - kfree(rmb_desc); - rmb_desc = NULL; - continue; /* if mapping failed, try smaller one */ + smc_buf_free(buf_desc, lnk, is_rmb); + buf_desc = NULL; + continue; /* if mapping failed, try smaller one */ } - rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd, - IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_LOCAL_WRITE, - rmb_desc); - if (rc) { - smc_ib_buf_unmap_sg(lgr->lnk[SMC_SINGLE_LINK].smcibdev, - rmb_desc, DMA_FROM_DEVICE); - sg_free_table(&rmb_desc->sgt[SMC_SINGLE_LINK]); - free_pages((unsigned long)rmb_desc->cpu_addr, - rmb_desc->order); - kfree(rmb_desc); - rmb_desc = NULL; - continue; + + /* create a new memory region for the RMB */ + if (is_rmb) { + rc = smc_ib_get_memory_region(lnk->roce_pd, + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_LOCAL_WRITE, + buf_desc); + if (rc) { + smc_buf_free(buf_desc, lnk, is_rmb); + buf_desc = NULL; + continue; + } } - rmb_desc->used = 1; - write_lock_bh(&lgr->rmbs_lock); - list_add(&rmb_desc->list, &lgr->rmbs[bufsize_short]); - write_unlock_bh(&lgr->rmbs_lock); - break; + buf_desc->used = 1; + write_lock_bh(lock); + list_add(&buf_desc->list, buf_list); + write_unlock_bh(lock); + break; /* found */ } - if (rmb_desc && rmb_desc->cpu_addr) { - conn->rmb_desc = rmb_desc; + + if (!buf_desc || !buf_desc->cpu_addr) + return -ENOMEM; + + if (is_rmb) { + conn->rmb_desc = buf_desc; conn->rmbe_size = bufsize; conn->rmbe_size_short = bufsize_short; smc->sk.sk_rcvbuf = bufsize * 2; atomic_set(&conn->bytes_to_rcv, 0); conn->rmbe_update_limit = smc_rmb_wnd_update_limit(bufsize); - return 0; } else { - return -ENOMEM; + conn->sndbuf_desc = buf_desc; + conn->sndbuf_size = bufsize; + smc->sk.sk_sndbuf = bufsize * 2; + atomic_set(&conn->sndbuf_space, bufsize); } + return 0; +} + +/* create the send and receive buffer for an SMC socket; + * receive buffers are called RMBs; + * (even though the SMC protocol allows more than one RMB-element per RMB, + * the Linux implementation uses just one RMB-element per RMB, i.e. uses an + * extra RMB for every connection in a link group + */ +int smc_buf_create(struct smc_sock *smc) +{ + int rc; + + /* create send buffer */ + rc = __smc_buf_create(smc, false); + if (rc) + return rc; + /* create rmb */ + rc = __smc_buf_create(smc, true); + if (rc) + smc_buf_free(smc->conn.sndbuf_desc, + &smc->conn.lgr->lnk[SMC_SINGLE_LINK], false); + return rc; } static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr) diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 72c25cb3eb89..1d713e8e067c 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -186,8 +186,7 @@ struct smc_clc_msg_accept_confirm; void smc_lgr_free(struct smc_link_group *lgr); void smc_lgr_terminate(struct smc_link_group *lgr); -int smc_sndbuf_create(struct smc_sock *smc); -int smc_rmb_create(struct smc_sock *smc); +int smc_buf_create(struct smc_sock *smc); int smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_clc_msg_accept_confirm *clc); #endif -- cgit v1.2.3-55-g7522 From b33982c3a6838d13024ae5a32e1f21897767b04b Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 28 Jul 2017 13:56:21 +0200 Subject: net/smc: cleanup function __smc_buf_create() Split function __smc_buf_create() for better readability. Signed-off-by: Ursula Braun Signed-off-by: David S. 
Miller --- net/smc/smc_core.c | 114 +++++++++++++++++++++++++++++------------------------ 1 file changed, 63 insertions(+), 51 deletions(-) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index ab8cdac5edb3..447bd52da0e2 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -487,6 +487,64 @@ static inline int smc_rmb_wnd_update_limit(int rmbe_size) return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2); } +static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr, + bool is_rmb, int bufsize) +{ + struct smc_buf_desc *buf_desc; + struct smc_link *lnk; + int rc; + + /* try to alloc a new buffer */ + buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); + if (!buf_desc) + return ERR_PTR(-ENOMEM); + + buf_desc->cpu_addr = + (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | + __GFP_NOMEMALLOC | + __GFP_NORETRY | __GFP_ZERO, + get_order(bufsize)); + if (!buf_desc->cpu_addr) { + kfree(buf_desc); + return ERR_PTR(-EAGAIN); + } + buf_desc->order = get_order(bufsize); + + /* build the sg table from the pages */ + lnk = &lgr->lnk[SMC_SINGLE_LINK]; + rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1, + GFP_KERNEL); + if (rc) { + smc_buf_free(buf_desc, lnk, is_rmb); + return ERR_PTR(rc); + } + sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl, + buf_desc->cpu_addr, bufsize); + + /* map sg table to DMA address */ + rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc, + is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE); + /* SMC protocol depends on mapping to one DMA address only */ + if (rc != 1) { + smc_buf_free(buf_desc, lnk, is_rmb); + return ERR_PTR(-EAGAIN); + } + + /* create a new memory region for the RMB */ + if (is_rmb) { + rc = smc_ib_get_memory_region(lnk->roce_pd, + IB_ACCESS_REMOTE_WRITE | + IB_ACCESS_LOCAL_WRITE, + buf_desc); + if (rc) { + smc_buf_free(buf_desc, lnk, is_rmb); + return ERR_PTR(rc); + } + } + + return buf_desc; +} + static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) { struct smc_connection *conn = &smc->conn; @@ -494,12 +552,9 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) struct smc_buf_desc *buf_desc = NULL; struct list_head *buf_list; int bufsize, bufsize_short; - struct smc_link *lnk; int sk_buf_size; rwlock_t *lock; - int rc; - lnk = &lgr->lnk[SMC_SINGLE_LINK]; if (is_rmb) /* use socket recv buffer size (w/o overhead) as start value */ sk_buf_size = smc->sk.sk_rcvbuf / 2; @@ -528,54 +583,11 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) break; /* found reusable slot */ } - /* try to allocate the determined number of pages */ - buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL); - if (!buf_desc) - break; /* give up with -ENOMEM */ - - buf_desc->cpu_addr = - (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | - __GFP_NOMEMALLOC | - __GFP_NORETRY | __GFP_ZERO, - get_order(bufsize)); - if (!buf_desc->cpu_addr) { - kfree(buf_desc); - buf_desc = NULL; - continue; - } - - rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1, - GFP_KERNEL); - if (rc) { - smc_buf_free(buf_desc, lnk, is_rmb); - buf_desc = NULL; + buf_desc = smc_new_buf_create(lgr, is_rmb, bufsize); + if (PTR_ERR(buf_desc) == -ENOMEM) + break; + if (IS_ERR(buf_desc)) continue; - } - sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl, - buf_desc->cpu_addr, bufsize); - - /* map sg table to DMA address */ - rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc, is_rmb ? 
- DMA_FROM_DEVICE : DMA_TO_DEVICE); - /* SMC protocol depends on mapping to one DMA address only */ - if (rc != 1) { - smc_buf_free(buf_desc, lnk, is_rmb); - buf_desc = NULL; - continue; /* if mapping failed, try smaller one */ - } - - /* create a new memory region for the RMB */ - if (is_rmb) { - rc = smc_ib_get_memory_region(lnk->roce_pd, - IB_ACCESS_REMOTE_WRITE | - IB_ACCESS_LOCAL_WRITE, - buf_desc); - if (rc) { - smc_buf_free(buf_desc, lnk, is_rmb); - buf_desc = NULL; - continue; - } - } buf_desc->used = 1; write_lock_bh(lock); @@ -584,7 +596,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) break; /* found */ } - if (!buf_desc || !buf_desc->cpu_addr) + if (IS_ERR(buf_desc)) return -ENOMEM; if (is_rmb) { -- cgit v1.2.3-55-g7522 From 10428dd8354cc1c74ee806df45c2227c1f9d7b0c Mon Sep 17 00:00:00 2001 From: Ursula Braun Date: Fri, 28 Jul 2017 13:56:22 +0200 Subject: net/smc: synchronize buffer usage with device Usage of send buffer "sndbuf" is synced (a) before filling sndbuf for cpu access (b) after filling sndbuf for device access Usage of receive buffer "RMB" is synced (a) before reading RMB content for cpu access (b) after reading RMB content for device access Signed-off-by: Ursula Braun Signed-off-by: David S. Miller --- net/smc/af_smc.c | 2 ++ net/smc/smc_core.c | 32 ++++++++++++++++++++++++++++++++ net/smc/smc_core.h | 4 ++++ net/smc/smc_ib.c | 41 +++++++++++++++++++++++++++++++++++++++++ net/smc/smc_ib.h | 6 ++++++ net/smc/smc_rx.c | 3 +++ net/smc/smc_tx.c | 3 +++ 7 files changed, 91 insertions(+) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 75518879b68a..8c6d24b2995d 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -474,6 +474,7 @@ static int smc_connect_rdma(struct smc_sock *smc) } } } + smc_rmb_sync_sg_for_device(&smc->conn); rc = smc_clc_send_confirm(smc); if (rc) @@ -832,6 +833,7 @@ static void smc_listen_work(struct work_struct *work) } } } + smc_rmb_sync_sg_for_device(&new_smc->conn); rc = smc_clc_send_accept(new_smc, local_contact); if (rc) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 447bd52da0e2..1a16d51e2330 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -615,6 +615,38 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) return 0; } +void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn) +{ + struct smc_link_group *lgr = conn->lgr; + + smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + conn->sndbuf_desc, DMA_TO_DEVICE); +} + +void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn) +{ + struct smc_link_group *lgr = conn->lgr; + + smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + conn->sndbuf_desc, DMA_TO_DEVICE); +} + +void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn) +{ + struct smc_link_group *lgr = conn->lgr; + + smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + conn->rmb_desc, DMA_FROM_DEVICE); +} + +void smc_rmb_sync_sg_for_device(struct smc_connection *conn) +{ + struct smc_link_group *lgr = conn->lgr; + + smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev, + conn->rmb_desc, DMA_FROM_DEVICE); +} + /* create the send and receive buffer for an SMC socket; * receive buffers are called RMBs; * (even though the SMC protocol allows more than one RMB-element per RMB, diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index 1d713e8e067c..19c44bf4e391 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h @@ -189,4 +189,8 @@ void smc_lgr_terminate(struct smc_link_group *lgr); int smc_buf_create(struct smc_sock *smc); int 
smc_rmb_rtoken_handling(struct smc_connection *conn, struct smc_clc_msg_accept_confirm *clc); +void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn); +void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn); +void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn); +void smc_rmb_sync_sg_for_device(struct smc_connection *conn); #endif diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index 021f061609f5..547e0e113b17 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c @@ -13,6 +13,7 @@ #include #include +#include #include #include "smc_pnet.h" @@ -295,6 +296,46 @@ int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, return 0; } +/* synchronize buffer usage for cpu access */ +void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction) +{ + struct scatterlist *sg; + unsigned int i; + + /* for now there is just one DMA address */ + for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg, + buf_slot->sgt[SMC_SINGLE_LINK].nents, i) { + if (!sg_dma_len(sg)) + break; + ib_dma_sync_single_for_cpu(smcibdev->ibdev, + sg_dma_address(sg), + sg_dma_len(sg), + data_direction); + } +} + +/* synchronize buffer usage for device access */ +void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction) +{ + struct scatterlist *sg; + unsigned int i; + + /* for now there is just one DMA address */ + for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg, + buf_slot->sgt[SMC_SINGLE_LINK].nents, i) { + if (!sg_dma_len(sg)) + break; + ib_dma_sync_single_for_device(smcibdev->ibdev, + sg_dma_address(sg), + sg_dma_len(sg), + data_direction); + } +} + /* Map a new TX or RX buffer SG-table to DMA */ int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev, struct smc_buf_desc *buf_slot, diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h index 72acb19ffc67..9b927a33d5e6 100644 --- a/net/smc/smc_ib.h +++ b/net/smc/smc_ib.h @@ -68,4 +68,10 @@ long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev); int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags, struct smc_buf_desc *buf_slot); void smc_ib_put_memory_region(struct ib_mr *mr); +void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction); +void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev, + struct smc_buf_desc *buf_slot, + enum dma_data_direction data_direction); #endif diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index f0c8b089f770..b17a333e9bb0 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -170,6 +170,7 @@ copy: copylen, conn->rmbe_size - cons.count); chunk_len_sum = chunk_len; chunk_off = cons.count; + smc_rmb_sync_sg_for_cpu(conn); for (chunk = 0; chunk < 2; chunk++) { if (!(flags & MSG_TRUNC)) { rc = memcpy_to_msg(msg, rcvbuf_base + chunk_off, @@ -177,6 +178,7 @@ copy: if (rc) { if (!read_done) read_done = -EFAULT; + smc_rmb_sync_sg_for_device(conn); goto out; } } @@ -190,6 +192,7 @@ copy: chunk_len_sum += chunk_len; chunk_off = 0; /* modulo offset in recv ring buffer */ } + smc_rmb_sync_sg_for_device(conn); /* update cursors */ if (!(flags & MSG_PEEK)) { diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index f4d58e2dd559..3c656beb8820 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c @@ -174,10 +174,12 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) copylen, conn->sndbuf_size - tx_cnt_prep); chunk_len_sum = chunk_len; chunk_off = tx_cnt_prep; + 
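/* Illustrative bracketing of the calls added by this patch (ordering shown
 * for clarity only): hand the send buffer to the CPU before user data is
 * copied in, and back to the device afterwards, so non-coherent DMA setups
 * observe consistent buffer contents:
 *
 *	smc_sndbuf_sync_sg_for_cpu(conn);     // CPU may touch sndbuf now
 *	memcpy_from_msg(sndbuf_base + chunk_off, msg, chunk_len);
 *	smc_sndbuf_sync_sg_for_device(conn);  // device owns it again for RDMA
 *
 * The receive path in smc_rx.c mirrors this around memcpy_to_msg() on the RMB.
 */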
smc_sndbuf_sync_sg_for_cpu(conn); for (chunk = 0; chunk < 2; chunk++) { rc = memcpy_from_msg(sndbuf_base + chunk_off, msg, chunk_len); if (rc) { + smc_sndbuf_sync_sg_for_device(conn); if (send_done) return send_done; goto out_err; @@ -192,6 +194,7 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) chunk_len_sum += chunk_len; chunk_off = 0; /* modulo offset in send ring buffer */ } + smc_sndbuf_sync_sg_for_device(conn); /* update cursors */ smc_curs_add(conn->sndbuf_size, &prep, copylen); smc_curs_write(&conn->tx_curs_prep, -- cgit v1.2.3-55-g7522 From 68f70d837d7e07ba118eb0ea4a44ea276ce53f60 Mon Sep 17 00:00:00 2001 From: SZ Lin Date: Sat, 29 Jul 2017 18:42:34 +0800 Subject: net: moxa: Remove braces from single-line body Remove unnecessary braces from single-line if statement This warning is found using checkpatch.pl Signed-off-by: SZ Lin Signed-off-by: David S. Miller --- drivers/net/ethernet/moxa/moxart_ether.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index c0d7d5eec7e7..105215862949 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -269,9 +269,8 @@ rx_next: priv->rx_head = rx_head; } - if (rx < budget) { + if (rx < budget) napi_complete_done(napi, rx); - } priv->reg_imr |= RPKT_FINISH_M; writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK); -- cgit v1.2.3-55-g7522 From d458f4c5fd55ae921ac6f3bf2c169da8d2379b07 Mon Sep 17 00:00:00 2001 From: SZ Lin Date: Sat, 29 Jul 2017 18:42:35 +0800 Subject: net: moxa: Prefer 'unsigned int' to bare use of 'unsigned' Use 'unsigned int' instead of 'unsigned' This warning is found using checkpatch.pl Signed-off-by: SZ Lin Signed-off-by: David S. Miller --- drivers/net/ethernet/moxa/moxart_ether.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 105215862949..9997e72103d5 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -288,8 +288,8 @@ static int moxart_tx_queue_space(struct net_device *ndev) static void moxart_tx_finished(struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); - unsigned tx_head = priv->tx_head; - unsigned tx_tail = priv->tx_tail; + unsigned int tx_head = priv->tx_head; + unsigned int tx_tail = priv->tx_tail; while (tx_tail != tx_head) { dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail], -- cgit v1.2.3-55-g7522 From e8048b84bbfd3e7010beb41c251ff5dc27dab955 Mon Sep 17 00:00:00 2001 From: SZ Lin Date: Sat, 29 Jul 2017 18:42:36 +0800 Subject: net: moxa: Fix comparison to NULL could be written with ! Fixed coding style for null comparisons in moxart_ether driver to be more consistent with the rest of the kernel coding style Signed-off-by: SZ Lin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/moxa/moxart_ether.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 9997e72103d5..1d6384873393 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -494,7 +494,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM, &priv->tx_base, GFP_DMA | GFP_KERNEL); - if (priv->tx_desc_base == NULL) { + if (!priv->tx_desc_base) { ret = -ENOMEM; goto init_fail; } @@ -502,7 +502,7 @@ static int moxart_mac_probe(struct platform_device *pdev) priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM, &priv->rx_base, GFP_DMA | GFP_KERNEL); - if (priv->rx_desc_base == NULL) { + if (!priv->rx_desc_base) { ret = -ENOMEM; goto init_fail; } -- cgit v1.2.3-55-g7522 From c45c5d03937dfa5da38b3d2e2172199641787190 Mon Sep 17 00:00:00 2001 From: SZ Lin Date: Sat, 29 Jul 2017 18:42:37 +0800 Subject: net: moxa: Remove extra space after a cast No space is necessary after a cast This warning is found using checkpatch.pl Signed-off-by: SZ Lin Signed-off-by: David S. Miller --- drivers/net/ethernet/moxa/moxart_ether.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 1d6384873393..31e179a651ae 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -311,7 +311,7 @@ static void moxart_tx_finished(struct net_device *ndev) static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) { - struct net_device *ndev = (struct net_device *) dev_id; + struct net_device *ndev = (struct net_device *)dev_id; struct moxart_mac_priv_t *priv = netdev_priv(ndev); unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS); -- cgit v1.2.3-55-g7522 From 2fcc440267d52a4d2745d486b4d5001669fe6ac8 Mon Sep 17 00:00:00 2001 From: SZ Lin Date: Sat, 29 Jul 2017 18:42:38 +0800 Subject: net: moxa: Fix for typo in comment to function moxart_mac_setup_desc_ring() Signed-off-by: SZ Lin Signed-off-by: David S. Miller --- drivers/net/ethernet/moxa/moxart_ether.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 31e179a651ae..2e4effa9fe45 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -161,7 +161,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) priv->rx_head = 0; - /* reset the MAC controller TX/RX desciptor base address */ + /* reset the MAC controller TX/RX descriptor base address */ writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS); writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS); } -- cgit v1.2.3-55-g7522 From dd4de07f20c05727818c330d4aa9bfdec25e3815 Mon Sep 17 00:00:00 2001 From: SZ Lin Date: Sat, 29 Jul 2017 18:42:39 +0800 Subject: net: moxa: Add spaces preferred around that '{+,-}' This patch fixes all checkpatch occurences of "CHECK: spaces preferred around that '{+,-}' (ctx:VxV)" in moxart_ether code. Signed-off-by: SZ Lin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/moxa/moxart_ether.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h index 686b8957d5cf..bee608b547d1 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.h +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -55,17 +55,17 @@ #define RX_DESC2_ADDRESS_VIRT 4 #define TX_DESC_NUM 64 -#define TX_DESC_NUM_MASK (TX_DESC_NUM-1) +#define TX_DESC_NUM_MASK (TX_DESC_NUM - 1) #define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK)) #define TX_BUF_SIZE 1600 -#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1) +#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK + 1) #define TX_WAKE_THRESHOLD 16 #define RX_DESC_NUM 64 -#define RX_DESC_NUM_MASK (RX_DESC_NUM-1) +#define RX_DESC_NUM_MASK (RX_DESC_NUM - 1) #define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM_MASK)) #define RX_BUF_SIZE 1600 -#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK+1) +#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK + 1) #define REG_INTERRUPT_STATUS 0 #define REG_INTERRUPT_MASK 4 -- cgit v1.2.3-55-g7522 From 81f6bf81270ce1052b5cd4d60b9edc40cd5ceefa Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 26 Jul 2017 17:32:07 -0700 Subject: bpf: testing: fix devmap tests Apparently through one of my revisions of the initial patches series I lost the devmap test. We can add more testing later but for now lets fix the simple one we have. Fixes: 546ac1ffb70d "bpf: add devmap, a map for storing net device references" Reported-by: Jakub Kicinski Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- tools/include/uapi/linux/bpf.h | 1 + tools/testing/selftests/bpf/test_maps.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index ce2988be4f0e..1579cab49717 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -104,6 +104,7 @@ enum bpf_map_type { BPF_MAP_TYPE_LPM_TRIE, BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH_OF_MAPS, + BPF_MAP_TYPE_DEVMAP, }; enum bpf_prog_type { diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index 36d6ac3f0c1c..c991ab69a720 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -440,7 +440,7 @@ static void test_arraymap_percpu_many_keys(void) static void test_devmap(int task, void *data) { - int next_key, fd; + int fd; __u32 key, value; fd = bpf_create_map(BPF_MAP_TYPE_DEVMAP, sizeof(key), sizeof(value), @@ -620,6 +620,8 @@ static void run_all_tests(void) test_arraymap_percpu_many_keys(); + test_devmap(0, NULL); + test_map_large(); test_map_parallel(); test_map_stress(); -- cgit v1.2.3-55-g7522 From 17f0d42c931b2c43d84440ebc16978bc76ef1201 Mon Sep 17 00:00:00 2001 From: Sathya Perla Date: Thu, 27 Jul 2017 06:29:51 -0400 Subject: bnxt_re: add MAY_USE_DEVLINK dependency bnxt_en depends on MAY_USE_DEVLINK; this is used to force bnxt_en to be =m when DEVLINK is =m. Now, bnxt_re selects bnxt_en. 
Unless bnxt_re also explicitly calls out dependency on MAY_USE_DEVLINK, Kconfig does not force bnxt_re to be =m when DEVLINK is =m, causing the following error: drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.o: In function `bnxt_dl_register': bnxt_vfr.c:(.text+0x1440): undefined reference to `devlink_alloc' bnxt_vfr.c:(.text+0x14c0): undefined reference to `devlink_register' bnxt_vfr.c:(.text+0x14e0): undefined reference to `devlink_free' drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.o: In function `bnxt_dl_unregister': bnxt_vfr.c:(.text+0x1534): undefined reference to `devlink_unregister' bnxt_vfr.c:(.text+0x153c): undefined reference to `devlink_free' Fix this by adding MAY_USE_DEVLINK dependency in bnxt_re. Fixes: 4ab0c6a8ffd7 ("bnxt_en: add support to enable VF-representors") Suggested-by: Arnd Bergmann Signed-off-by: Sathya Perla Signed-off-by: David S. Miller --- drivers/infiniband/hw/bnxt_re/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/hw/bnxt_re/Kconfig b/drivers/infiniband/hw/bnxt_re/Kconfig index 19982a4a9bba..18f5ed082f41 100644 --- a/drivers/infiniband/hw/bnxt_re/Kconfig +++ b/drivers/infiniband/hw/bnxt_re/Kconfig @@ -1,6 +1,7 @@ config INFINIBAND_BNXT_RE tristate "Broadcom Netxtreme HCA support" depends on ETHERNET && NETDEVICES && PCI && INET && DCB + depends on MAY_USE_DEVLINK select NET_VENDOR_BROADCOM select BNXT ---help--- -- cgit v1.2.3-55-g7522 From a847135a56ff5d5994fcaabd36b2a8b581913433 Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Thu, 27 Jul 2017 12:32:28 -0700 Subject: liquidio: bump up driver version to match newer NIC firmware Bump up driver version to match newer NIC firmware. Also update nic_rx_stats (a struct common to host driver and firmware) by adding a new field: fw_total_fwd_bytes. Signed-off-by: Felix Manlunas Signed-off-by: Raghu Vatsavayi Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 53aaf417e722..3b9e3646b971 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -27,8 +27,8 @@ #define LIQUIDIO_PACKAGE "" #define LIQUIDIO_BASE_MAJOR_VERSION 1 -#define LIQUIDIO_BASE_MINOR_VERSION 5 -#define LIQUIDIO_BASE_MICRO_VERSION 1 +#define LIQUIDIO_BASE_MINOR_VERSION 6 +#define LIQUIDIO_BASE_MICRO_VERSION 0 #define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \ __stringify(LIQUIDIO_BASE_MINOR_VERSION) #define LIQUIDIO_MICRO_VERSION "." __stringify(LIQUIDIO_BASE_MICRO_VERSION) @@ -768,6 +768,7 @@ struct nic_rx_stats { /* firmware stats */ u64 fw_total_rcvd; u64 fw_total_fwd; + u64 fw_total_fwd_bytes; u64 fw_err_pko; u64 fw_err_link; u64 fw_err_drop; -- cgit v1.2.3-55-g7522 From d6aac1f218873f2266de23280bffb909b4a98fbf Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 28 Jul 2017 08:59:41 -0700 Subject: netvsc: fix return value for set_channels The error and normal case got swapped. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 262486ce8e2a..f1eaf675d2e9 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -758,8 +758,8 @@ static int netvsc_set_channels(struct net_device *net, if (!IS_ERR(nvdev)) { netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); - ret = PTR_ERR(nvdev); } else { + ret = PTR_ERR(nvdev); device_info.num_chn = orig; rndis_filter_device_add(dev, &device_info); } -- cgit v1.2.3-55-g7522 From 867047c4512aa65fb4cf66b253b51b830c7fa172 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 28 Jul 2017 08:59:42 -0700 Subject: netvsc: fix warnings reported by lockdep This includes a bunch of fixups for issues reported by lockdep. * ethtool routines can assume RTNL * send is done with RCU lock (and BH disable) * avoid refetching internal device struct (netvsc) instead pass it as a parameter. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 3 +- drivers/net/hyperv/netvsc.c | 2 +- drivers/net/hyperv/netvsc_drv.c | 15 ++++--- drivers/net/hyperv/rndis_filter.c | 84 +++++++++++++++++++-------------------- 4 files changed, 54 insertions(+), 50 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 4e7ff348327e..fb62ea632914 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -217,7 +217,8 @@ int rndis_filter_receive(struct net_device *ndev, struct vmbus_channel *channel, void *data, u32 buflen); -int rndis_filter_set_device_mac(struct net_device *ndev, char *mac); +int rndis_filter_set_device_mac(struct netvsc_device *ndev, + const char *mac); void netvsc_switch_datapath(struct net_device *nv_dev, bool vf); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 06f39a99da7c..94c00acac58a 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -832,7 +832,7 @@ int netvsc_send(struct net_device_context *ndev_ctx, struct sk_buff *skb) { struct netvsc_device *net_device - = rcu_dereference_rtnl(ndev_ctx->nvdev); + = rcu_dereference_bh(ndev_ctx->nvdev); struct hv_device *device = ndev_ctx->device_ctx; int ret = 0; struct netvsc_channel *nvchan; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index f1eaf675d2e9..a04f2efbbc25 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -923,6 +923,8 @@ static void netvsc_get_stats64(struct net_device *net, static int netvsc_set_mac_addr(struct net_device *ndev, void *p) { + struct net_device_context *ndc = netdev_priv(ndev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); struct sockaddr *addr = p; char save_adr[ETH_ALEN]; unsigned char save_aatype; @@ -935,7 +937,10 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p) if (err != 0) return err; - err = rndis_filter_set_device_mac(ndev, addr->sa_data); + if (!nvdev) + return -ENODEV; + + err = rndis_filter_set_device_mac(nvdev, addr->sa_data); if (err != 0) { /* roll back to saved MAC */ memcpy(ndev->dev_addr, save_adr, ETH_ALEN); @@ -981,7 +986,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = 
rtnl_dereference(ndc->nvdev); const void *nds = &ndc->eth_stats; const struct netvsc_stats *qstats; unsigned int start; @@ -1019,7 +1024,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); u8 *p = data; int i; @@ -1077,7 +1082,7 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *nvdev = rcu_dereference(ndc->nvdev); + struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); if (!nvdev) return -ENODEV; @@ -1127,7 +1132,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc) { struct net_device_context *ndc = netdev_priv(dev); - struct netvsc_device *ndev = rcu_dereference(ndc->nvdev); + struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev); struct rndis_device *rndis_dev; int i; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index eaa3f0d5682a..bf21ea92c743 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -85,14 +85,6 @@ static struct rndis_device *get_rndis_device(void) return device; } -static struct netvsc_device * -net_device_to_netvsc_device(struct net_device *ndev) -{ - struct net_device_context *net_device_ctx = netdev_priv(ndev); - - return rtnl_dereference(net_device_ctx->nvdev); -} - static struct rndis_request *get_rndis_request(struct rndis_device *dev, u32 msg_type, u32 msg_len) @@ -252,7 +244,10 @@ static int rndis_filter_send_request(struct rndis_device *dev, pb[0].len; } + rcu_read_lock_bh(); ret = netvsc_send(net_device_ctx, packet, NULL, &pb, NULL); + rcu_read_unlock_bh(); + return ret; } @@ -452,8 +447,9 @@ int rndis_filter_receive(struct net_device *ndev, return 0; } -static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, - void *result, u32 *result_size) +static int rndis_filter_query_device(struct rndis_device *dev, + struct netvsc_device *nvdev, + u32 oid, void *result, u32 *result_size) { struct rndis_request *request; u32 inresult_size = *result_size; @@ -480,8 +476,6 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid, query->dev_vc_handle = 0; if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) { - struct net_device_context *ndevctx = netdev_priv(dev->ndev); - struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); struct ndis_offload *hwcaps; u32 nvsp_version = nvdev->nvsp_version; u8 ndis_rev; @@ -550,14 +544,15 @@ cleanup: /* Get the hardware offload capabilities */ static int -rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps) +rndis_query_hwcaps(struct rndis_device *dev, struct netvsc_device *net_device, + struct ndis_offload *caps) { u32 caps_len = sizeof(*caps); int ret; memset(caps, 0, sizeof(*caps)); - ret = rndis_filter_query_device(dev, + ret = rndis_filter_query_device(dev, net_device, OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES, caps, &caps_len); if (ret) @@ -586,11 +581,12 @@ rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps) return 0; } -static int rndis_filter_query_device_mac(struct rndis_device *dev) +static int rndis_filter_query_device_mac(struct rndis_device *dev, + struct netvsc_device *net_device) { u32 size = ETH_ALEN; - return rndis_filter_query_device(dev, + return 
rndis_filter_query_device(dev, net_device, RNDIS_OID_802_3_PERMANENT_ADDRESS, dev->hw_mac_adr, &size); } @@ -598,9 +594,9 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev) #define NWADR_STR "NetworkAddress" #define NWADR_STRLEN 14 -int rndis_filter_set_device_mac(struct net_device *ndev, char *mac) +int rndis_filter_set_device_mac(struct netvsc_device *nvdev, + const char *mac) { - struct netvsc_device *nvdev = net_device_to_netvsc_device(ndev); struct rndis_device *rdev = nvdev->extension; struct rndis_request *request; struct rndis_set_request *set; @@ -654,11 +650,8 @@ int rndis_filter_set_device_mac(struct net_device *ndev, char *mac) wait_for_completion(&request->wait_event); set_complete = &request->response_msg.msg.set_complete; - if (set_complete->status != RNDIS_STATUS_SUCCESS) { - netdev_err(ndev, "Fail to set MAC on host side:0x%x\n", - set_complete->status); - ret = -EINVAL; - } + if (set_complete->status != RNDIS_STATUS_SUCCESS) + ret = -EIO; cleanup: put_rndis_request(rdev, request); @@ -791,27 +784,27 @@ cleanup: return ret; } -static int rndis_filter_query_device_link_status(struct rndis_device *dev) +static int rndis_filter_query_device_link_status(struct rndis_device *dev, + struct netvsc_device *net_device) { u32 size = sizeof(u32); u32 link_status; - int ret; - ret = rndis_filter_query_device(dev, - RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, - &link_status, &size); - - return ret; + return rndis_filter_query_device(dev, net_device, + RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, + &link_status, &size); } -static int rndis_filter_query_link_speed(struct rndis_device *dev) +static int rndis_filter_query_link_speed(struct rndis_device *dev, + struct netvsc_device *net_device) { u32 size = sizeof(u32); u32 link_speed; struct net_device_context *ndc; int ret; - ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_LINK_SPEED, + ret = rndis_filter_query_device(dev, net_device, + RNDIS_OID_GEN_LINK_SPEED, &link_speed, &size); if (!ret) { @@ -880,14 +873,14 @@ void rndis_filter_update(struct netvsc_device *nvdev) schedule_work(&rdev->mcast_work); } -static int rndis_filter_init_device(struct rndis_device *dev) +static int rndis_filter_init_device(struct rndis_device *dev, + struct netvsc_device *nvdev) { struct rndis_request *request; struct rndis_initialize_request *init; struct rndis_initialize_complete *init_complete; u32 status; int ret; - struct netvsc_device *nvdev = net_device_to_netvsc_device(dev->ndev); request = get_rndis_request(dev, RNDIS_MSG_INIT, RNDIS_MESSAGE_SIZE(struct rndis_initialize_request)); @@ -1024,12 +1017,17 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) { struct net_device *ndev = hv_get_drvdata(new_sc->primary_channel->device_obj); - struct netvsc_device *nvscdev = net_device_to_netvsc_device(ndev); + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct netvsc_device *nvscdev; u16 chn_index = new_sc->offermsg.offer.sub_channel_index; struct netvsc_channel *nvchan; int ret; - if (chn_index >= nvscdev->num_chn) + /* This is safe because this callback only happens when + * new device is being setup and waiting on the channel_init_wait. 
+ */ + nvscdev = rcu_dereference_raw(ndev_ctx->nvdev); + if (!nvscdev || chn_index >= nvscdev->num_chn) return; nvchan = nvscdev->chan_table + chn_index; @@ -1104,27 +1102,27 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, rndis_device->ndev = net; /* Send the rndis initialization message */ - ret = rndis_filter_init_device(rndis_device); + ret = rndis_filter_init_device(rndis_device, net_device); if (ret != 0) goto err_dev_remv; /* Get the MTU from the host */ size = sizeof(u32); - ret = rndis_filter_query_device(rndis_device, + ret = rndis_filter_query_device(rndis_device, net_device, RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, &mtu, &size); if (ret == 0 && size == sizeof(u32) && mtu < net->mtu) net->mtu = mtu; /* Get the mac address */ - ret = rndis_filter_query_device_mac(rndis_device); + ret = rndis_filter_query_device_mac(rndis_device, net_device); if (ret != 0) goto err_dev_remv; memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN); /* Find HW offload capabilities */ - ret = rndis_query_hwcaps(rndis_device, &hwcaps); + ret = rndis_query_hwcaps(rndis_device, net_device, &hwcaps); if (ret != 0) goto err_dev_remv; @@ -1185,7 +1183,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, if (ret) goto err_dev_remv; - rndis_filter_query_device_link_status(rndis_device); + rndis_filter_query_device_link_status(rndis_device, net_device); netdev_dbg(net, "Device MAC %pM link state %s\n", rndis_device->hw_mac_adr, @@ -1194,11 +1192,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5) return net_device; - rndis_filter_query_link_speed(rndis_device); + rndis_filter_query_link_speed(rndis_device, net_device); /* vRSS setup */ memset(&rsscap, 0, rsscap_size); - ret = rndis_filter_query_device(rndis_device, + ret = rndis_filter_query_device(rndis_device, net_device, OID_GEN_RECEIVE_SCALE_CAPABILITIES, &rsscap, &rsscap_size); if (ret || rsscap.num_recv_que < 2) -- cgit v1.2.3-55-g7522 From 4a2176c63b0f95fdb66d363c7e3dacc145a4f038 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 28 Jul 2017 08:59:43 -0700 Subject: netvsc: don't print pointer value in error message Using %p to print pointer to packet meta-data doesn't give any good info, and exposes kernel memory offsets. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 94c00acac58a..f0c15e782ce0 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -805,8 +805,10 @@ static inline int netvsc_send_pkt( ret = -ENOSPC; } } else { - netdev_err(ndev, "Unable to send packet %p ret %d\n", - packet, ret); + netdev_err(ndev, + "Unable to send packet pages %u len %u, ret %d\n", + packet->page_buf_cnt, packet->total_data_buflen, + ret); } return ret; -- cgit v1.2.3-55-g7522 From 02b6de01af1d116c107d61fa3e8583be3b97ac3c Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 28 Jul 2017 08:59:44 -0700 Subject: netvsc: remove unnecessary indirection of page_buffer The internal API was passing struct hv_page_buffer ** when only simple struct hv_page_buffer * was necessary for passing an array. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 2 +- drivers/net/hyperv/netvsc.c | 21 ++++++++++----------- drivers/net/hyperv/netvsc_drv.c | 10 ++++------ drivers/net/hyperv/rndis_filter.c | 4 ++-- 4 files changed, 17 insertions(+), 20 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index fb62ea632914..9ca3ed692d73 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -190,7 +190,7 @@ void netvsc_device_remove(struct hv_device *device); int netvsc_send(struct net_device_context *ndc, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, - struct hv_page_buffer **page_buffer, + struct hv_page_buffer *page_buffer, struct sk_buff *skb); void netvsc_linkstatus_callback(struct hv_device *device_obj, struct rndis_message *resp); diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index f0c15e782ce0..d3c0b19f6d34 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -697,7 +697,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, u32 pend_size, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, - struct hv_page_buffer **pb, + struct hv_page_buffer *pb, struct sk_buff *skb) { char *start = net_device->send_buf; @@ -718,9 +718,9 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device, } for (i = 0; i < page_count; i++) { - char *src = phys_to_virt((*pb)[i].pfn << PAGE_SHIFT); - u32 offset = (*pb)[i].offset; - u32 len = (*pb)[i].len; + char *src = phys_to_virt(pb[i].pfn << PAGE_SHIFT); + u32 offset = pb[i].offset; + u32 len = pb[i].len; memcpy(dest, (src + offset), len); msg_size += len; @@ -739,7 +739,7 @@ static inline int netvsc_send_pkt( struct hv_device *device, struct hv_netvsc_packet *packet, struct netvsc_device *net_device, - struct hv_page_buffer **pb, + struct hv_page_buffer *pb, struct sk_buff *skb) { struct nvsp_message nvmsg; @@ -750,7 +750,6 @@ static inline int netvsc_send_pkt( struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); u64 req_id; int ret; - struct hv_page_buffer *pgbuf; u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound); nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT; @@ -776,11 +775,11 @@ static inline int netvsc_send_pkt( return -ENODEV; if (packet->page_buf_cnt) { - pgbuf = packet->cp_partial ? 
(*pb) + - packet->rmsg_pgcnt : (*pb); + if (packet->cp_partial) + pb += packet->rmsg_pgcnt; + ret = vmbus_sendpacket_pagebuffer_ctl(out_channel, - pgbuf, - packet->page_buf_cnt, + pb, packet->page_buf_cnt, &nvmsg, sizeof(struct nvsp_message), req_id, @@ -830,7 +829,7 @@ static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, int netvsc_send(struct net_device_context *ndev_ctx, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, - struct hv_page_buffer **pb, + struct hv_page_buffer *pb, struct sk_buff *skb) { struct netvsc_device *net_device diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index a04f2efbbc25..8ff4cbf582cc 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -282,9 +282,8 @@ static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, struct hv_netvsc_packet *packet, - struct hv_page_buffer **page_buf) + struct hv_page_buffer *pb) { - struct hv_page_buffer *pb = *page_buf; u32 slots_used = 0; char *data = skb->data; int frags = skb_shinfo(skb)->nr_frags; @@ -359,8 +358,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) u32 rndis_msg_size; struct rndis_per_packet_info *ppi; u32 hash; - struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT]; - struct hv_page_buffer *pb = page_buf; + struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; /* We can only transmit MAX_PAGE_BUFFER_COUNT number * of pages in a single packet. If skb is scattered around @@ -503,12 +501,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) rndis_msg->msg_len += rndis_msg_size; packet->total_data_buflen = rndis_msg->msg_len; packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, - skb, packet, &pb); + skb, packet, pb); /* timestamp packet in software */ skb_tx_timestamp(skb); - ret = netvsc_send(net_device_ctx, packet, rndis_msg, &pb, skb); + ret = netvsc_send(net_device_ctx, packet, rndis_msg, pb, skb); if (likely(ret == 0)) return NETDEV_TX_OK; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index bf21ea92c743..d80e9e3f433e 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -214,11 +214,11 @@ static void dump_rndis_message(struct hv_device *hv_dev, static int rndis_filter_send_request(struct rndis_device *dev, struct rndis_request *req) { - int ret; struct hv_netvsc_packet *packet; struct hv_page_buffer page_buf[2]; struct hv_page_buffer *pb = page_buf; struct net_device_context *net_device_ctx = netdev_priv(dev->ndev); + int ret; /* Setup the packet to send it */ packet = &req->pkt; @@ -245,7 +245,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, } rcu_read_lock_bh(); - ret = netvsc_send(net_device_ctx, packet, NULL, &pb, NULL); + ret = netvsc_send(net_device_ctx, packet, NULL, pb, NULL); rcu_read_unlock_bh(); return ret; -- cgit v1.2.3-55-g7522 From 7426b1a51803ba2d368177363a134b98b0a8d1c0 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 28 Jul 2017 08:59:45 -0700 Subject: netvsc: optimize receive completions Optimize how receive completion ring are managed. 
* Allocate only as many slots as needed for all buffers from host * Allocate before setting up sub channel for better error detection * Don't need to keep copy of initial receive section message * Precompute the watermark for when receive flushing is needed * Replace division with conditional test * Replace atomic per-device variable with per-channel check. * Handle corner case where receive completion send fails if ring buffer to host is full. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 14 +- drivers/net/hyperv/netvsc.c | 267 ++++++++++++++++---------------------- drivers/net/hyperv/rndis_filter.c | 20 +-- 3 files changed, 126 insertions(+), 175 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 9ca3ed692d73..f2cef5aaed1f 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -186,6 +186,7 @@ struct net_device_context; struct netvsc_device *netvsc_device_add(struct hv_device *device, const struct netvsc_device_info *info); +int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx); void netvsc_device_remove(struct hv_device *device); int netvsc_send(struct net_device_context *ndc, struct hv_netvsc_packet *packet, @@ -657,13 +658,10 @@ struct recv_comp_data { u32 status; }; -/* Netvsc Receive Slots Max */ -#define NETVSC_RECVSLOT_MAX (NETVSC_RECEIVE_BUFFER_SIZE / ETH_DATA_LEN + 1) - struct multi_recv_comp { - void *buf; /* queued receive completions */ - u32 first; /* first data entry */ - u32 next; /* next entry for writing */ + struct recv_comp_data *slots; + u32 first; /* first data entry */ + u32 next; /* next entry for writing */ }; struct netvsc_stats { @@ -750,7 +748,7 @@ struct netvsc_device { u32 recv_buf_size; u32 recv_buf_gpadl_handle; u32 recv_section_cnt; - struct nvsp_1_receive_buffer_section *recv_section; + u32 recv_completion_cnt; /* Send buffer allocated by us */ void *send_buf; @@ -778,8 +776,6 @@ struct netvsc_device { u32 max_pkt; /* max number of pkt in one send, e.g. 8 */ u32 pkt_align; /* alignment bytes, e.g. 8 */ - atomic_t num_outstanding_recvs; - atomic_t open_cnt; struct netvsc_channel chan_table[VRSS_CHANNEL_MAX]; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index d3c0b19f6d34..4c709b454d34 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -72,9 +72,6 @@ static struct netvsc_device *alloc_net_device(void) if (!net_device) return NULL; - net_device->chan_table[0].mrc.buf - = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); - init_waitqueue_head(&net_device->wait_drain); net_device->destroy = false; atomic_set(&net_device->open_cnt, 0); @@ -92,7 +89,7 @@ static void free_netvsc_device(struct rcu_head *head) int i; for (i = 0; i < VRSS_CHANNEL_MAX; i++) - vfree(nvdev->chan_table[i].mrc.buf); + vfree(nvdev->chan_table[i].mrc.slots); kfree(nvdev); } @@ -171,12 +168,6 @@ static void netvsc_destroy_buf(struct hv_device *device) net_device->recv_buf = NULL; } - if (net_device->recv_section) { - net_device->recv_section_cnt = 0; - kfree(net_device->recv_section); - net_device->recv_section = NULL; - } - /* Deal with the send buffer we may have setup. 
* If we got a send section size, it means we received a * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent @@ -239,11 +230,26 @@ static void netvsc_destroy_buf(struct hv_device *device) kfree(net_device->send_section_map); } +int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) +{ + struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; + int node = cpu_to_node(nvchan->channel->target_cpu); + size_t size; + + size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data); + nvchan->mrc.slots = vzalloc_node(size, node); + if (!nvchan->mrc.slots) + nvchan->mrc.slots = vzalloc(size); + + return nvchan->mrc.slots ? 0 : -ENOMEM; +} + static int netvsc_init_buf(struct hv_device *device, struct netvsc_device *net_device) { int ret = 0; struct nvsp_message *init_packet; + struct nvsp_1_message_send_receive_buffer_complete *resp; struct net_device *ndev; size_t map_words; int node; @@ -300,43 +306,41 @@ static int netvsc_init_buf(struct hv_device *device, wait_for_completion(&net_device->channel_init_wait); /* Check the response */ - if (init_packet->msg.v1_msg. - send_recv_buf_complete.status != NVSP_STAT_SUCCESS) { - netdev_err(ndev, "Unable to complete receive buffer " - "initialization with NetVsp - status %d\n", - init_packet->msg.v1_msg. - send_recv_buf_complete.status); + resp = &init_packet->msg.v1_msg.send_recv_buf_complete; + if (resp->status != NVSP_STAT_SUCCESS) { + netdev_err(ndev, + "Unable to complete receive buffer initialization with NetVsp - status %d\n", + resp->status); ret = -EINVAL; goto cleanup; } /* Parse the response */ + netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n", + resp->num_sections, resp->sections[0].sub_alloc_size, + resp->sections[0].num_sub_allocs); - net_device->recv_section_cnt = init_packet->msg. - v1_msg.send_recv_buf_complete.num_sections; - - net_device->recv_section = kmemdup( - init_packet->msg.v1_msg.send_recv_buf_complete.sections, - net_device->recv_section_cnt * - sizeof(struct nvsp_1_receive_buffer_section), - GFP_KERNEL); - if (net_device->recv_section == NULL) { - ret = -EINVAL; - goto cleanup; - } + net_device->recv_section_cnt = resp->num_sections; /* * For 1st release, there should only be 1 section that represents the * entire receive buffer */ if (net_device->recv_section_cnt != 1 || - net_device->recv_section->offset != 0) { + resp->sections[0].offset != 0) { ret = -EINVAL; goto cleanup; } - /* Now setup the send buffer. - */ + /* Setup receive completion ring */ + net_device->recv_completion_cnt + = round_up(resp->sections[0].num_sub_allocs + 1, + PAGE_SIZE / sizeof(u64)); + ret = netvsc_alloc_recv_comp_ring(net_device, 0); + if (ret) + goto cleanup; + + /* Now setup the send buffer. 
*/ net_device->send_buf = vzalloc_node(net_device->send_buf_size, node); if (!net_device->send_buf) net_device->send_buf = vzalloc(net_device->send_buf_size); @@ -951,130 +955,94 @@ send_now: return ret; } -static int netvsc_send_recv_completion(struct vmbus_channel *channel, - u64 transaction_id, u32 status) +/* Send pending recv completions */ +static int send_recv_completions(struct netvsc_channel *nvchan) { - struct nvsp_message recvcompMessage; + struct netvsc_device *nvdev = nvchan->net_device; + struct multi_recv_comp *mrc = &nvchan->mrc; + struct recv_comp_msg { + struct nvsp_message_header hdr; + u32 status; + } __packed; + struct recv_comp_msg msg = { + .hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE, + }; int ret; - recvcompMessage.hdr.msg_type = - NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE; - - recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status = status; - - /* Send the completion */ - ret = vmbus_sendpacket(channel, &recvcompMessage, - sizeof(struct nvsp_message_header) + sizeof(u32), - transaction_id, VM_PKT_COMP, 0); + while (mrc->first != mrc->next) { + const struct recv_comp_data *rcd + = mrc->slots + mrc->first; - return ret; -} - -static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx, - u32 *filled, u32 *avail) -{ - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - u32 first = mrc->first; - u32 next = mrc->next; + msg.status = rcd->status; + ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg), + rcd->tid, VM_PKT_COMP, 0); + if (unlikely(ret)) + return ret; - *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next : - next - first; - - *avail = NETVSC_RECVSLOT_MAX - *filled - 1; -} - -/* Read the first filled slot, no change to index */ -static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device - *nvdev, u16 q_idx) -{ - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - u32 filled, avail; - - if (unlikely(!mrc->buf)) - return NULL; + if (++mrc->first == nvdev->recv_completion_cnt) + mrc->first = 0; + } - count_recv_comp_slot(nvdev, q_idx, &filled, &avail); - if (!filled) - return NULL; + /* receive completion ring has been emptied */ + if (unlikely(nvdev->destroy)) + wake_up(&nvdev->wait_drain); - return mrc->buf + mrc->first * sizeof(struct recv_comp_data); + return 0; } -/* Put the first filled slot back to available pool */ -static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx) +/* Count how many receive completions are outstanding */ +static void recv_comp_slot_avail(const struct netvsc_device *nvdev, + const struct multi_recv_comp *mrc, + u32 *filled, u32 *avail) { - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - int num_recv; + u32 count = nvdev->recv_completion_cnt; - mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX; - - num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs); + if (mrc->next >= mrc->first) + *filled = mrc->next - mrc->first; + else + *filled = (count - mrc->first) + mrc->next; - if (nvdev->destroy && num_recv == 0) - wake_up(&nvdev->wait_drain); + *avail = count - *filled - 1; } -/* Check and send pending recv completions */ -static void netvsc_chk_recv_comp(struct netvsc_device *nvdev, - struct vmbus_channel *channel, u16 q_idx) +/* Add receive complete to ring to send to host. 
*/ +static void enq_receive_complete(struct net_device *ndev, + struct netvsc_device *nvdev, u16 q_idx, + u64 tid, u32 status) { + struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx]; + struct multi_recv_comp *mrc = &nvchan->mrc; struct recv_comp_data *rcd; - int ret; - - while (true) { - rcd = read_recv_comp_slot(nvdev, q_idx); - if (!rcd) - break; + u32 filled, avail; - ret = netvsc_send_recv_completion(channel, rcd->tid, - rcd->status); - if (ret) - break; + recv_comp_slot_avail(nvdev, mrc, &filled, &avail); - put_recv_comp_slot(nvdev, q_idx); + if (unlikely(filled > NAPI_POLL_WEIGHT)) { + send_recv_completions(nvchan); + recv_comp_slot_avail(nvdev, mrc, &filled, &avail); } -} - -#define NETVSC_RCD_WATERMARK 80 -/* Get next available slot */ -static inline struct recv_comp_data *get_recv_comp_slot( - struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx) -{ - struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc; - u32 filled, avail, next; - struct recv_comp_data *rcd; - - if (unlikely(!nvdev->recv_section)) - return NULL; - - if (unlikely(!mrc->buf)) - return NULL; - - if (atomic_read(&nvdev->num_outstanding_recvs) > - nvdev->recv_section->num_sub_allocs * NETVSC_RCD_WATERMARK / 100) - netvsc_chk_recv_comp(nvdev, channel, q_idx); - - count_recv_comp_slot(nvdev, q_idx, &filled, &avail); - if (!avail) - return NULL; - - next = mrc->next; - rcd = mrc->buf + next * sizeof(struct recv_comp_data); - mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX; + if (unlikely(!avail)) { + netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n", + q_idx, tid); + return; + } - atomic_inc(&nvdev->num_outstanding_recvs); + rcd = mrc->slots + mrc->next; + rcd->tid = tid; + rcd->status = status; - return rcd; + if (++mrc->next == nvdev->recv_completion_cnt) + mrc->next = 0; } static int netvsc_receive(struct net_device *ndev, - struct netvsc_device *net_device, - struct net_device_context *net_device_ctx, - struct hv_device *device, - struct vmbus_channel *channel, - const struct vmpacket_descriptor *desc, - struct nvsp_message *nvsp) + struct netvsc_device *net_device, + struct net_device_context *net_device_ctx, + struct hv_device *device, + struct vmbus_channel *channel, + const struct vmpacket_descriptor *desc, + struct nvsp_message *nvsp) { const struct vmtransfer_page_packet_header *vmxferpage_packet = container_of(desc, const struct vmtransfer_page_packet_header, d); @@ -1083,7 +1051,6 @@ static int netvsc_receive(struct net_device *ndev, u32 status = NVSP_STAT_SUCCESS; int i; int count = 0; - int ret; /* Make sure this is a valid nvsp packet */ if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) { @@ -1114,25 +1081,9 @@ static int netvsc_receive(struct net_device *ndev, channel, data, buflen); } - if (net_device->chan_table[q_idx].mrc.buf) { - struct recv_comp_data *rcd; + enq_receive_complete(ndev, net_device, q_idx, + vmxferpage_packet->d.trans_id, status); - rcd = get_recv_comp_slot(net_device, channel, q_idx); - if (rcd) { - rcd->tid = vmxferpage_packet->d.trans_id; - rcd->status = status; - } else { - netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n", - q_idx, vmxferpage_packet->d.trans_id); - } - } else { - ret = netvsc_send_recv_completion(channel, - vmxferpage_packet->d.trans_id, - status); - if (ret) - netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n", - q_idx, vmxferpage_packet->d.trans_id, ret); - } return count; } @@ -1231,7 +1182,6 @@ int netvsc_poll(struct napi_struct *napi, int budget) struct netvsc_device *net_device = 
nvchan->net_device; struct vmbus_channel *channel = nvchan->channel; struct hv_device *device = netvsc_channel_to_device(channel); - u16 q_idx = channel->offermsg.offer.sub_channel_index; struct net_device *ndev = hv_get_drvdata(device); int work_done = 0; @@ -1245,17 +1195,18 @@ int netvsc_poll(struct napi_struct *napi, int budget) nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } - /* If receive ring was exhausted - * and not doing busy poll - * then re-enable host interrupts - * and reschedule if ring is not empty. + /* If send of pending receive completions suceeded + * and did not exhaust NAPI budget + * and not doing busy poll + * then reschedule if more data has arrived from host */ - if (work_done < budget && + if (send_recv_completions(nvchan) == 0 && + work_done < budget && napi_complete_done(napi, work_done) && - hv_end_read(&channel->inbound) != 0) + hv_end_read(&channel->inbound)) { + hv_begin_read(&channel->inbound); napi_reschedule(napi); - - netvsc_chk_recv_comp(net_device, channel, q_idx); + } /* Driver may overshoot since multiple packets per descriptor */ return min(work_done, budget); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index d80e9e3f433e..44165fe328a4 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -928,12 +928,12 @@ static bool netvsc_device_idle(const struct netvsc_device *nvdev) { int i; - if (atomic_read(&nvdev->num_outstanding_recvs) > 0) - return false; - for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; + if (nvchan->mrc.first != nvchan->mrc.next) + return false; + if (atomic_read(&nvchan->queue_sends) > 0) return false; } @@ -1031,11 +1031,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc) return; nvchan = nvscdev->chan_table + chn_index; - nvchan->mrc.buf - = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data)); - - if (!nvchan->mrc.buf) - return; /* Because the device uses NAPI, all the interrupt batching and * control is done via Net softirq, not the channel handling @@ -1225,6 +1220,15 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, if (num_rss_qs == 0) return net_device; + for (i = 1; i < net_device->num_chn; i++) { + ret = netvsc_alloc_recv_comp_ring(net_device, i); + if (ret) { + while (--i != 0) + vfree(net_device->chan_table[i].mrc.slots); + goto out; + } + } + refcount_set(&net_device->sc_offered, num_rss_qs); vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open); -- cgit v1.2.3-55-g7522 From 493933472d33d3e82d6323842edeb281199ee430 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 28 Jul 2017 08:59:46 -0700 Subject: netvsc: fix error unwind on device setup failure If setting receive buffer fails, the error unwind would cause kernel panic because it was not correctly doing RCU and NAPI unwind. RCU'd pointer needs to be reset to NULL, and NAPI needs to be disabled not deleted. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 4c709b454d34..db95487807fd 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1307,7 +1307,8 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, return net_device; close: - netif_napi_del(&net_device->chan_table[0].napi); + RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); + napi_disable(&net_device->chan_table[0].napi); /* Now, we can close the channel safely */ vmbus_close(device->channel); -- cgit v1.2.3-55-g7522 From f4e403633bcd290a4db2568364657d07b42ff890 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 28 Jul 2017 08:59:47 -0700 Subject: netvsc: signal host if receive ring is emptied Latency improvement related to NAPI conversion. If all packets are processed from receive ring then need to signal host. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index db95487807fd..c64934c64dca 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1195,10 +1195,15 @@ int netvsc_poll(struct napi_struct *napi, int budget) nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } - /* If send of pending receive completions suceeded - * and did not exhaust NAPI budget + /* if ring is empty, signal host */ + if (!nvchan->desc) + hv_pkt_iter_close(channel); + + /* If send of pending receive completions suceeded + * and did not exhaust NAPI budget this time * and not doing busy poll - * then reschedule if more data has arrived from host + * then re-enable host interrupts + * and reschedule if ring is not empty. */ if (send_recv_completions(nvchan) == 0 && work_done < budget && -- cgit v1.2.3-55-g7522 From 1a5f3da20bd966220931239fbd31e6ac6ff42251 Mon Sep 17 00:00:00 2001 From: Vidya Sagar Ravipati Date: Thu, 27 Jul 2017 16:47:26 -0700 Subject: net: ethtool: add support for forward error correction modes Forward Error Correction (FEC) modes i.e Base-R and Reed-Solomon modes are introduced in 25G/40G/100G standards for providing good BER at high speeds. Various networking devices which support 25G/40G/100G provides ability to manage supported FEC modes and the lack of FEC encoding control and reporting today is a source for interoperability issues for many vendors. FEC capability as well as specific FEC mode i.e. Base-R or RS modes can be requested or advertised through bits D44:47 of base link codeword. This patch set intends to provide option under ethtool to manage and report FEC encoding settings for networking devices as per IEEE 802.3 bj, bm and by specs. set-fec/show-fec option(s) are designed to provide control and report the FEC encoding on the link. 
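As a minimal userspace sketch (not part of this patch, and assuming only the ETHTOOL_GFECPARAM command and struct ethtool_fecparam added by the UAPI hunk below, reached through the standard SIOCETHTOOL ioctl path), the new get interface could be exercised roughly like this:

/* Hypothetical sketch, not from the patch: read FEC parameters of an
 * interface through the new ETHTOOL_GFECPARAM command via SIOCETHTOOL.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	struct ethtool_fecparam fec = { .cmd = ETHTOOL_GFECPARAM };
	struct ifreq ifr;
	int fd, ret = 1;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <ifname>\n", argv[0]);
		return 1;
	}

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, argv[1], IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&fec;	/* ethtool commands ride on SIOCETHTOOL */

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		printf("configured FEC mask: 0x%x, active FEC: 0x%x\n",
		       fec.fec, fec.active_fec);
		ret = 0;
	} else {
		perror("ETHTOOL_GFECPARAM");
	}

	close(fd);
	return ret;
}

Here the fec field carries the ETHTOOL_FEC_* bitmask of supported/configured modes and active_fec reports the mode currently in use, mirroring the --show-fec output described next.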
SET FEC option: root@tor: ethtool --set-fec swp1 encoding [off | RS | BaseR | auto] Encoding: Types of encoding Off : Turning off any encoding RS : enforcing RS-FEC encoding on supported speeds BaseR : enforcing Base R encoding on supported speeds Auto : IEEE defaults for the speed/medium combination Here are a few examples of what we would expect if encoding=auto: - if autoneg is on, we are expecting FEC to be negotiated as on or off as long as the protocol supports it - if the hardware is capable of detecting the FEC encoding on its receiver, it will reconfigure its encoder to match - in the absence of the above, the configuration would be set to IEEE defaults. From our understanding, this is essentially what most hardware/driver combinations are doing today in the absence of a way for users to control the behavior. SHOW FEC option: root@tor: ethtool --show-fec swp1 FEC parameters for swp1: Active FEC encodings: RS Configured FEC encodings: RS | BaseR ETHTOOL DEVNAME output modification: ethtool devname output: root@tor:~# ethtool swp1 Settings for swp1: root@hpe-7712-03:~# ethtool swp18 Settings for swp18: Supported ports: [ FIBRE ] Supported link modes: 40000baseCR4/Full 40000baseSR4/Full 40000baseLR4/Full 100000baseSR4/Full 100000baseCR4/Full 100000baseLR4_ER4/Full Supported pause frame use: No Supports auto-negotiation: Yes Supported FEC modes: [RS | BaseR | None | Not reported] Advertised link modes: Not reported Advertised pause frame use: No Advertised auto-negotiation: No Advertised FEC modes: [RS | BaseR | None | Not reported] <<<< One or more FEC modes Speed: 100000Mb/s Duplex: Full Port: FIBRE PHYAD: 106 Transceiver: internal Auto-negotiation: off Link detected: yes This patch includes the following changes: a) New ETHTOOL_GFECPARAM/ETHTOOL_SFECPARAM API, handled by the new get_fecparam/set_fecparam callbacks, provides support for configuration of forward error correction modes. b) Link mode bits for FEC modes i.e. None (No FEC mode), RS, BaseR/FC are defined so that users can configure these FEC modes for supported and advertising fields as part of link autonegotiation. Signed-off-by: Vidya Sagar Ravipati Signed-off-by: Dustin Byford Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/linux/ethtool.h | 4 ++++ include/uapi/linux/ethtool.h | 48 +++++++++++++++++++++++++++++++++++++++++++- net/core/ethtool.c | 34 +++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+), 1 deletion(-) diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 83cc9863444b..afdbb701fdb4 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -374,5 +374,9 @@ struct ethtool_ops { struct ethtool_link_ksettings *); int (*set_link_ksettings)(struct net_device *, const struct ethtool_link_ksettings *); + int (*get_fecparam)(struct net_device *, + struct ethtool_fecparam *); + int (*set_fecparam)(struct net_device *, + struct ethtool_fecparam *); }; #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 7d4a594d5d58..9c041dae8e2c 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -1238,6 +1238,47 @@ struct ethtool_per_queue_op { char data[]; }; +/** + * struct ethtool_fecparam - Ethernet forward error correction(fec) parameters + * @cmd: Command number = %ETHTOOL_GFECPARAM or %ETHTOOL_SFECPARAM + * @active_fec: FEC mode which is active on porte + * @fec: Bitmask of supported/configured FEC modes + * @rsvd: Reserved for future extensions. i.e FEC bypass feature. 
+ * + * Drivers should reject a non-zero setting of @autoneg when + * autoneogotiation is disabled (or not supported) for the link. + * + */ +struct ethtool_fecparam { + __u32 cmd; + /* bitmask of FEC modes */ + __u32 active_fec; + __u32 fec; + __u32 reserved; +}; + +/** + * enum ethtool_fec_config_bits - flags definition of ethtool_fec_configuration + * @ETHTOOL_FEC_NONE: FEC mode configuration is not supported + * @ETHTOOL_FEC_AUTO: Default/Best FEC mode provided by driver + * @ETHTOOL_FEC_OFF: No FEC Mode + * @ETHTOOL_FEC_RS: Reed-Solomon Forward Error Detection mode + * @ETHTOOL_FEC_BASER: Base-R/Reed-Solomon Forward Error Detection mode + */ +enum ethtool_fec_config_bits { + ETHTOOL_FEC_NONE_BIT, + ETHTOOL_FEC_AUTO_BIT, + ETHTOOL_FEC_OFF_BIT, + ETHTOOL_FEC_RS_BIT, + ETHTOOL_FEC_BASER_BIT, +}; + +#define ETHTOOL_FEC_NONE (1 << ETHTOOL_FEC_NONE_BIT) +#define ETHTOOL_FEC_AUTO (1 << ETHTOOL_FEC_AUTO_BIT) +#define ETHTOOL_FEC_OFF (1 << ETHTOOL_FEC_OFF_BIT) +#define ETHTOOL_FEC_RS (1 << ETHTOOL_FEC_RS_BIT) +#define ETHTOOL_FEC_BASER (1 << ETHTOOL_FEC_BASER_BIT) + /* CMDs currently supported */ #define ETHTOOL_GSET 0x00000001 /* DEPRECATED, Get settings. * Please use ETHTOOL_GLINKSETTINGS @@ -1330,6 +1371,8 @@ struct ethtool_per_queue_op { #define ETHTOOL_SLINKSETTINGS 0x0000004d /* Set ethtool_link_settings */ #define ETHTOOL_PHY_GTUNABLE 0x0000004e /* Get PHY tunable configuration */ #define ETHTOOL_PHY_STUNABLE 0x0000004f /* Set PHY tunable configuration */ +#define ETHTOOL_GFECPARAM 0x00000050 /* Get FEC settings */ +#define ETHTOOL_SFECPARAM 0x00000051 /* Set FEC settings */ /* compatibility with older code */ #define SPARC_ETH_GSET ETHTOOL_GSET @@ -1387,6 +1430,9 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47, ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48, + ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49, + ETHTOOL_LINK_MODE_FEC_RS_BIT = 50, + ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51, /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_* @@ -1395,7 +1441,7 @@ enum ethtool_link_mode_bit_indices { */ __ETHTOOL_LINK_MODE_LAST - = ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + = ETHTOOL_LINK_MODE_FEC_BASER_BIT, }; #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \ diff --git a/net/core/ethtool.c b/net/core/ethtool.c index b987bc475fc8..6a582ae4c5d9 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -2512,6 +2512,33 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr) return ret; } +static int ethtool_get_fecparam(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_fecparam fecparam = { ETHTOOL_GFECPARAM }; + + if (!dev->ethtool_ops->get_fecparam) + return -EOPNOTSUPP; + + dev->ethtool_ops->get_fecparam(dev, &fecparam); + + if (copy_to_user(useraddr, &fecparam, sizeof(fecparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_fecparam(struct net_device *dev, void __user *useraddr) +{ + struct ethtool_fecparam fecparam; + + if (!dev->ethtool_ops->set_fecparam) + return -EOPNOTSUPP; + + if (copy_from_user(&fecparam, useraddr, sizeof(fecparam))) + return -EFAULT; + + return dev->ethtool_ops->set_fecparam(dev, &fecparam); +} + /* The main entry point in this file. 
Called from net/core/dev_ioctl.c */ int dev_ethtool(struct net *net, struct ifreq *ifr) @@ -2570,6 +2597,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_GTUNABLE: case ETHTOOL_PHY_GTUNABLE: case ETHTOOL_GLINKSETTINGS: + case ETHTOOL_GFECPARAM: break; default: if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) @@ -2779,6 +2807,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) case ETHTOOL_PHY_STUNABLE: rc = set_phy_tunable(dev, useraddr); break; + case ETHTOOL_GFECPARAM: + rc = ethtool_get_fecparam(dev, useraddr); + break; + case ETHTOOL_SFECPARAM: + rc = ethtool_set_fecparam(dev, useraddr); + break; default: rc = -EOPNOTSUPP; } -- cgit v1.2.3-55-g7522 From 158a5c0a24d1d83b3d7e7ba159eafbae1b9f9d60 Mon Sep 17 00:00:00 2001 From: Casey Leedom Date: Thu, 27 Jul 2017 16:47:27 -0700 Subject: cxgb4: core hardware/firmware support for Forward Error Correction on a link Signed-off-by: Casey Leedom Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 152 ++++++++++++++++++++++------- 1 file changed, 117 insertions(+), 35 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index db41b3e99b81..24087c886974 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3840,11 +3840,64 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ FW_PORT_CAP_ANEG) +/* Translate Firmware Port Capabilities Pause specification to Common Code */ +static inline unsigned int fwcap_to_cc_pause(unsigned int fw_pause) +{ + unsigned int cc_pause = 0; + + if (fw_pause & FW_PORT_CAP_FC_RX) + cc_pause |= PAUSE_RX; + if (fw_pause & FW_PORT_CAP_FC_TX) + cc_pause |= PAUSE_TX; + + return cc_pause; +} + +/* Translate Common Code Pause specification into Firmware Port Capabilities */ +static inline unsigned int cc_to_fwcap_pause(unsigned int cc_pause) +{ + unsigned int fw_pause = 0; + + if (cc_pause & PAUSE_RX) + fw_pause |= FW_PORT_CAP_FC_RX; + if (cc_pause & PAUSE_TX) + fw_pause |= FW_PORT_CAP_FC_TX; + + return fw_pause; +} + +/* Translate Firmware Forward Error Correction specification to Common Code */ +static inline unsigned int fwcap_to_cc_fec(unsigned int fw_fec) +{ + unsigned int cc_fec = 0; + + if (fw_fec & FW_PORT_CAP_FEC_RS) + cc_fec |= FEC_RS; + if (fw_fec & FW_PORT_CAP_FEC_BASER_RS) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +/* Translate Common Code Forward Error Correction specification to Firmware */ +static inline unsigned int cc_to_fwcap_fec(unsigned int cc_fec) +{ + unsigned int fw_fec = 0; + + if (cc_fec & FEC_RS) + fw_fec |= FW_PORT_CAP_FEC_RS; + if (cc_fec & FEC_BASER_RS) + fw_fec |= FW_PORT_CAP_FEC_BASER_RS; + + return fw_fec; +} + /** * t4_link_l1cfg - apply link configuration to MAC/PHY - * @phy: the PHY to setup - * @mac: the MAC to setup - * @lc: the requested link configuration + * @adapter: the adapter + * @mbox: the Firmware Mailbox to use + * @port: the Port ID + * @lc: the Port's Link Configuration * * Set up a port's MAC and PHY according to a desired link configuration. 
* - If the PHY can auto-negotiate first decide what to advertise, then @@ -3857,22 +3910,46 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, struct link_config *lc) { struct fw_port_cmd c; - unsigned int mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); - unsigned int fc = 0, fec = 0, fw_fec = 0; + unsigned int fw_mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); + unsigned int fw_fc, cc_fec, fw_fec; + unsigned int rcap; lc->link_ok = 0; - if (lc->requested_fc & PAUSE_RX) - fc |= FW_PORT_CAP_FC_RX; - if (lc->requested_fc & PAUSE_TX) - fc |= FW_PORT_CAP_FC_TX; - fec = lc->requested_fec & FEC_AUTO ? lc->auto_fec : lc->requested_fec; + /* Convert driver coding of Pause Frame Flow Control settings into the + * Firmware's API. + */ + fw_fc = cc_to_fwcap_pause(lc->requested_fc); + + /* Convert Common Code Forward Error Control settings into the + * Firmware's API. If the current Requested FEC has "Automatic" + * (IEEE 802.3) specified, then we use whatever the Firmware + * sent us as part of it's IEEE 802.3-based interpratation of + * the Transceiver Module EPROM FEC parameters. Otherwise we + * use whatever is in the current Requested FEC settings. + */ + if (lc->requested_fec & FEC_AUTO) + cc_fec = lc->auto_fec; + else + cc_fec = lc->requested_fec; + fw_fec = cc_to_fwcap_fec(cc_fec); - if (fec & FEC_RS) - fw_fec |= FW_PORT_CAP_FEC_RS; - if (fec & FEC_BASER_RS) - fw_fec |= FW_PORT_CAP_FEC_BASER_RS; + /* Figure out what our Requested Port Capabilities are going to be. + */ + if (!(lc->supported & FW_PORT_CAP_ANEG)) { + rcap = (lc->supported & ADVERT_MASK) | fw_fc | fw_fec; + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + lc->fec = cc_fec; + } else if (lc->autoneg == AUTONEG_DISABLE) { + rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi; + lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + lc->fec = cc_fec; + } else { + rcap = lc->advertising | fw_fc | fw_fec | fw_mdi; + } + /* And send that on to the Firmware ... + */ memset(&c, 0, sizeof(c)); c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | FW_CMD_EXEC_F | @@ -3880,19 +3957,7 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, c.action_to_len16 = cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); - - if (!(lc->supported & FW_PORT_CAP_ANEG)) { - c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) | - fc | fw_fec); - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - } else if (lc->autoneg == AUTONEG_DISABLE) { - c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | - fw_fec | mdi); - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); - } else - c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | - fw_fec | mdi); - + c.u.l1cfg.rcap = cpu_to_be32(rcap); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -7630,19 +7695,28 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc) void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) { const struct fw_port_cmd *p = (const void *)rpl; + unsigned int acaps = be16_to_cpu(p->u.info.acap); struct adapter *adap = pi->adapter; /* link/module state change message */ - int speed = 0, fc = 0; + int speed = 0, fc, fec; struct link_config *lc; u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; u32 mod = FW_PORT_CMD_MODTYPE_G(stat); + /* Unfortunately the format of the Link Status returned by the + * Firmware isn't the same as the Firmware Port Capabilities bitfield + * used everywhere else ... 
+ */ + fc = 0; if (stat & FW_PORT_CMD_RXPAUSE_F) fc |= PAUSE_RX; if (stat & FW_PORT_CMD_TXPAUSE_F) fc |= PAUSE_TX; + + fec = fwcap_to_cc_fec(acaps); + if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) speed = 100; else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) @@ -7659,11 +7733,20 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) lc = &pi->link_cfg; if (mod != pi->mod_type) { + /* When a new Transceiver Module is inserted, the Firmware + * will examine any Forward Error Correction parameters + * present in the Transceiver Module i2c EPROM and determine + * the supported and recommended FEC settings from those + * based on IEEE 802.3 standards. We always record the + * IEEE 802.3 recommended "automatic" settings. + */ + lc->auto_fec = fec; + pi->mod_type = mod; t4_os_portmod_changed(adap, pi->port_id); } if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc) { /* something changed */ + fc != lc->fc || fec != lc->fec) { /* something changed */ if (!link_ok && lc->link_ok) { unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat); @@ -7675,6 +7758,8 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) lc->link_ok = link_ok; lc->speed = speed; lc->fc = fc; + lc->fec = fec; + lc->supported = be16_to_cpu(p->u.info.pcap); lc->lp_advertising = be16_to_cpu(p->u.info.lpacap); @@ -7764,7 +7849,8 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p) /** * init_link_config - initialize a link's SW state * @lc: structure holding the link state - * @caps: link capabilities + * @pcaps: link Port Capabilities + * @acaps: link current Advertised Port Capabilities * * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. @@ -7777,15 +7863,11 @@ static void init_link_config(struct link_config *lc, unsigned int pcaps, lc->requested_speed = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; - lc->auto_fec = 0; /* For Forward Error Control, we default to whatever the Firmware * tells us the Link is currently advertising. */ - if (acaps & FW_PORT_CAP_FEC_RS) - lc->auto_fec |= FEC_RS; - if (acaps & FW_PORT_CAP_FEC_BASER_RS) - lc->auto_fec |= FEC_BASER_RS; + lc->auto_fec = fwcap_to_cc_fec(acaps); lc->requested_fec = FEC_AUTO; lc->fec = lc->auto_fec; -- cgit v1.2.3-55-g7522 From 7fece840e35e52c7114dcf874a72b2c962923800 Mon Sep 17 00:00:00 2001 From: Casey Leedom Date: Thu, 27 Jul 2017 16:47:28 -0700 Subject: cxgb4: ethtool forward error correction management support Signed-off-by: Casey Leedom Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 100 +++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 26eb00a45db1..03f593e84c24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -801,6 +801,104 @@ static int set_link_ksettings(struct net_device *dev, return ret; } +/* Translate the Firmware FEC value into the ethtool value. 
*/ +static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec) +{ + unsigned int eth_fec = 0; + + if (fw_fec & FW_PORT_CAP_FEC_RS) + eth_fec |= ETHTOOL_FEC_RS; + if (fw_fec & FW_PORT_CAP_FEC_BASER_RS) + eth_fec |= ETHTOOL_FEC_BASER; + + /* if nothing is set, then FEC is off */ + if (!eth_fec) + eth_fec = ETHTOOL_FEC_OFF; + + return eth_fec; +} + +/* Translate Common Code FEC value into ethtool value. */ +static inline unsigned int cc_to_eth_fec(unsigned int cc_fec) +{ + unsigned int eth_fec = 0; + + if (cc_fec & FEC_AUTO) + eth_fec |= ETHTOOL_FEC_AUTO; + if (cc_fec & FEC_RS) + eth_fec |= ETHTOOL_FEC_RS; + if (cc_fec & FEC_BASER_RS) + eth_fec |= ETHTOOL_FEC_BASER; + + /* if nothing is set, then FEC is off */ + if (!eth_fec) + eth_fec = ETHTOOL_FEC_OFF; + + return eth_fec; +} + +/* Translate ethtool FEC value into Common Code value. */ +static inline unsigned int eth_to_cc_fec(unsigned int eth_fec) +{ + unsigned int cc_fec = 0; + + if (eth_fec & ETHTOOL_FEC_OFF) + return cc_fec; + + if (eth_fec & ETHTOOL_FEC_AUTO) + cc_fec |= FEC_AUTO; + if (eth_fec & ETHTOOL_FEC_RS) + cc_fec |= FEC_RS; + if (eth_fec & ETHTOOL_FEC_BASER) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec) +{ + const struct port_info *pi = netdev_priv(dev); + const struct link_config *lc = &pi->link_cfg; + + /* Translate the Firmware FEC Support into the ethtool value. We + * always support IEEE 802.3 "automatic" selection of Link FEC type if + * any FEC is supported. + */ + fec->fec = fwcap_to_eth_fec(lc->supported); + if (fec->fec != ETHTOOL_FEC_OFF) + fec->fec |= ETHTOOL_FEC_AUTO; + + /* Translate the current internal FEC parameters into the + * ethtool values. + */ + fec->active_fec = cc_to_eth_fec(lc->fec); + + return 0; +} + +static int set_fecparam(struct net_device *dev, struct ethtool_fecparam *fec) +{ + struct port_info *pi = netdev_priv(dev); + struct link_config *lc = &pi->link_cfg; + struct link_config old_lc; + int ret; + + /* Save old Link Configuration in case the L1 Configure below + * fails. + */ + old_lc = *lc; + + /* Try to perform the L1 Configure and return the result of that + * effort. If it fails, revert the attempted change. + */ + lc->requested_fec = eth_to_cc_fec(fec->fec); + ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, + pi->tx_chan, lc); + if (ret) + *lc = old_lc; + return ret; +} + static void get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause) { @@ -1255,6 +1353,8 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, static const struct ethtool_ops cxgb_ethtool_ops = { .get_link_ksettings = get_link_ksettings, .set_link_ksettings = set_link_ksettings, + .get_fecparam = get_fecparam, + .set_fecparam = set_fecparam, .get_drvinfo = get_drvinfo, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, -- cgit v1.2.3-55-g7522 From d329ac88eb217fef1516a1dcfda27b7f5a8eb07b Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Sun, 30 Jul 2017 19:58:53 +0200 Subject: net: dsa: lan9303: Fix lan9303_detect_phy_setup() for MDIO Handle that MDIO read with no response return 0xffff. Signed-off-by: Egil Hjelmeland Reviewed-by: Andrew Lunn Reviewed-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/lan9303-core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index cd76e61f1fca..9d0ab77edb4a 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -427,6 +427,7 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) * Special reg 18 of phy 3 reads as 0x0000, if 'phy_addr_sel_strap' is 0 * and the IDs are 0-1-2, else it contains something different from * 0x0000, which means 'phy_addr_sel_strap' is 1 and the IDs are 1-2-3. + * 0xffff is returned on MDIO read with no response. */ reg = lan9303_port_phy_reg_read(chip, 3, MII_LAN911X_SPECIAL_MODES); if (reg < 0) { @@ -434,7 +435,7 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) return reg; } - if (reg != 0) + if ((reg != 0) && (reg != 0xffff)) chip->phy_addr_sel_strap = 1; else chip->phy_addr_sel_strap = 0; -- cgit v1.2.3-55-g7522 From ab78acb152e3f0ded13a13fd27df46448d34a4b7 Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Sun, 30 Jul 2017 19:58:54 +0200 Subject: net: dsa: lan9303: Multiply by 4 to get MDIO register lan9303_mdio_write()/_read() must multiply register number by 4 to get offset. Added some commments to the register definitions. Signed-off-by: Egil Hjelmeland Reviewed-by: Andrew Lunn Reviewed-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/lan9303-core.c | 6 ++++++ drivers/net/dsa/lan9303_mdio.c | 2 ++ 2 files changed, 8 insertions(+) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 9d0ab77edb4a..96ebeb9bd59a 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -20,6 +20,9 @@ #include "lan9303.h" +/* 13.2 System Control and Status Registers + * Multiply register number by 4 to get address offset. + */ #define LAN9303_CHIP_REV 0x14 # define LAN9303_CHIP_ID 0x9303 #define LAN9303_IRQ_CFG 0x15 @@ -53,6 +56,9 @@ #define LAN9303_VIRT_PHY_BASE 0x70 #define LAN9303_VIRT_SPECIAL_CTRL 0x77 +/*13.4 Switch Fabric Control and Status Registers + * Accessed indirectly via SWITCH_CSR_CMD, SWITCH_CSR_DATA. + */ #define LAN9303_SW_DEV_ID 0x0000 #define LAN9303_SW_RESET 0x0001 #define LAN9303_SW_RESET_RESET BIT(0) diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c index 93c36c0541cf..2db7970fc88c 100644 --- a/drivers/net/dsa/lan9303_mdio.c +++ b/drivers/net/dsa/lan9303_mdio.c @@ -40,6 +40,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val) { struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; + reg <<= 2; /* reg num to offset */ mutex_lock(&sw_dev->device->bus->mdio_lock); lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff); lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff); @@ -57,6 +58,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val) { struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx; + reg <<= 2; /* reg num to offset */ mutex_lock(&sw_dev->device->bus->mdio_lock); *val = lan9303_mdio_real_read(sw_dev->device, reg); *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16); -- cgit v1.2.3-55-g7522 From 9e866e5dab383a295001b13f08e42b906b9dd34d Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Sun, 30 Jul 2017 19:58:55 +0200 Subject: net: dsa: lan9303: Renamed indirect phy access functions Preparing for the following fix of MDIO phy access: Renamed functions that access PHY 1 and 2 indirectly through PMI registers. 
lan9303_port_phy_reg_wait_for_completion() to lan9303_indirect_phy_wait_for_completion() lan9303_port_phy_reg_read() to lan9303_indirect_phy_read() lan9303_port_phy_reg_write() to lan9303_indirect_phy_write() Also changed "val" parameter of lan9303_indirect_phy_write() to u16, for clarity. Signed-off-by: Egil Hjelmeland Reviewed-by: Andrew Lunn Reviewed-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/lan9303-core.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 96ebeb9bd59a..9427c3b0ced2 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -248,7 +248,7 @@ static int lan9303_virt_phy_reg_write(struct lan9303 *chip, int regnum, u16 val) return regmap_write(chip->regmap, LAN9303_VIRT_PHY_BASE + regnum, val); } -static int lan9303_port_phy_reg_wait_for_completion(struct lan9303 *chip) +static int lan9303_indirect_phy_wait_for_completion(struct lan9303 *chip) { int ret, i; u32 reg; @@ -268,7 +268,7 @@ static int lan9303_port_phy_reg_wait_for_completion(struct lan9303 *chip) return -EIO; } -static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) +static int lan9303_indirect_phy_read(struct lan9303 *chip, int addr, int regnum) { int ret; u32 val; @@ -278,7 +278,7 @@ static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) mutex_lock(&chip->indirect_mutex); - ret = lan9303_port_phy_reg_wait_for_completion(chip); + ret = lan9303_indirect_phy_wait_for_completion(chip); if (ret) goto on_error; @@ -287,7 +287,7 @@ static int lan9303_port_phy_reg_read(struct lan9303 *chip, int addr, int regnum) if (ret) goto on_error; - ret = lan9303_port_phy_reg_wait_for_completion(chip); + ret = lan9303_indirect_phy_wait_for_completion(chip); if (ret) goto on_error; @@ -305,8 +305,8 @@ on_error: return ret; } -static int lan9303_phy_reg_write(struct lan9303 *chip, int addr, int regnum, - unsigned int val) +static int lan9303_indirect_phy_write(struct lan9303 *chip, int addr, + int regnum, u16 val) { int ret; u32 reg; @@ -317,7 +317,7 @@ static int lan9303_phy_reg_write(struct lan9303 *chip, int addr, int regnum, mutex_lock(&chip->indirect_mutex); - ret = lan9303_port_phy_reg_wait_for_completion(chip); + ret = lan9303_indirect_phy_wait_for_completion(chip); if (ret) goto on_error; @@ -435,7 +435,7 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) * 0x0000, which means 'phy_addr_sel_strap' is 1 and the IDs are 1-2-3. * 0xffff is returned on MDIO read with no response. 
*/ - reg = lan9303_port_phy_reg_read(chip, 3, MII_LAN911X_SPECIAL_MODES); + reg = lan9303_indirect_phy_read(chip, 3, MII_LAN911X_SPECIAL_MODES); if (reg < 0) { dev_err(chip->dev, "Failed to detect phy config: %d\n", reg); return reg; @@ -726,7 +726,7 @@ static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum) if (phy > phy_base + 2) return -ENODEV; - return lan9303_port_phy_reg_read(chip, phy, regnum); + return lan9303_indirect_phy_read(chip, phy, regnum); } static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, @@ -740,7 +740,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, if (phy > phy_base + 2) return -ENODEV; - return lan9303_phy_reg_write(chip, phy, regnum, val); + return lan9303_indirect_phy_write(chip, phy, regnum, val); } static int lan9303_port_enable(struct dsa_switch *ds, int port, @@ -773,13 +773,13 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, switch (port) { case 1: lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET); - lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 1, - MII_BMCR, BMCR_PDOWN); + lan9303_indirect_phy_write(chip, chip->phy_addr_sel_strap + 1, + MII_BMCR, BMCR_PDOWN); break; case 2: lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET); - lan9303_phy_reg_write(chip, chip->phy_addr_sel_strap + 2, - MII_BMCR, BMCR_PDOWN); + lan9303_indirect_phy_write(chip, chip->phy_addr_sel_strap + 2, + MII_BMCR, BMCR_PDOWN); break; default: dev_dbg(chip->dev, -- cgit v1.2.3-55-g7522 From 2c3408986c07515abcbad5dc584a33892e3621da Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Sun, 30 Jul 2017 19:58:56 +0200 Subject: net: dsa: lan9303: MDIO access phy registers directly Indirect access (PMI) to phy register only work in I2C mode. In MDIO mode phy registers must be accessed directly. Introduced struct lan9303_phy_ops to handle the two modes. Signed-off-by: Egil Hjelmeland Reviewed-by: Andrew Lunn Reviewed-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/lan9303-core.c | 20 +++++++++++++------- drivers/net/dsa/lan9303.h | 11 +++++++++++ drivers/net/dsa/lan9303_i2c.c | 2 ++ drivers/net/dsa/lan9303_mdio.c | 21 +++++++++++++++++++++ 4 files changed, 47 insertions(+), 7 deletions(-) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 9427c3b0ced2..8e430d1ee297 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -334,6 +334,12 @@ on_error: return ret; } +const struct lan9303_phy_ops lan9303_indirect_phy_ops = { + .phy_read = lan9303_indirect_phy_read, + .phy_write = lan9303_indirect_phy_write, +}; +EXPORT_SYMBOL_GPL(lan9303_indirect_phy_ops); + static int lan9303_switch_wait_for_completion(struct lan9303 *chip) { int ret, i; @@ -435,7 +441,7 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) * 0x0000, which means 'phy_addr_sel_strap' is 1 and the IDs are 1-2-3. * 0xffff is returned on MDIO read with no response. 
*/ - reg = lan9303_indirect_phy_read(chip, 3, MII_LAN911X_SPECIAL_MODES); + reg = chip->ops->phy_read(chip, 3, MII_LAN911X_SPECIAL_MODES); if (reg < 0) { dev_err(chip->dev, "Failed to detect phy config: %d\n", reg); return reg; @@ -726,7 +732,7 @@ static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum) if (phy > phy_base + 2) return -ENODEV; - return lan9303_indirect_phy_read(chip, phy, regnum); + return chip->ops->phy_read(chip, phy, regnum); } static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, @@ -740,7 +746,7 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, if (phy > phy_base + 2) return -ENODEV; - return lan9303_indirect_phy_write(chip, phy, regnum, val); + return chip->ops->phy_write(chip, phy, regnum, val); } static int lan9303_port_enable(struct dsa_switch *ds, int port, @@ -773,13 +779,13 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, switch (port) { case 1: lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET); - lan9303_indirect_phy_write(chip, chip->phy_addr_sel_strap + 1, - MII_BMCR, BMCR_PDOWN); + lan9303_phy_write(ds, chip->phy_addr_sel_strap + 1, + MII_BMCR, BMCR_PDOWN); break; case 2: lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET); - lan9303_indirect_phy_write(chip, chip->phy_addr_sel_strap + 2, - MII_BMCR, BMCR_PDOWN); + lan9303_phy_write(ds, chip->phy_addr_sel_strap + 2, + MII_BMCR, BMCR_PDOWN); break; default: dev_dbg(chip->dev, diff --git a/drivers/net/dsa/lan9303.h b/drivers/net/dsa/lan9303.h index d1512dad2d90..4d8be555ff4d 100644 --- a/drivers/net/dsa/lan9303.h +++ b/drivers/net/dsa/lan9303.h @@ -2,6 +2,15 @@ #include #include +struct lan9303; + +struct lan9303_phy_ops { + /* PHY 1 and 2 access*/ + int (*phy_read)(struct lan9303 *chip, int port, int regnum); + int (*phy_write)(struct lan9303 *chip, int port, + int regnum, u16 val); +}; + struct lan9303 { struct device *dev; struct regmap *regmap; @@ -11,9 +20,11 @@ struct lan9303 { bool phy_addr_sel_strap; struct dsa_switch *ds; struct mutex indirect_mutex; /* protect indexed register access */ + const struct lan9303_phy_ops *ops; }; extern const struct regmap_access_table lan9303_register_set; +extern const struct lan9303_phy_ops lan9303_indirect_phy_ops; int lan9303_probe(struct lan9303 *chip, struct device_node *np); int lan9303_remove(struct lan9303 *chip); diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c index ab3ce0da5071..24ec20f7f444 100644 --- a/drivers/net/dsa/lan9303_i2c.c +++ b/drivers/net/dsa/lan9303_i2c.c @@ -63,6 +63,8 @@ static int lan9303_i2c_probe(struct i2c_client *client, i2c_set_clientdata(client, sw_dev); sw_dev->chip.dev = &client->dev; + sw_dev->chip.ops = &lan9303_indirect_phy_ops; + ret = lan9303_probe(&sw_dev->chip, client->dev.of_node); if (ret != 0) return ret; diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c index 2db7970fc88c..fc16668a487f 100644 --- a/drivers/net/dsa/lan9303_mdio.c +++ b/drivers/net/dsa/lan9303_mdio.c @@ -67,6 +67,25 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val) return 0; } +int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg, u16 val) +{ + struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev); + + return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val); +} + +int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg) +{ + struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev); + + return mdiobus_read_nested(sw_dev->device->bus, phy, reg); +} + 
+static const struct lan9303_phy_ops lan9303_mdio_phy_ops = { + .phy_read = lan9303_mdio_phy_read, + .phy_write = lan9303_mdio_phy_write, +}; + static const struct regmap_config lan9303_mdio_regmap_config = { .reg_bits = 8, .val_bits = 32, @@ -108,6 +127,8 @@ static int lan9303_mdio_probe(struct mdio_device *mdiodev) dev_set_drvdata(&mdiodev->dev, sw_dev); sw_dev->chip.dev = &mdiodev->dev; + sw_dev->chip.ops = &lan9303_mdio_phy_ops; + ret = lan9303_probe(&sw_dev->chip, mdiodev->dev.of_node); if (ret != 0) return ret; -- cgit v1.2.3-55-g7522 From 9558df3a8251f1c636eaf3d2222b6da2eb5d1086 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 30 Jul 2017 22:11:06 +0200 Subject: net: fec: Issue error for missing but expected PHY If the PHY is missing but expected, e.g. because of a typo in the dt file, it is not possible to open the interface. ip link returns: RTNETLINK answers: No such device It is not very obvious what the problem is. Add a netdev_err() in this case to make it easier to debug the issue. [ 21.409385] fec 2188000.ethernet eth0: Unable to connect to phy RTNETLINK answers: No such device Signed-off-by: Andrew Lunn Acked-by: Fugang Duan Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a6e323f15637..faa36f6ddf29 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1904,8 +1904,10 @@ static int fec_enet_mii_probe(struct net_device *ndev) phy_dev = of_phy_connect(ndev, fep->phy_node, &fec_enet_adjust_link, 0, fep->phy_interface); - if (!phy_dev) + if (!phy_dev) { + netdev_err(ndev, "Unable to connect to phy\n"); return -ENODEV; + } } else { /* check for attached phy */ for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { -- cgit v1.2.3-55-g7522 From fbbeefdd21049fcf9437c809da3828b210577f36 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 30 Jul 2017 19:36:05 +0200 Subject: net: fec: Allow reception of frames bigger than 1522 bytes The FEC Receive Control Register has a 14 bit field indicating the longest frame that may be received. It is being set to 1522. Frames longer than this are discarded, but counted as being in error. When using DSA, frames from the switch have an additional header, either 4 or 8 bytes if a Marvell switch is used. Thus a full MTU frame of 1522 bytes received by the switch on a port becomes 1530 bytes when passed to the host via the FEC interface. Change the maximum receive size to 2048 - 64, where 64 is the maximum rx_alignment applied on the receive buffer for AVB capable FEC cores. Use this value also for the maximum receive buffer size. The driver is already allocating a receive SKB of 2048 bytes, so this change should not have any significant effects. Tested on imx51, imx6, vf610. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index faa36f6ddf29..df09b254553d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -173,10 +173,12 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #endif /* CONFIG_M5272 */ /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
However, alignment requirements + * varies between FEC variants. Worst case is 64, so round down by 64. */ -#define PKT_MAXBUF_SIZE 1522 +#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) #define PKT_MINBUF_SIZE 64 -#define PKT_MAXBLR_SIZE 1536 /* FEC receive acceleration */ #define FEC_RACC_IPDIS (1 << 1) @@ -851,7 +853,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_rx_queues; i++) { rxq = fep->rx_queue[i]; writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); - writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); + writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); /* enable DMA1/2 */ if (i) -- cgit v1.2.3-55-g7522 From 64c83d837329531252a1a0f0dfdd4fd607e1d8e9 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 30 Jul 2017 13:24:49 -0400 Subject: net netlink: Add new type NLA_BITFIELD32 Generic bitflags attribute content sent to the kernel by user. With this netlink attr type the user can either set or unset a flag in the kernel. The value is a bitmap that defines the bit values being set The selector is a bitmask that defines which value bit is to be considered. A check is made to ensure the rules that a kernel subsystem always conforms to bitflags the kernel already knows about. i.e if the user tries to set a bit flag that is not understood then the _it will be rejected_. In the most basic form, the user specifies the attribute policy as: [ATTR_GOO] = { .type = NLA_BITFIELD32, .validation_data = &myvalidflags }, where myvalidflags is the bit mask of the flags the kernel understands. If the user _does not_ provide myvalidflags then the attribute will also be rejected. Examples: value = 0x0, and selector = 0x1 implies we are selecting bit 1 and we want to set its value to 0. value = 0x2, and selector = 0x2 implies we are selecting bit 2 and we want to set its value to 1. Suggested-by: Jiri Pirko Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- include/net/netlink.h | 16 ++++++++++++++++ include/uapi/linux/netlink.h | 17 +++++++++++++++++ lib/nlattr.c | 30 ++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+) diff --git a/include/net/netlink.h b/include/net/netlink.h index ef8e6c3a80a6..82dd298b40c7 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -178,6 +178,7 @@ enum { NLA_S16, NLA_S32, NLA_S64, + NLA_BITFIELD32, __NLA_TYPE_MAX, }; @@ -206,6 +207,7 @@ enum { * NLA_MSECS Leaving the length field zero will verify the * given type fits, using it verifies minimum length * just like "All other" + * NLA_BITFIELD32 A 32-bit bitmap/bitselector attribute * All other Minimum length of attribute payload * * Example: @@ -213,11 +215,13 @@ enum { * [ATTR_FOO] = { .type = NLA_U16 }, * [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ }, * [ATTR_BAZ] = { .len = sizeof(struct mystruct) }, + * [ATTR_GOO] = { .type = NLA_BITFIELD32, .validation_data = &myvalidflags }, * }; */ struct nla_policy { u16 type; u16 len; + void *validation_data; }; /** @@ -1202,6 +1206,18 @@ static inline struct in6_addr nla_get_in6_addr(const struct nlattr *nla) return tmp; } +/** + * nla_get_bitfield32 - return payload of 32 bitfield attribute + * @nla: nla_bitfield32 attribute + */ +static inline struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla) +{ + struct nla_bitfield32 tmp; + + nla_memcpy(&tmp, nla, sizeof(tmp)); + return tmp; +} + /** * nla_memdup - duplicate attribute memory (kmemdup) * @src: netlink attribute to duplicate from diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h index f86127a46cfc..f4fc9c9e123d 100644 --- a/include/uapi/linux/netlink.h +++ b/include/uapi/linux/netlink.h @@ -226,5 +226,22 @@ struct nlattr { #define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1)) #define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr))) +/* Generic 32 bitflags attribute content sent to the kernel. + * + * The value is a bitmap that defines the values being set + * The selector is a bitmask that defines which value is legit + * + * Examples: + * value = 0x0, and selector = 0x1 + * implies we are selecting bit 1 and we want to set its value to 0. + * + * value = 0x2, and selector = 0x2 + * implies we are selecting bit 2 and we want to set its value to 1. 
+ * + */ +struct nla_bitfield32 { + __u32 value; + __u32 selector; +}; #endif /* _UAPI__LINUX_NETLINK_H */ diff --git a/lib/nlattr.c b/lib/nlattr.c index fb52435be42d..ee79b7a3c6b0 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -27,6 +27,30 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { [NLA_S64] = sizeof(s64), }; +static int validate_nla_bitfield32(const struct nlattr *nla, + u32 *valid_flags_allowed) +{ + const struct nla_bitfield32 *bf = nla_data(nla); + u32 *valid_flags_mask = valid_flags_allowed; + + if (!valid_flags_allowed) + return -EINVAL; + + /*disallow invalid bit selector */ + if (bf->selector & ~*valid_flags_mask) + return -EINVAL; + + /*disallow invalid bit values */ + if (bf->value & ~*valid_flags_mask) + return -EINVAL; + + /*disallow valid bit values that are not selected*/ + if (bf->value & ~bf->selector) + return -EINVAL; + + return 0; +} + static int validate_nla(const struct nlattr *nla, int maxtype, const struct nla_policy *policy) { @@ -46,6 +70,12 @@ static int validate_nla(const struct nlattr *nla, int maxtype, return -ERANGE; break; + case NLA_BITFIELD32: + if (attrlen != sizeof(struct nla_bitfield32)) + return -ERANGE; + + return validate_nla_bitfield32(nla, pt->validation_data); + case NLA_NUL_STRING: if (pt->len) minlen = min_t(int, attrlen, pt->len + 1); -- cgit v1.2.3-55-g7522 From df823b02970172f3e4003063699e333295b9b32d Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 30 Jul 2017 13:24:50 -0400 Subject: net sched actions: Use proper root attribute table for actions Bug fix for an issue which has been around for about a decade. We got away with it because the enumeration was larger than needed. Fixes: 7ba699c604ab ("[NET_SCHED]: Convert actions from rtnetlink to new netlink API") Suggested-by: Jiri Pirko Reviewed-by: Simon Horman Signed-off-by: Jamal Hadi Salim Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/act_api.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index f2e9ed34a963..848370e2fcca 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -1072,7 +1072,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); - struct nlattr *tca[TCA_ACT_MAX + 1]; + struct nlattr *tca[TCAA_MAX + 1]; u32 portid = skb ? NETLINK_CB(skb).portid : 0; int ret = 0, ovr = 0; @@ -1080,7 +1080,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; - ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL, + ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCAA_MAX, NULL, extack); if (ret < 0) return ret; -- cgit v1.2.3-55-g7522 From 90825b23a887f06f6c05bdde77b200c5fe9b6217 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 30 Jul 2017 13:24:51 -0400 Subject: net sched actions: dump more than TCA_ACT_MAX_PRIO actions per batch When you dump hundreds of thousands of actions, getting only 32 per dump batch even when the socket buffer and memory allocations allow is inefficient. With this change, the user will get as many as can possibly fit within the given constraints available to the kernel. The top level action TLV space is extended. An attribute TCA_ROOT_FLAGS is used to carry flags; flag TCA_FLAG_LARGE_DUMP_ON is set by the user indicating the user is capable of processing these large dumps. Older user space which doesn't set this flag doesn't get the larger (than 32) batches.
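As an illustrative aside (not part of the patch; it assumes a libmnl-based tc-like tool where nlh points at the request being built, and the local variable names are invented for this sketch), a dump request that opts in to large batches would carry the new attribute as an NLA_BITFIELD32 payload roughly like this:

	/* Request "large dump" batching.  Both the value bit and the selector
	 * bit must be set; a value bit that is not selected is rejected by
	 * validate_nla_bitfield32() above.
	 */
	struct nla_bitfield32 flags = {
		.value    = TCA_FLAG_LARGE_DUMP_ON,
		.selector = TCA_FLAG_LARGE_DUMP_ON,
	};

	mnl_attr_put(nlh, TCA_ROOT_FLAGS, sizeof(flags), &flags);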
The kernel uses the TCA_ROOT_COUNT attribute to tell the user how many actions are put in a single batch. As such user space app knows how long to iterate (independent of the type of action being dumped) instead of hardcoded maximum of 32 thus maintaining backward compat. Some results dumping 1.5M actions below: first an unpatched tc which doesnt understand these features... prompt$ time -p tc actions ls action gact | grep index | wc -l 1500000 real 1388.43 user 2.07 sys 1386.79 Now lets see a patched tc which sets the correct flags when requesting a dump: prompt$ time -p updatedtc actions ls action gact | grep index | wc -l 1500000 real 178.13 user 2.02 sys 176.96 That is about 8x performance improvement for tc app which sets its receive buffer to about 32K. Signed-off-by: Jamal Hadi Salim Reviewed-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/rtnetlink.h | 22 +++++++++++++++++-- net/sched/act_api.c | 50 +++++++++++++++++++++++++++++++++--------- 2 files changed, 60 insertions(+), 12 deletions(-) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index d148505010a7..bfa80a6164d9 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -683,10 +683,28 @@ struct tcamsg { unsigned char tca__pad1; unsigned short tca__pad2; }; + +enum { + TCA_ROOT_UNSPEC, + TCA_ROOT_TAB, +#define TCA_ACT_TAB TCA_ROOT_TAB +#define TCAA_MAX TCA_ROOT_TAB + TCA_ROOT_FLAGS, + TCA_ROOT_COUNT, + __TCA_ROOT_MAX, +#define TCA_ROOT_MAX (__TCA_ROOT_MAX - 1) +}; + #define TA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct tcamsg)))) #define TA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct tcamsg)) -#define TCA_ACT_TAB 1 /* attr type must be >=1 */ -#define TCAA_MAX 1 +/* tcamsg flags stored in attribute TCA_ROOT_FLAGS + * + * TCA_FLAG_LARGE_DUMP_ON user->kernel to request for larger than TCA_ACT_MAX_PRIO + * actions in a dump. 
All dump responses will contain the number of actions + * being dumped stored in for user app's consumption in TCA_ROOT_COUNT + * + */ +#define TCA_FLAG_LARGE_DUMP_ON (1 << 0) /* New extended info filters for IFLA_EXT_MASK */ #define RTEXT_FILTER_VF (1 << 0) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 848370e2fcca..d53653a73c4f 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -110,6 +110,7 @@ static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb, struct netlink_callback *cb) { int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; + u32 act_flags = cb->args[2]; struct nlattr *nest; spin_lock_bh(&hinfo->lock); @@ -138,14 +139,18 @@ static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb, } nla_nest_end(skb, nest); n_i++; - if (n_i >= TCA_ACT_MAX_PRIO) + if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) && + n_i >= TCA_ACT_MAX_PRIO) goto done; } } done: spin_unlock_bh(&hinfo->lock); - if (n_i) + if (n_i) { cb->args[0] += n_i; + if (act_flags & TCA_FLAG_LARGE_DUMP_ON) + cb->args[1] = n_i; + } return n_i; nla_put_failure: @@ -1068,11 +1073,17 @@ static int tcf_action_add(struct net *net, struct nlattr *nla, return tcf_add_notify(net, n, &actions, portid); } +static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON; +static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = { + [TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32, + .validation_data = &tcaa_root_flags_allowed }, +}; + static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); - struct nlattr *tca[TCAA_MAX + 1]; + struct nlattr *tca[TCA_ROOT_MAX + 1]; u32 portid = skb ? NETLINK_CB(skb).portid : 0; int ret = 0, ovr = 0; @@ -1080,7 +1091,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; - ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCAA_MAX, NULL, + ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL, extack); if (ret < 0) return ret; @@ -1121,16 +1132,12 @@ replay: return ret; } -static struct nlattr *find_dump_kind(const struct nlmsghdr *n) +static struct nlattr *find_dump_kind(struct nlattr **nla) { struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1]; struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; - struct nlattr *nla[TCAA_MAX + 1]; struct nlattr *kind; - if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, - NULL, NULL) < 0) - return NULL; tb1 = nla[TCA_ACT_TAB]; if (tb1 == NULL) return NULL; @@ -1157,8 +1164,18 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) struct tc_action_ops *a_o; int ret = 0; struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh); - struct nlattr *kind = find_dump_kind(cb->nlh); + struct nlattr *tb[TCA_ROOT_MAX + 1]; + struct nlattr *count_attr = NULL; + struct nlattr *kind = NULL; + struct nla_bitfield32 bf; + u32 act_count = 0; + + ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX, + tcaa_policy, NULL); + if (ret < 0) + return ret; + kind = find_dump_kind(tb); if (kind == NULL) { pr_info("tc_dump_action: action bad kind\n"); return 0; @@ -1168,14 +1185,24 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) if (a_o == NULL) return 0; + cb->args[2] = 0; + if (tb[TCA_ROOT_FLAGS]) { + bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]); + cb->args[2] = bf.value; + } + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*t), 0); if (!nlh) goto out_module_put; + t = nlmsg_data(nlh); 
t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; + count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32)); + if (!count_attr) + goto out_module_put; nest = nla_nest_start(skb, TCA_ACT_TAB); if (nest == NULL) @@ -1188,6 +1215,9 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) if (ret > 0) { nla_nest_end(skb, nest); ret = skb->len; + act_count = cb->args[1]; + memcpy(nla_data(count_attr), &act_count, sizeof(u32)); + cb->args[1] = 0; } else nlmsg_trim(skb, b); -- cgit v1.2.3-55-g7522 From e62e484df04964ac947c679ef4f00c54ae5395aa Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Sun, 30 Jul 2017 13:24:52 -0400 Subject: net sched actions: add time filter for action dumping This patch adds support for filtering based on time since last used. When we are dumping a large number of actions it is useful to have the option of filtering based on when the action was last used to reduce the amount of data crossing to user space. With this patch the user space app sets the TCA_ROOT_TIME_DELTA attribute with the value in milliseconds with "time of interest since now". The kernel converts this to jiffies and does the filtering comparison matching entries that have seen activity since then and returns them to user space. Old kernels and old tc continue to work in legacy mode since they dont specify this attribute. Some example (we have 400 actions bound to 400 filters); at installation time. Using updated when tc setting the time of interest to 120 seconds earlier (we see 400 actions): prompt$ hackedtc actions ls action gact since 120000| grep index | wc -l 400 go get some coffee and wait for > 120 seconds and try again: prompt$ hackedtc actions ls action gact since 120000 | grep index | wc -l 0 Lets see a filter bound to one of these actions: .... filter pref 10 u32 filter pref 10 u32 fh 800: ht divisor 1 filter pref 10 u32 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:10 (rule hit 2 success 1) match 7f000002/ffffffff at 12 (success 1 ) action order 1: gact action pass random type none pass val 0 index 23 ref 2 bind 1 installed 1145 sec used 802 sec Action statistics: Sent 84 bytes 1 pkt (dropped 0, overlimits 0 requeues 0) backlog 0b 0p requeues 0 .... that coffee took long, no? It was good. Now lets ping -c 1 127.0.0.2, then run the actions again: prompt$ hackedtc actions ls action gact since 120 | grep index | wc -l 1 More details please: prompt$ hackedtc -s actions ls action gact since 120000 action order 0: gact action pass random type none pass val 0 index 23 ref 2 bind 1 installed 1270 sec used 30 sec Action statistics: Sent 168 bytes 2 pkt (dropped 0, overlimits 0 requeues 0) backlog 0b 0p requeues 0 And the filter? filter pref 10 u32 filter pref 10 u32 fh 800: ht divisor 1 filter pref 10 u32 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1:10 (rule hit 4 success 2) match 7f000002/ffffffff at 12 (success 2 ) action order 1: gact action pass random type none pass val 0 index 23 ref 2 bind 1 installed 1324 sec used 84 sec Action statistics: Sent 168 bytes 2 pkt (dropped 0, overlimits 0 requeues 0) backlog 0b 0p requeues 0 Signed-off-by: Jamal Hadi Salim Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/uapi/linux/rtnetlink.h | 1 + net/sched/act_api.c | 21 ++++++++++++++++++++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index bfa80a6164d9..dab7dad9e01a 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -691,6 +691,7 @@ enum { #define TCAA_MAX TCA_ROOT_TAB TCA_ROOT_FLAGS, TCA_ROOT_COUNT, + TCA_ROOT_TIME_DELTA, /* in msecs */ __TCA_ROOT_MAX, #define TCA_ROOT_MAX (__TCA_ROOT_MAX - 1) }; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index d53653a73c4f..f19b118df414 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -111,6 +111,7 @@ static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb, { int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; u32 act_flags = cb->args[2]; + unsigned long jiffy_since = cb->args[3]; struct nlattr *nest; spin_lock_bh(&hinfo->lock); @@ -128,6 +129,11 @@ static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb, if (index < s_i) continue; + if (jiffy_since && + time_after(jiffy_since, + (unsigned long)p->tcfa_tm.lastuse)) + continue; + nest = nla_nest_start(skb, n_i); if (nest == NULL) goto nla_put_failure; @@ -145,9 +151,11 @@ static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb, } } done: + if (index >= 0) + cb->args[0] = index + 1; + spin_unlock_bh(&hinfo->lock); if (n_i) { - cb->args[0] += n_i; if (act_flags & TCA_FLAG_LARGE_DUMP_ON) cb->args[1] = n_i; } @@ -1077,6 +1085,7 @@ static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON; static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = { [TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32, .validation_data = &tcaa_root_flags_allowed }, + [TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 }, }; static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, @@ -1166,8 +1175,10 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh); struct nlattr *tb[TCA_ROOT_MAX + 1]; struct nlattr *count_attr = NULL; + unsigned long jiffy_since = 0; struct nlattr *kind = NULL; struct nla_bitfield32 bf; + u32 msecs_since = 0; u32 act_count = 0; ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX, @@ -1191,15 +1202,23 @@ static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) cb->args[2] = bf.value; } + if (tb[TCA_ROOT_TIME_DELTA]) { + msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]); + } + nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb->nlh->nlmsg_type, sizeof(*t), 0); if (!nlh) goto out_module_put; + if (msecs_since) + jiffy_since = jiffies - msecs_to_jiffies(msecs_since); + t = nlmsg_data(nlh); t->tca_family = AF_UNSPEC; t->tca__pad1 = 0; t->tca__pad2 = 0; + cb->args[3] = jiffy_since; count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32)); if (!count_attr) goto out_module_put; -- cgit v1.2.3-55-g7522 From fb776481c4ffd9bf8d4dc091ea66c3a93bdfcb35 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Sat, 29 Jul 2017 19:32:31 +0200 Subject: Bluetooth: hci_uart: Fix uninitialized alignment value Force alignment value to the default one (1 byte) if uninitialized. This fixes hci_ll serdev driver (alignment = 0) and avoid any further issues with upcoming drivers. 
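As an illustrative aside (not from the patch, and the helper below is hypothetical): any receive path that rounds a frame length up to the configured alignment misbehaves once the alignment is left at zero, which is why the fix falls back to a 1-byte default.

	/* Hypothetical helper, for illustration only: padding expected after a
	 * frame when the transport delivers data in 'alignment'-byte units.
	 * With alignment == 0 the modulo below is undefined, so an unset value
	 * is treated as 1 (no padding), matching the fix in the diff.
	 */
	static inline unsigned int h4_pad_bytes(unsigned int frame_len, u8 alignment)
	{
		if (!alignment)
			alignment = 1;
		return (alignment - (frame_len % alignment)) % alignment;
	}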
Signed-off-by: Loic Poulain Signed-off-by: Johan Hedberg --- drivers/bluetooth/hci_h4.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index 4e328d7d47bb..3b82a87224a9 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -172,7 +172,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, const struct h4_recv_pkt *pkts, int pkts_count) { struct hci_uart *hu = hci_get_drvdata(hdev); - u8 alignment = hu->alignment; + u8 alignment = hu->alignment ? hu->alignment : 1; while (count) { int i, len; -- cgit v1.2.3-55-g7522 From f347ec852c7a83e1803192d2c1fce4e42e0715a5 Mon Sep 17 00:00:00 2001 From: Pablo M. Bermudo Garay Date: Fri, 21 Jul 2017 01:54:37 +0200 Subject: netfilter: nf_tables: fib: use skb_header_pointer This is a preparatory patch for adding fib support to the netdev family. The netdev family receives the packets from ingress hook. At this point we have no guarantee that the ip header is linear. So this patch replaces ip_hdr with skb_header_pointer in order to address that possible situation. Signed-off-by: Pablo M. Bermudo Garay Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/nft_fib_ipv4.c | 20 ++++++++++++++++---- net/ipv6/netfilter/nft_fib_ipv6.c | 29 +++++++++++++++++++++++------ 2 files changed, 39 insertions(+), 10 deletions(-) diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c index de3681df2ce7..e50976e3c213 100644 --- a/net/ipv4/netfilter/nft_fib_ipv4.c +++ b/net/ipv4/netfilter/nft_fib_ipv4.c @@ -32,9 +32,10 @@ void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_fib *priv = nft_expr_priv(expr); + int noff = skb_network_offset(pkt->skb); u32 *dst = ®s->data[priv->dreg]; const struct net_device *dev = NULL; - const struct iphdr *iph; + struct iphdr *iph, _iph; __be32 addr; if (priv->flags & NFTA_FIB_F_IIF) @@ -42,7 +43,12 @@ void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs, else if (priv->flags & NFTA_FIB_F_OIF) dev = nft_out(pkt); - iph = ip_hdr(pkt->skb); + iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph); + if (!iph) { + regs->verdict.code = NFT_BREAK; + return; + } + if (priv->flags & NFTA_FIB_F_DADDR) addr = iph->daddr; else @@ -61,8 +67,9 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_fib *priv = nft_expr_priv(expr); + int noff = skb_network_offset(pkt->skb); u32 *dest = ®s->data[priv->dreg]; - const struct iphdr *iph; + struct iphdr *iph, _iph; struct fib_result res; struct flowi4 fl4 = { .flowi4_scope = RT_SCOPE_UNIVERSE, @@ -95,7 +102,12 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs, return; } - iph = ip_hdr(pkt->skb); + iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph); + if (!iph) { + regs->verdict.code = NFT_BREAK; + return; + } + if (ipv4_is_zeronet(iph->saddr)) { if (ipv4_is_lbcast(iph->daddr) || ipv4_is_local_multicast(iph->daddr)) { diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c index 43f91d9b086c..54b5899543ef 100644 --- a/net/ipv6/netfilter/nft_fib_ipv6.c +++ b/net/ipv6/netfilter/nft_fib_ipv6.c @@ -25,9 +25,9 @@ static int get_ifindex(const struct net_device *dev) static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv, const struct nft_pktinfo *pkt, - const struct net_device *dev) + const struct net_device *dev, + struct 
ipv6hdr *iph) { - const struct ipv6hdr *iph = ipv6_hdr(pkt->skb); int lookup_flags = 0; if (priv->flags & NFTA_FIB_F_DADDR) { @@ -55,7 +55,8 @@ static int nft_fib6_flowi_init(struct flowi6 *fl6, const struct nft_fib *priv, } static u32 __nft_fib6_eval_type(const struct nft_fib *priv, - const struct nft_pktinfo *pkt) + const struct nft_pktinfo *pkt, + struct ipv6hdr *iph) { const struct net_device *dev = NULL; const struct nf_ipv6_ops *v6ops; @@ -77,7 +78,7 @@ static u32 __nft_fib6_eval_type(const struct nft_fib *priv, else if (priv->flags & NFTA_FIB_F_OIF) dev = nft_out(pkt); - nft_fib6_flowi_init(&fl6, priv, pkt, dev); + nft_fib6_flowi_init(&fl6, priv, pkt, dev, iph); v6ops = nf_get_ipv6_ops(); if (dev && v6ops && v6ops->chk_addr(nft_net(pkt), &fl6.daddr, dev, true)) @@ -131,9 +132,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_fib *priv = nft_expr_priv(expr); + int noff = skb_network_offset(pkt->skb); u32 *dest = ®s->data[priv->dreg]; + struct ipv6hdr *iph, _iph; - *dest = __nft_fib6_eval_type(priv, pkt); + iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph); + if (!iph) { + regs->verdict.code = NFT_BREAK; + return; + } + + *dest = __nft_fib6_eval_type(priv, pkt, iph); } EXPORT_SYMBOL_GPL(nft_fib6_eval_type); @@ -141,8 +150,10 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_fib *priv = nft_expr_priv(expr); + int noff = skb_network_offset(pkt->skb); const struct net_device *oif = NULL; u32 *dest = ®s->data[priv->dreg]; + struct ipv6hdr *iph, _iph; struct flowi6 fl6 = { .flowi6_iif = LOOPBACK_IFINDEX, .flowi6_proto = pkt->tprot, @@ -155,7 +166,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs, else if (priv->flags & NFTA_FIB_F_OIF) oif = nft_out(pkt); - lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif); + iph = skb_header_pointer(pkt->skb, noff, sizeof(_iph), &_iph); + if (!iph) { + regs->verdict.code = NFT_BREAK; + return; + } + + lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph); if (nft_hook(pkt) == NF_INET_PRE_ROUTING && nft_fib_is_loopback(pkt->skb, nft_in(pkt))) { -- cgit v1.2.3-55-g7522 From 6392c226037c2b90d3062126c65fc354e47156f7 Mon Sep 17 00:00:00 2001 From: Pablo M. Bermudo Garay Date: Fri, 21 Jul 2017 01:54:38 +0200 Subject: netfilter: nf_tables: add fib expression to the netdev family Add fib expression support for netdev family. Like inet family, netdev delegates the actual decision to the corresponding backend, either ipv4 or ipv6. This allows to perform very early reverse path filtering, among other things. You can find more information about fib expression in the f6d0cbcf09c5 ("") commit message. Signed-off-by: Pablo M. Bermudo Garay Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 9 +++++ net/netfilter/Makefile | 1 + net/netfilter/nft_fib_netdev.c | 87 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 97 insertions(+) create mode 100644 net/netfilter/nft_fib_netdev.c diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 9b28864cc36a..e4a13cc8a2e7 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -636,6 +636,15 @@ config NFT_FWD_NETDEV help This option enables packet forwarding for the "netdev" family. +config NFT_FIB_NETDEV + depends on NFT_FIB_IPV4 + depends on NFT_FIB_IPV6 + tristate "Netfilter nf_tables netdev fib lookups support" + help + This option allows using the FIB expression from the netdev table. 
+ The lookup will be delegated to the IPv4 or IPv6 FIB depending + on the protocol of the packet. + endif # NF_TABLES_NETDEV endif # NF_TABLES diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 913380919301..d3891c93edd6 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -100,6 +100,7 @@ obj-$(CONFIG_NFT_REDIR) += nft_redir.o obj-$(CONFIG_NFT_HASH) += nft_hash.o obj-$(CONFIG_NFT_FIB) += nft_fib.o obj-$(CONFIG_NFT_FIB_INET) += nft_fib_inet.o +obj-$(CONFIG_NFT_FIB_NETDEV) += nft_fib_netdev.o # nf_tables netdev obj-$(CONFIG_NFT_DUP_NETDEV) += nft_dup_netdev.o diff --git a/net/netfilter/nft_fib_netdev.c b/net/netfilter/nft_fib_netdev.c new file mode 100644 index 000000000000..3997ee36cfbd --- /dev/null +++ b/net/netfilter/nft_fib_netdev.c @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2017 Pablo M. Bermudo Garay + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This code is based on net/netfilter/nft_fib_inet.c, written by + * Florian Westphal . + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static void nft_fib_netdev_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_fib *priv = nft_expr_priv(expr); + + switch (ntohs(pkt->skb->protocol)) { + case ETH_P_IP: + switch (priv->result) { + case NFT_FIB_RESULT_OIF: + case NFT_FIB_RESULT_OIFNAME: + return nft_fib4_eval(expr, regs, pkt); + case NFT_FIB_RESULT_ADDRTYPE: + return nft_fib4_eval_type(expr, regs, pkt); + } + break; + case ETH_P_IPV6: + switch (priv->result) { + case NFT_FIB_RESULT_OIF: + case NFT_FIB_RESULT_OIFNAME: + return nft_fib6_eval(expr, regs, pkt); + case NFT_FIB_RESULT_ADDRTYPE: + return nft_fib6_eval_type(expr, regs, pkt); + } + break; + } + + regs->verdict.code = NFT_BREAK; +} + +static struct nft_expr_type nft_fib_netdev_type; +static const struct nft_expr_ops nft_fib_netdev_ops = { + .type = &nft_fib_netdev_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_fib)), + .eval = nft_fib_netdev_eval, + .init = nft_fib_init, + .dump = nft_fib_dump, + .validate = nft_fib_validate, +}; + +static struct nft_expr_type nft_fib_netdev_type __read_mostly = { + .family = NFPROTO_NETDEV, + .name = "fib", + .ops = &nft_fib_netdev_ops, + .policy = nft_fib_policy, + .maxattr = NFTA_FIB_MAX, + .owner = THIS_MODULE, +}; + +static int __init nft_fib_netdev_module_init(void) +{ + return nft_register_expr(&nft_fib_netdev_type); +} + +static void __exit nft_fib_netdev_module_exit(void) +{ + nft_unregister_expr(&nft_fib_netdev_type); +} + +module_init(nft_fib_netdev_module_init); +module_exit(nft_fib_netdev_module_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Pablo M. Bermudo Garay "); +MODULE_ALIAS_NFT_AF_EXPR(5, "fib"); -- cgit v1.2.3-55-g7522 From a232cd0e0cf1ede77b047bc0c98142cd51f318e3 Mon Sep 17 00:00:00 2001 From: subashab@codeaurora.org Date: Thu, 20 Jul 2017 19:42:19 -0600 Subject: netfilter: conntrack: Change to deferable work queue Delayed workqueue causes wakeups to idle CPUs. This was causing a power impact for devices. Use deferable work queue instead so that gc_worker runs when CPU is active only. 
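A minimal sketch of the pattern being switched to (the names are illustrative, not taken from the conntrack code): a deferrable delayed work item does not wake an idle CPU just to run periodic housekeeping; its timer fires once the CPU is active again for some other reason.

	#include <linux/workqueue.h>

	static void my_gc(struct work_struct *work);
	static DECLARE_DEFERRABLE_WORK(my_gc_work, my_gc);

	static void my_gc(struct work_struct *work)
	{
		/* ... scan and reap expired entries ... */

		/* Re-arm; the underlying timer stays deferrable, so an idle CPU
		 * is not pulled out of its sleep state for the next run.
		 */
		schedule_delayed_work(&my_gc_work, HZ);
	}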
Signed-off-by: Subash Abhinov Kasiviswanathan Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 69746928cc0a..c6f1cf0bff56 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1081,7 +1081,7 @@ static void gc_worker(struct work_struct *work) static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) { - INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); + INIT_DEFERRABLE_WORK(&gc_work->dwork, gc_worker); gc_work->next_gc_run = HZ; gc_work->exiting = false; } -- cgit v1.2.3-55-g7522 From ac7b848390036dadd4351899d2a23748075916bd Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 26 Jul 2017 00:02:31 +0200 Subject: netfilter: expect: add and use nf_ct_expect_iterate helpers We have several spots that open-code a expect walk, add a helper that is similar to nf_ct_iterate_destroy/nf_ct_iterate_cleanup. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_expect.h | 5 +++ net/netfilter/nf_conntrack_expect.c | 54 +++++++++++++++++++++++++ net/netfilter/nf_conntrack_helper.c | 34 +++++++--------- net/netfilter/nf_conntrack_netlink.c | 63 ++++++++++------------------- 4 files changed, 95 insertions(+), 61 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index 2ba54feaccd8..818def011110 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -107,6 +107,11 @@ void nf_ct_remove_expectations(struct nf_conn *ct); void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); bool nf_ct_remove_expect(struct nf_conntrack_expect *exp); +void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data), void *data); +void nf_ct_expect_iterate_net(struct net *net, + bool (*iter)(struct nf_conntrack_expect *e, void *data), + void *data, u32 portid, int report); + /* Allocate space for an expectation: this is mandatory before calling nf_ct_expect_related. You will have to call put afterwards. 
*/ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me); diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 2c63808bea96..dad2c0c22ad5 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -474,6 +474,60 @@ out: } EXPORT_SYMBOL_GPL(nf_ct_expect_related_report); +void nf_ct_expect_iterate_destroy(bool (*iter)(struct nf_conntrack_expect *e, void *data), + void *data) +{ + struct nf_conntrack_expect *exp; + const struct hlist_node *next; + unsigned int i; + + spin_lock_bh(&nf_conntrack_expect_lock); + + for (i = 0; i < nf_ct_expect_hsize; i++) { + hlist_for_each_entry_safe(exp, next, + &nf_ct_expect_hash[i], + hnode) { + if (iter(exp, data) && del_timer(&exp->timeout)) { + nf_ct_unlink_expect(exp); + nf_ct_expect_put(exp); + } + } + } + + spin_unlock_bh(&nf_conntrack_expect_lock); +} +EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_destroy); + +void nf_ct_expect_iterate_net(struct net *net, + bool (*iter)(struct nf_conntrack_expect *e, void *data), + void *data, + u32 portid, int report) +{ + struct nf_conntrack_expect *exp; + const struct hlist_node *next; + unsigned int i; + + spin_lock_bh(&nf_conntrack_expect_lock); + + for (i = 0; i < nf_ct_expect_hsize; i++) { + hlist_for_each_entry_safe(exp, next, + &nf_ct_expect_hash[i], + hnode) { + + if (!net_eq(nf_ct_exp_net(exp), net)) + continue; + + if (iter(exp, data) && del_timer(&exp->timeout)) { + nf_ct_unlink_expect_report(exp, portid, report); + nf_ct_expect_put(exp); + } + } + } + + spin_unlock_bh(&nf_conntrack_expect_lock); +} +EXPORT_SYMBOL_GPL(nf_ct_expect_iterate_net); + #ifdef CONFIG_NF_CONNTRACK_PROCFS struct ct_expect_iter_state { struct seq_net_private p; diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 9129bb3b5153..551a1eddf0fa 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -437,12 +437,22 @@ out: } EXPORT_SYMBOL_GPL(nf_conntrack_helper_register); -void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) +static bool expect_iter_me(struct nf_conntrack_expect *exp, void *data) { - struct nf_conntrack_expect *exp; - const struct hlist_node *next; - unsigned int i; + struct nf_conn_help *help = nfct_help(exp->master); + const struct nf_conntrack_helper *me = data; + const struct nf_conntrack_helper *this; + + if (exp->helper == me) + return true; + this = rcu_dereference_protected(help->helper, + lockdep_is_held(&nf_conntrack_expect_lock)); + return this == me; +} + +void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) +{ mutex_lock(&nf_ct_helper_mutex); hlist_del_rcu(&me->hnode); nf_ct_helper_count--; @@ -453,21 +463,7 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me) */ synchronize_rcu(); - /* Get rid of expectations */ - spin_lock_bh(&nf_conntrack_expect_lock); - for (i = 0; i < nf_ct_expect_hsize; i++) { - hlist_for_each_entry_safe(exp, next, - &nf_ct_expect_hash[i], hnode) { - struct nf_conn_help *help = nfct_help(exp->master); - if ((rcu_dereference_protected( - help->helper, - lockdep_is_held(&nf_conntrack_expect_lock) - ) == me || exp->helper == me)) - nf_ct_remove_expect(exp); - } - } - spin_unlock_bh(&nf_conntrack_expect_lock); - + nf_ct_expect_iterate_destroy(expect_iter_me, NULL); nf_ct_iterate_destroy(unhelp, me); } EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 4dba71de4de7..4922c8aefb2a 100644 --- 
a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -2898,6 +2898,21 @@ out: return err == -EAGAIN ? -ENOBUFS : err; } +static bool expect_iter_name(struct nf_conntrack_expect *exp, void *data) +{ + const struct nf_conn_help *m_help; + const char *name = data; + + m_help = nfct_help(exp->master); + + return strcmp(m_help->helper->name, name) == 0; +} + +static bool expect_iter_all(struct nf_conntrack_expect *exp, void *data) +{ + return true; +} + static int ctnetlink_del_expect(struct net *net, struct sock *ctnl, struct sk_buff *skb, const struct nlmsghdr *nlh, const struct nlattr * const cda[], @@ -2906,10 +2921,8 @@ static int ctnetlink_del_expect(struct net *net, struct sock *ctnl, struct nf_conntrack_expect *exp; struct nf_conntrack_tuple tuple; struct nfgenmsg *nfmsg = nlmsg_data(nlh); - struct hlist_node *next; u_int8_t u3 = nfmsg->nfgen_family; struct nf_conntrack_zone zone; - unsigned int i; int err; if (cda[CTA_EXPECT_TUPLE]) { @@ -2949,49 +2962,15 @@ static int ctnetlink_del_expect(struct net *net, struct sock *ctnl, nf_ct_expect_put(exp); } else if (cda[CTA_EXPECT_HELP_NAME]) { char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]); - struct nf_conn_help *m_help; - /* delete all expectations for this helper */ - spin_lock_bh(&nf_conntrack_expect_lock); - for (i = 0; i < nf_ct_expect_hsize; i++) { - hlist_for_each_entry_safe(exp, next, - &nf_ct_expect_hash[i], - hnode) { - - if (!net_eq(nf_ct_exp_net(exp), net)) - continue; - - m_help = nfct_help(exp->master); - if (!strcmp(m_help->helper->name, name) && - del_timer(&exp->timeout)) { - nf_ct_unlink_expect_report(exp, - NETLINK_CB(skb).portid, - nlmsg_report(nlh)); - nf_ct_expect_put(exp); - } - } - } - spin_unlock_bh(&nf_conntrack_expect_lock); + nf_ct_expect_iterate_net(net, expect_iter_name, name, + NETLINK_CB(skb).portid, + nlmsg_report(nlh)); } else { /* This basically means we have to flush everything*/ - spin_lock_bh(&nf_conntrack_expect_lock); - for (i = 0; i < nf_ct_expect_hsize; i++) { - hlist_for_each_entry_safe(exp, next, - &nf_ct_expect_hash[i], - hnode) { - - if (!net_eq(nf_ct_exp_net(exp), net)) - continue; - - if (del_timer(&exp->timeout)) { - nf_ct_unlink_expect_report(exp, - NETLINK_CB(skb).portid, - nlmsg_report(nlh)); - nf_ct_expect_put(exp); - } - } - } - spin_unlock_bh(&nf_conntrack_expect_lock); + nf_ct_expect_iterate_net(net, expect_iter_all, NULL, + NETLINK_CB(skb).portid, + nlmsg_report(nlh)); } return 0; -- cgit v1.2.3-55-g7522 From 84657984c26fd0b64743a397f3a1a587fa4b575a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 26 Jul 2017 00:02:32 +0200 Subject: netfilter: add and use nf_ct_unconfirmed_destroy This also removes __nf_ct_unconfirmed_destroy() call from nf_ct_iterate_cleanup_net, so that function can be used only when missing conntracks from unconfirmed list isn't a problem. 
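A sketch of what this means for a caller (hypothetical module; it mirrors the cttimeout change in the diff below): a netns exit path that still needs the unconfirmed list flushed must now request that explicitly before walking the hash table.

	/* Illustrative iterator: non-zero return means "remove this entry". */
	static int my_unhook(struct nf_conn *ct, void *data)
	{
		return 1;
	}

	static void __net_exit my_ct_net_exit(struct net *net)
	{
		nf_ct_unconfirmed_destroy(net);		/* mark unconfirmed entries dying first */
		nf_ct_iterate_cleanup_net(net, my_unhook, NULL, 0, 0);	/* then walk the hash table */
	}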
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack.h | 3 +++ net/netfilter/nf_conntrack_core.c | 15 +++++++++++---- net/netfilter/nfnetlink_cttimeout.c | 1 + 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index 48407569585d..6e6f678aaac7 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -224,6 +224,9 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct, enum ip_conntrack_dir dir, u32 seq); +/* Set all unconfirmed conntrack as dying */ +void nf_ct_unconfirmed_destroy(struct net *); + /* Iterate over all conntracks: if iter returns true, it's deleted. */ void nf_ct_iterate_cleanup_net(struct net *net, int (*iter)(struct nf_conn *i, void *data), diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index c6f1cf0bff56..80ab4e937765 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1686,6 +1686,17 @@ __nf_ct_unconfirmed_destroy(struct net *net) } } +void nf_ct_unconfirmed_destroy(struct net *net) +{ + might_sleep(); + + if (atomic_read(&net->ct.count) > 0) { + __nf_ct_unconfirmed_destroy(net); + synchronize_net(); + } +} +EXPORT_SYMBOL_GPL(nf_ct_unconfirmed_destroy); + void nf_ct_iterate_cleanup_net(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data, u32 portid, int report) @@ -1697,14 +1708,10 @@ void nf_ct_iterate_cleanup_net(struct net *net, if (atomic_read(&net->ct.count) == 0) return; - __nf_ct_unconfirmed_destroy(net); - d.iter = iter; d.data = data; d.net = net; - synchronize_net(); - nf_ct_iterate_cleanup(iter_net_only, &d, portid, report); } EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup_net); diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 7ce9e86d374c..f4fb6d4dd0b9 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -570,6 +570,7 @@ static void __net_exit cttimeout_net_exit(struct net *net) { struct ctnl_timeout *cur, *tmp; + nf_ct_unconfirmed_destroy(net); ctnl_untimeout(net, NULL); list_for_each_entry_safe(cur, tmp, &net->nfct_timeout_list, head) { -- cgit v1.2.3-55-g7522 From e2a750070aeec7af3818065b39d61cb38627ce64 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 26 Jul 2017 00:02:33 +0200 Subject: netfilter: conntrack: destroy functions need to free queued packets queued skbs might be using conntrack extensions that are being removed, such as timeout. This happens for skbs that have a skb->nfct in unconfirmed state (i.e., not in hash table yet). This is destructive, but there are only two use cases: - module removal (rare) - netns cleanup (most likely no conntracks exist, and if they do, they are removed anyway later on). 
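Recapping the ordering this enforces in nf_ct_unconfirmed_destroy() (this mirrors the diff below rather than adding new kernel code):

	__nf_ct_unconfirmed_destroy(net);	/* 1. mark unconfirmed conntracks as dying */
	nf_queue_nf_hook_drop(net);		/* 2. free queued skbs that may still reference
						 *    the extensions being torn down */
	synchronize_net();			/* 3. let concurrent readers finish */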
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 4 ++++ net/netfilter/nf_queue.c | 1 + 2 files changed, 5 insertions(+) diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 80ab4e937765..2bc499186186 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -56,6 +56,8 @@ #include #include +#include "nf_internals.h" + #define NF_CONNTRACK_VERSION "0.5.0" int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct, @@ -1692,6 +1694,7 @@ void nf_ct_unconfirmed_destroy(struct net *net) if (atomic_read(&net->ct.count) > 0) { __nf_ct_unconfirmed_destroy(net); + nf_queue_nf_hook_drop(net); synchronize_net(); } } @@ -1737,6 +1740,7 @@ nf_ct_iterate_destroy(int (*iter)(struct nf_conn *i, void *data), void *data) if (atomic_read(&net->ct.count) == 0) continue; __nf_ct_unconfirmed_destroy(net); + nf_queue_nf_hook_drop(net); } rtnl_unlock(); diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 043850c9d154..4f4d80a58fb5 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -109,6 +109,7 @@ unsigned int nf_queue_nf_hook_drop(struct net *net) return count; } +EXPORT_SYMBOL_GPL(nf_queue_nf_hook_drop); static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, struct nf_hook_entry *hook_entry, unsigned int queuenum) -- cgit v1.2.3-55-g7522 From 5da773a3e81e6093c4346ee8cd356fc214d7c76c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 26 Jul 2017 00:02:34 +0200 Subject: netfilter: nfnetlink_queue: don't queue dying conntracks to userspace When skb is queued to userspace it leaves softirq/rcu protection. skb->nfct (via conntrack extensions such as helper) could then reference modules that no longer exist if the conntrack was not yet confirmed. nf_ct_iterate_destroy() will set the DYING bit for unconfirmed conntracks, we therefore solve this race as follows: 1. take the queue spinlock. 2. check if the conntrack is unconfirmed and has dying bit set. In this case, we must discard skb while we're still inside rcu read-side section. 3. If nf_ct_iterate_destroy() is called right after the packet is queued to userspace, it will be removed from the queue via nf_ct_iterate_destroy -> nf_queue_nf_hook_drop. When userspace sends the verdict (nfnetlink takes rcu read lock), there are two cases to consider: 1. nf_ct_iterate_destroy() was called while packet was out. In this case, skb will have been removed from the queue already and no reinject takes place as we won't find a matching entry for the packet id. 2. nf_ct_iterate_destroy() gets called right after verdict callback found and removed the skb from queue list. In this case, skb->nfct is marked as dying but it is still valid. The skb will be dropped either in nf_conntrack_confirm (we don't insert DYING conntracks into hash table) or when we try to queue the skb again, but either events don't occur before the rcu read lock is dropped. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nfnetlink_queue.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 7c543bfbf624..c9796629858f 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -41,6 +41,10 @@ #include "../bridge/br_private.h" #endif +#if IS_ENABLED(CONFIG_NF_CONNTRACK) +#include +#endif + #define NFQNL_QMAX_DEFAULT 1024 /* We're using struct nlattr which has 16bit nla_len. 
Note that nla_len @@ -612,6 +616,18 @@ nlmsg_failure: return NULL; } +static bool nf_ct_drop_unconfirmed(const struct nf_queue_entry *entry) +{ +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + static const unsigned long flags = IPS_CONFIRMED | IPS_DYING; + const struct nf_conn *ct = (void *)skb_nfct(entry->skb); + + if (ct && ((ct->status & flags) == IPS_DYING)) + return true; +#endif + return false; +} + static int __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, struct nf_queue_entry *entry) @@ -628,6 +644,9 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, } spin_lock_bh(&queue->lock); + if (nf_ct_drop_unconfirmed(entry)) + goto err_out_free_nskb; + if (queue->queue_total >= queue->queue_maxlen) { if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { failopen = 1; -- cgit v1.2.3-55-g7522 From 591bb2789bc2a93f379b13d277f441f1b427102d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 26 Jul 2017 11:40:52 +0200 Subject: netfilter: nf_hook_ops structs can be const We no longer place these on a list so they can be const. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- drivers/net/ipvlan/ipvlan_main.c | 2 +- net/bridge/br_netfilter_hooks.c | 2 +- net/bridge/netfilter/ebtable_filter.c | 2 +- net/bridge/netfilter/ebtable_nat.c | 2 +- net/decnet/netfilter/dn_rtmsg.c | 2 +- net/ipv4/netfilter/ipt_CLUSTERIP.c | 2 +- net/ipv4/netfilter/ipt_SYNPROXY.c | 2 +- net/ipv4/netfilter/iptable_nat.c | 2 +- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 2 +- net/ipv4/netfilter/nf_defrag_ipv4.c | 2 +- net/ipv6/ila/ila_xlat.c | 2 +- net/ipv6/netfilter/ip6t_SYNPROXY.c | 2 +- net/ipv6/netfilter/ip6table_nat.c | 2 +- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 2 +- net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | 2 +- net/netfilter/ipvs/ip_vs_core.c | 2 +- security/selinux/hooks.c | 2 +- security/smack/smack_netfilter.c | 2 +- 18 files changed, 18 insertions(+), 18 deletions(-) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index fdde20735416..943e6907dc19 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -15,7 +15,7 @@ struct ipvlan_netns { unsigned int ipvl_nf_hook_refcnt; }; -static struct nf_hook_ops ipvl_nfops[] __read_mostly = { +static const struct nf_hook_ops ipvl_nfops[] = { { .hook = ipvlan_nf_input, .pf = NFPROTO_IPV4, diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 2261e5194c82..626f4b2cef16 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -887,7 +887,7 @@ EXPORT_SYMBOL_GPL(br_netfilter_enable); /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because * br_dev_queue_push_xmit is called afterwards */ -static struct nf_hook_ops br_nf_ops[] __read_mostly = { +static const struct nf_hook_ops br_nf_ops[] = { { .hook = br_nf_pre_routing, .pf = NFPROTO_BRIDGE, diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index f22ef7c21913..45a00dbdbcad 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c @@ -70,7 +70,7 @@ ebt_out_hook(void *priv, struct sk_buff *skb, return ebt_do_table(skb, state, state->net->xt.frame_filter); } -static struct nf_hook_ops ebt_ops_filter[] __read_mostly = { +static const struct nf_hook_ops ebt_ops_filter[] = { { .hook = ebt_in_hook, .pf = NFPROTO_BRIDGE, diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 2f7a4f314406..4ecf50662b7d 100644 --- 
a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c @@ -70,7 +70,7 @@ ebt_nat_out(void *priv, struct sk_buff *skb, return ebt_do_table(skb, state, state->net->xt.frame_nat); } -static struct nf_hook_ops ebt_ops_nat[] __read_mostly = { +static const struct nf_hook_ops ebt_ops_nat[] = { { .hook = ebt_nat_out, .pf = NFPROTO_BRIDGE, diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c index aa8ffecc46a4..ab395e55cd78 100644 --- a/net/decnet/netfilter/dn_rtmsg.c +++ b/net/decnet/netfilter/dn_rtmsg.c @@ -115,7 +115,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb) RCV_SKB_FAIL(-EINVAL); } -static struct nf_hook_ops dnrmg_ops __read_mostly = { +static const struct nf_hook_ops dnrmg_ops = { .hook = dnrmg_hook, .pf = NFPROTO_DECNET, .hooknum = NF_DN_ROUTE, diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 7d72decb80f9..6637e8b37ee2 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c @@ -624,7 +624,7 @@ arp_mangle(void *priv, return NF_ACCEPT; } -static struct nf_hook_ops cip_arp_ops __read_mostly = { +static const struct nf_hook_ops cip_arp_ops = { .hook = arp_mangle, .pf = NFPROTO_ARP, .hooknum = NF_ARP_OUT, diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index f1528f7175a8..811689e523c3 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c @@ -416,7 +416,7 @@ static unsigned int ipv4_synproxy_hook(void *priv, return NF_ACCEPT; } -static struct nf_hook_ops ipv4_synproxy_ops[] __read_mostly = { +static const struct nf_hook_ops ipv4_synproxy_ops[] = { { .hook = ipv4_synproxy_hook, .pf = NFPROTO_IPV4, diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c index 138a24bc76ad..a1a07b338ccf 100644 --- a/net/ipv4/netfilter/iptable_nat.c +++ b/net/ipv4/netfilter/iptable_nat.c @@ -67,7 +67,7 @@ static unsigned int iptable_nat_ipv4_local_fn(void *priv, return nf_nat_ipv4_local_fn(priv, skb, state, iptable_nat_do_chain); } -static struct nf_hook_ops nf_nat_ipv4_ops[] __read_mostly = { +static const struct nf_hook_ops nf_nat_ipv4_ops[] = { /* Before packet filtering, change destination */ { .hook = iptable_nat_ipv4_in, diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 2e14ed11a35c..63e4ea0e01f8 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -174,7 +174,7 @@ static unsigned int ipv4_conntrack_local(void *priv, /* Connection tracking may drop packets, but never alters them, so make it the first hook. 
*/ -static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = { +static const struct nf_hook_ops ipv4_conntrack_ops[] = { { .hook = ipv4_conntrack_in, .pf = NFPROTO_IPV4, diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index 346bf7ccac08..37fe1616ca0b 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -90,7 +90,7 @@ static unsigned int ipv4_conntrack_defrag(void *priv, return NF_ACCEPT; } -static struct nf_hook_ops ipv4_defrag_ops[] = { +static const struct nf_hook_ops ipv4_defrag_ops[] = { { .hook = ipv4_conntrack_defrag, .pf = NFPROTO_IPV4, diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 77f7f8c7d93d..5bd419c1abc8 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -208,7 +208,7 @@ ila_nf_input(void *priv, return NF_ACCEPT; } -static struct nf_hook_ops ila_nf_hook_ops[] __read_mostly = { +static const struct nf_hook_ops ila_nf_hook_ops[] = { { .hook = ila_nf_input, .pf = NFPROTO_IPV6, diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index ce203dd729e0..a5cd43d75393 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c @@ -438,7 +438,7 @@ static unsigned int ipv6_synproxy_hook(void *priv, return NF_ACCEPT; } -static struct nf_hook_ops ipv6_synproxy_ops[] __read_mostly = { +static const struct nf_hook_ops ipv6_synproxy_ops[] = { { .hook = ipv6_synproxy_hook, .pf = NFPROTO_IPV6, diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c index 7d2bd940291f..991512576c8c 100644 --- a/net/ipv6/netfilter/ip6table_nat.c +++ b/net/ipv6/netfilter/ip6table_nat.c @@ -69,7 +69,7 @@ static unsigned int ip6table_nat_local_fn(void *priv, return nf_nat_ipv6_local_fn(priv, skb, state, ip6table_nat_do_chain); } -static struct nf_hook_ops nf_nat_ipv6_ops[] __read_mostly = { +static const struct nf_hook_ops nf_nat_ipv6_ops[] = { /* Before packet filtering, change destination */ { .hook = ip6table_nat_in, diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 4e3402486833..f2d2f4a9294b 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -191,7 +191,7 @@ static unsigned int ipv6_conntrack_local(void *priv, return nf_conntrack_in(state->net, PF_INET6, state->hook, skb); } -static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = { +static const struct nf_hook_ops ipv6_conntrack_ops[] = { { .hook = ipv6_conntrack_in, .pf = NFPROTO_IPV6, diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c index ada60d1a991b..b326da59257f 100644 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c @@ -74,7 +74,7 @@ static unsigned int ipv6_defrag(void *priv, return err == 0 ? 
NF_ACCEPT : NF_DROP; } -static struct nf_hook_ops ipv6_defrag_ops[] = { +static const struct nf_hook_ops ipv6_defrag_ops[] = { { .hook = ipv6_defrag, .pf = NFPROTO_IPV6, diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 2ff9d9070c95..5cb7cac9177d 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -2101,7 +2101,7 @@ ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb, #endif -static struct nf_hook_ops ip_vs_ops[] __read_mostly = { +static const struct nf_hook_ops ip_vs_ops[] = { /* After packet filtering, change source only for VS/NAT */ { .hook = ip_vs_reply4, diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 33fd061305c4..2f2e1338cd3d 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -6530,7 +6530,7 @@ security_initcall(selinux_init); #if defined(CONFIG_NETFILTER) -static struct nf_hook_ops selinux_nf_ops[] = { +static const struct nf_hook_ops selinux_nf_ops[] = { { .hook = selinux_ipv4_postroute, .pf = NFPROTO_IPV4, diff --git a/security/smack/smack_netfilter.c b/security/smack/smack_netfilter.c index cdeb0f3243dd..e36d17835d4f 100644 --- a/security/smack/smack_netfilter.c +++ b/security/smack/smack_netfilter.c @@ -58,7 +58,7 @@ static unsigned int smack_ipv4_output(void *priv, return NF_ACCEPT; } -static struct nf_hook_ops smack_nf_ops[] = { +static const struct nf_hook_ops smack_nf_ops[] = { { .hook = smack_ipv4_output, .pf = NFPROTO_IPV4, -- cgit v1.2.3-55-g7522 From 6e692678d74289d6129bbd4bb20bb9fe01278faa Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 27 Jul 2017 16:56:39 +0200 Subject: netfilter: nf_tables: No need to check chain existence when tracing nft_trace_notify() is called only from __nft_trace_packet(), which assigns its parameter 'chain' to info->chain. __nft_trace_packet() in turn later dereferences 'chain' unconditionally, which indicates that it's never NULL. Same does nft_do_chain(), the only user of the tracing infrastructure. Hence it is safe to assume the check removed here is not needed. Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_trace.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c index e1b15e7a5793..0c3a0049e4aa 100644 --- a/net/netfilter/nf_tables_trace.c +++ b/net/netfilter/nf_tables_trace.c @@ -217,14 +217,11 @@ void nft_trace_notify(struct nft_traceinfo *info) if (trace_fill_id(skb, pkt->skb)) goto nla_put_failure; - if (info->chain) { - if (nla_put_string(skb, NFTA_TRACE_CHAIN, - info->chain->name)) - goto nla_put_failure; - if (nla_put_string(skb, NFTA_TRACE_TABLE, - info->chain->table->name)) - goto nla_put_failure; - } + if (nla_put_string(skb, NFTA_TRACE_CHAIN, info->chain->name)) + goto nla_put_failure; + + if (nla_put_string(skb, NFTA_TRACE_TABLE, info->chain->table->name)) + goto nla_put_failure; if (nf_trace_fill_rule_info(skb, info)) goto nla_put_failure; -- cgit v1.2.3-55-g7522 From 2cf0c8b3e6942ecafe6ebb1a6d0328a81641bf39 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 27 Jul 2017 16:56:40 +0200 Subject: netlink: Introduce nla_strdup() This is similar to strdup() for netlink string attributes. 
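A minimal usage sketch for the new helper, assuming a parsed attribute table 'nla' holding a string attribute (NFTA_TABLE_NAME is used here purely as an illustration):

	char *name;

	name = nla_strdup(nla[NFTA_TABLE_NAME], GFP_KERNEL);
	if (!name)
		return -ENOMEM;
	/* ... use the NUL-terminated copy ... */
	kfree(name);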
Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- include/net/netlink.h | 1 + lib/nlattr.c | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/include/net/netlink.h b/include/net/netlink.h index ef8e6c3a80a6..c8c2eb5ae55e 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -247,6 +247,7 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, int nla_policy_len(const struct nla_policy *, int); struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype); size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize); +char *nla_strdup(const struct nlattr *nla, gfp_t flags); int nla_memcpy(void *dest, const struct nlattr *src, int count); int nla_memcmp(const struct nlattr *nla, const void *data, size_t size); int nla_strcmp(const struct nlattr *nla, const char *str); diff --git a/lib/nlattr.c b/lib/nlattr.c index fb52435be42d..f13013f7e21a 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -271,6 +271,30 @@ size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize) } EXPORT_SYMBOL(nla_strlcpy); +/** + * nla_strdup - Copy string attribute payload into a newly allocated buffer + * @nla: attribute to copy the string from + * @flags: the type of memory to allocate (see kmalloc). + * + * Returns a pointer to the allocated buffer or NULL on error. + */ +char *nla_strdup(const struct nlattr *nla, gfp_t flags) +{ + size_t srclen = nla_len(nla); + char *src = nla_data(nla), *dst; + + if (srclen > 0 && src[srclen - 1] == '\0') + srclen--; + + dst = kmalloc(srclen + 1, flags); + if (dst != NULL) { + memcpy(dst, src, srclen); + dst[srclen] = '\0'; + } + return dst; +} +EXPORT_SYMBOL(nla_strdup); + /** * nla_memcpy - Copy a netlink attribute into another memory area * @dest: where to copy to memcpy -- cgit v1.2.3-55-g7522 From e46abbcc05aa8a16b0e7f5c94e86d11af9aa2770 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 27 Jul 2017 16:56:41 +0200 Subject: netfilter: nf_tables: Allow table names of up to 255 chars Allocate all table names dynamically to allow for arbitrary lengths but introduce NFT_NAME_MAXLEN as an upper sanity boundary. It's value was chosen to allow using a domain name as per RFC 1035. 
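For reference, the bound works out as shown below; the header added by this patch defines only the bare constant, and the comment here is explanatory:

#define NFT_NAME_MAXLEN	256	/* room for a maximum-length (255 octet)
				 * DNS name per RFC 1035 plus the
				 * terminating NUL byte */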
Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 2 +- include/uapi/linux/netfilter/nf_tables.h | 3 +- net/netfilter/nf_tables_api.c | 49 +++++++++++++++++++++++--------- net/netfilter/nf_tables_trace.c | 2 +- 4 files changed, 40 insertions(+), 16 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index bd5be0d691d5..05ecf78ec078 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -957,7 +957,7 @@ struct nft_table { u32 use; u16 flags:14, genmask:2; - char name[NFT_TABLE_MAXNAMELEN]; + char *name; }; enum nft_af_flags { diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 6f0a950e21c3..0b94e572ef16 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1,7 +1,8 @@ #ifndef _LINUX_NF_TABLES_H #define _LINUX_NF_TABLES_H -#define NFT_TABLE_MAXNAMELEN 32 +#define NFT_NAME_MAXLEN 256 +#define NFT_TABLE_MAXNAMELEN NFT_NAME_MAXLEN #define NFT_CHAIN_MAXNAMELEN 32 #define NFT_SET_MAXNAMELEN 32 #define NFT_OBJ_MAXNAMELEN 32 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b77ad0813564..c2e392d5e512 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -726,7 +726,10 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk, if (table == NULL) goto err2; - nla_strlcpy(table->name, name, NFT_TABLE_MAXNAMELEN); + table->name = nla_strdup(name, GFP_KERNEL); + if (table->name == NULL) + goto err3; + INIT_LIST_HEAD(&table->chains); INIT_LIST_HEAD(&table->sets); INIT_LIST_HEAD(&table->objects); @@ -735,10 +738,12 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk, nft_ctx_init(&ctx, net, skb, nlh, afi, table, NULL, nla); err = nft_trans_table_add(&ctx, NFT_MSG_NEWTABLE); if (err < 0) - goto err3; + goto err4; list_add_tail_rcu(&table->list, &afi->tables); return 0; +err4: + kfree(table->name); err3: kfree(table); err2: @@ -865,6 +870,7 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx) { BUG_ON(ctx->table->use > 0); + kfree(ctx->table->name); kfree(ctx->table); module_put(ctx->afi->owner); } @@ -1972,7 +1978,7 @@ err: } struct nft_rule_dump_ctx { - char table[NFT_TABLE_MAXNAMELEN]; + char *table; char chain[NFT_CHAIN_MAXNAMELEN]; }; @@ -1997,7 +2003,7 @@ static int nf_tables_dump_rules(struct sk_buff *skb, continue; list_for_each_entry_rcu(table, &afi->tables, list) { - if (ctx && ctx->table[0] && + if (ctx && ctx->table && strcmp(ctx->table, table->name) != 0) continue; @@ -2037,7 +2043,12 @@ done: static int nf_tables_dump_rules_done(struct netlink_callback *cb) { - kfree(cb->data); + struct nft_rule_dump_ctx *ctx = cb->data; + + if (ctx) { + kfree(ctx->table); + kfree(ctx); + } return 0; } @@ -2069,9 +2080,14 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, if (!ctx) return -ENOMEM; - if (nla[NFTA_RULE_TABLE]) - nla_strlcpy(ctx->table, nla[NFTA_RULE_TABLE], - sizeof(ctx->table)); + if (nla[NFTA_RULE_TABLE]) { + ctx->table = nla_strdup(nla[NFTA_RULE_TABLE], + GFP_KERNEL); + if (!ctx->table) { + kfree(ctx); + return -ENOMEM; + } + } if (nla[NFTA_RULE_CHAIN]) nla_strlcpy(ctx->chain, nla[NFTA_RULE_CHAIN], sizeof(ctx->chain)); @@ -4410,7 +4426,7 @@ nla_put_failure: } struct nft_obj_filter { - char table[NFT_OBJ_MAXNAMELEN]; + char *table; u32 type; }; @@ -4475,7 +4491,10 @@ done: static int nf_tables_dump_obj_done(struct netlink_callback *cb) { - kfree(cb->data); + 
struct nft_obj_filter *filter = cb->data; + + kfree(filter->table); + kfree(filter); return 0; } @@ -4489,9 +4508,13 @@ nft_obj_filter_alloc(const struct nlattr * const nla[]) if (!filter) return ERR_PTR(-ENOMEM); - if (nla[NFTA_OBJ_TABLE]) - nla_strlcpy(filter->table, nla[NFTA_OBJ_TABLE], - NFT_TABLE_MAXNAMELEN); + if (nla[NFTA_OBJ_TABLE]) { + filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_KERNEL); + if (!filter->table) { + kfree(filter); + return ERR_PTR(-ENOMEM); + } + } if (nla[NFTA_OBJ_TYPE]) filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE])); diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c index 0c3a0049e4aa..62787d985e9d 100644 --- a/net/netfilter/nf_tables_trace.c +++ b/net/netfilter/nf_tables_trace.c @@ -175,7 +175,7 @@ void nft_trace_notify(struct nft_traceinfo *info) return; size = nlmsg_total_size(sizeof(struct nfgenmsg)) + - nla_total_size(NFT_TABLE_MAXNAMELEN) + + nla_total_size(strlen(info->chain->table->name)) + nla_total_size(NFT_CHAIN_MAXNAMELEN) + nla_total_size_64bit(sizeof(__be64)) + /* rule handle */ nla_total_size(sizeof(__be32)) + /* trace type */ -- cgit v1.2.3-55-g7522 From b7263e071aba736cea9e71cdf2e76dfa7aebd039 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 27 Jul 2017 16:56:42 +0200 Subject: netfilter: nf_tables: Allow chain name of up to 255 chars Same conversion as for table names, use NFT_NAME_MAXLEN as upper boundary as well. Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 4 ++-- include/uapi/linux/netfilter/nf_tables.h | 2 +- net/netfilter/nf_tables_api.c | 34 ++++++++++++++++++++++++-------- net/netfilter/nf_tables_trace.c | 27 +++++++++++++++++++++++-- 4 files changed, 54 insertions(+), 13 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 05ecf78ec078..be1610162ee0 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -859,7 +859,7 @@ struct nft_chain { u16 level; u8 flags:6, genmask:2; - char name[NFT_CHAIN_MAXNAMELEN]; + char *name; }; enum nft_chain_type { @@ -1272,7 +1272,7 @@ struct nft_trans_set { struct nft_trans_chain { bool update; - char name[NFT_CHAIN_MAXNAMELEN]; + char *name; struct nft_stats __percpu *stats; u8 policy; }; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 0b94e572ef16..d9c03a8608ee 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -3,7 +3,7 @@ #define NFT_NAME_MAXLEN 256 #define NFT_TABLE_MAXNAMELEN NFT_NAME_MAXLEN -#define NFT_CHAIN_MAXNAMELEN 32 +#define NFT_CHAIN_MAXNAMELEN NFT_NAME_MAXLEN #define NFT_SET_MAXNAMELEN 32 #define NFT_OBJ_MAXNAMELEN 32 #define NFT_USERDATA_MAXLEN 256 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c2e392d5e512..747499039709 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1250,8 +1250,10 @@ static void nf_tables_chain_destroy(struct nft_chain *chain) static_branch_dec(&nft_counters_enabled); if (basechain->ops[0].dev != NULL) dev_put(basechain->ops[0].dev); + kfree(chain->name); kfree(basechain); } else { + kfree(chain->name); kfree(chain); } } @@ -1476,8 +1478,13 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, nft_trans_chain_policy(trans) = -1; if (nla[NFTA_CHAIN_HANDLE] && name) { - nla_strlcpy(nft_trans_chain_name(trans), name, - NFT_CHAIN_MAXNAMELEN); + nft_trans_chain_name(trans) = + nla_strdup(name, 
GFP_KERNEL); + if (!nft_trans_chain_name(trans)) { + kfree(trans); + free_percpu(stats); + return -ENOMEM; + } } list_add_tail(&trans->list, &net->nft.commit_list); return 0; @@ -1544,7 +1551,11 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk, INIT_LIST_HEAD(&chain->rules); chain->handle = nf_tables_alloc_handle(table); chain->table = table; - nla_strlcpy(chain->name, name, NFT_CHAIN_MAXNAMELEN); + chain->name = nla_strdup(name, GFP_KERNEL); + if (!chain->name) { + err = -ENOMEM; + goto err1; + } err = nf_tables_register_hooks(net, table, chain, afi->nops); if (err < 0) @@ -1979,7 +1990,7 @@ err: struct nft_rule_dump_ctx { char *table; - char chain[NFT_CHAIN_MAXNAMELEN]; + char *chain; }; static int nf_tables_dump_rules(struct sk_buff *skb, @@ -2047,6 +2058,7 @@ static int nf_tables_dump_rules_done(struct netlink_callback *cb) if (ctx) { kfree(ctx->table); + kfree(ctx->chain); kfree(ctx); } return 0; @@ -2088,9 +2100,15 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk, return -ENOMEM; } } - if (nla[NFTA_RULE_CHAIN]) - nla_strlcpy(ctx->chain, nla[NFTA_RULE_CHAIN], - sizeof(ctx->chain)); + if (nla[NFTA_RULE_CHAIN]) { + ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN], + GFP_KERNEL); + if (!ctx->chain) { + kfree(ctx->table); + kfree(ctx); + return -ENOMEM; + } + } c.data = ctx; } @@ -4863,7 +4881,7 @@ static void nft_chain_commit_update(struct nft_trans *trans) { struct nft_base_chain *basechain; - if (nft_trans_chain_name(trans)[0]) + if (nft_trans_chain_name(trans)) strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans)); if (!nft_is_base_chain(trans->ctx.chain)) diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c index 62787d985e9d..e1dc527a493b 100644 --- a/net/netfilter/nf_tables_trace.c +++ b/net/netfilter/nf_tables_trace.c @@ -162,6 +162,27 @@ static int nf_trace_fill_rule_info(struct sk_buff *nlskb, NFTA_TRACE_PAD); } +static bool nft_trace_have_verdict_chain(struct nft_traceinfo *info) +{ + switch (info->type) { + case NFT_TRACETYPE_RETURN: + case NFT_TRACETYPE_RULE: + break; + default: + return false; + } + + switch (info->verdict->code) { + case NFT_JUMP: + case NFT_GOTO: + break; + default: + return false; + } + + return true; +} + void nft_trace_notify(struct nft_traceinfo *info) { const struct nft_pktinfo *pkt = info->pkt; @@ -176,12 +197,11 @@ void nft_trace_notify(struct nft_traceinfo *info) size = nlmsg_total_size(sizeof(struct nfgenmsg)) + nla_total_size(strlen(info->chain->table->name)) + - nla_total_size(NFT_CHAIN_MAXNAMELEN) + + nla_total_size(strlen(info->chain->name)) + nla_total_size_64bit(sizeof(__be64)) + /* rule handle */ nla_total_size(sizeof(__be32)) + /* trace type */ nla_total_size(0) + /* VERDICT, nested */ nla_total_size(sizeof(u32)) + /* verdict code */ - nla_total_size(NFT_CHAIN_MAXNAMELEN) + /* jump target */ nla_total_size(sizeof(u32)) + /* id */ nla_total_size(NFT_TRACETYPE_LL_HSIZE) + nla_total_size(NFT_TRACETYPE_NETWORK_HSIZE) + @@ -194,6 +214,9 @@ void nft_trace_notify(struct nft_traceinfo *info) nla_total_size(sizeof(u32)) + /* nfproto */ nla_total_size(sizeof(u32)); /* policy */ + if (nft_trace_have_verdict_chain(info)) + size += nla_total_size(strlen(info->verdict->chain->name)); /* jump target */ + skb = nlmsg_new(size, GFP_ATOMIC); if (!skb) return; -- cgit v1.2.3-55-g7522 From 387454901bd62022ac1b04e15bd8d4fcc60bbed4 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 27 Jul 2017 16:56:43 +0200 Subject: netfilter: nf_tables: Allow set names of up to 255 chars Same conversion as 
for table names, use NFT_NAME_MAXLEN as upper boundary as well. Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 2 +- include/uapi/linux/netfilter/nf_tables.h | 2 +- net/netfilter/nf_tables_api.c | 18 ++++++++++++++---- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index be1610162ee0..66ba62fa7d90 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -396,7 +396,7 @@ void nft_unregister_set(struct nft_set_type *type); struct nft_set { struct list_head list; struct list_head bindings; - char name[NFT_SET_MAXNAMELEN]; + char *name; u32 ktype; u32 dtype; u32 objtype; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index d9c03a8608ee..b5e73e80b7b6 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -4,7 +4,7 @@ #define NFT_NAME_MAXLEN 256 #define NFT_TABLE_MAXNAMELEN NFT_NAME_MAXLEN #define NFT_CHAIN_MAXNAMELEN NFT_NAME_MAXLEN -#define NFT_SET_MAXNAMELEN 32 +#define NFT_SET_MAXNAMELEN NFT_NAME_MAXLEN #define NFT_OBJ_MAXNAMELEN 32 #define NFT_USERDATA_MAXLEN 256 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 747499039709..e6a07f27b1a3 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2650,7 +2650,7 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set, unsigned long *inuse; unsigned int n = 0, min = 0; - p = strnchr(name, NFT_SET_MAXNAMELEN, '%'); + p = strchr(name, '%'); if (p != NULL) { if (p[1] != 'd' || strchr(p + 2, '%')) return -EINVAL; @@ -2681,7 +2681,10 @@ cont: free_page((unsigned long)inuse); } - snprintf(set->name, sizeof(set->name), name, min + n); + set->name = kasprintf(GFP_KERNEL, name, min + n); + if (!set->name) + return -ENOMEM; + list_for_each_entry(i, &ctx->table->sets, list) { if (!nft_is_active_next(ctx->net, i)) continue; @@ -2958,7 +2961,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, struct nft_table *table; struct nft_set *set; struct nft_ctx ctx; - char name[NFT_SET_MAXNAMELEN]; + char *name; unsigned int size; bool create; u64 timeout; @@ -3104,8 +3107,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, goto err1; } - nla_strlcpy(name, nla[NFTA_SET_NAME], sizeof(set->name)); + name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL); + if (!name) { + err = -ENOMEM; + goto err2; + } + err = nf_tables_set_alloc_name(&ctx, set, name); + kfree(name); if (err < 0) goto err2; @@ -3155,6 +3164,7 @@ static void nft_set_destroy(struct nft_set *set) { set->ops->destroy(set); module_put(set->ops->type->owner); + kfree(set->name); kvfree(set); } -- cgit v1.2.3-55-g7522 From 615095752100748e221028fc96163c2b78185ae4 Mon Sep 17 00:00:00 2001 From: Phil Sutter Date: Thu, 27 Jul 2017 16:56:44 +0200 Subject: netfilter: nf_tables: Allow object names of up to 255 chars Same conversion as for table names, use NFT_NAME_MAXLEN as upper boundary as well. 
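The set conversion above also reworks nf_tables_set_alloc_name(): an anonymous-set template may contain a single '%d' placeholder, which is now expanded with kasprintf() instead of snprintf() into a fixed-size buffer. A standalone sketch of that pattern, with a hypothetical template and index:

	char *name;

	/* 'first_free' stands in for the min + n index the allocator
	 * computes from its in-use bitmap */
	name = kasprintf(GFP_KERNEL, "myset%d", first_free);
	if (!name)
		return -ENOMEM;	/* on success, the caller later frees 'name' with kfree() */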
Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 2 +- include/uapi/linux/netfilter/nf_tables.h | 2 +- net/netfilter/nf_tables_api.c | 11 +++++++++-- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 66ba62fa7d90..f9795fe394f3 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1016,7 +1016,7 @@ int nft_verdict_dump(struct sk_buff *skb, int type, */ struct nft_object { struct list_head list; - char name[NFT_OBJ_MAXNAMELEN]; + char *name; struct nft_table *table; u32 genmask:2, use:30; diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index b5e73e80b7b6..be25cf69295b 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -5,7 +5,7 @@ #define NFT_TABLE_MAXNAMELEN NFT_NAME_MAXLEN #define NFT_CHAIN_MAXNAMELEN NFT_NAME_MAXLEN #define NFT_SET_MAXNAMELEN NFT_NAME_MAXLEN -#define NFT_OBJ_MAXNAMELEN 32 +#define NFT_OBJ_MAXNAMELEN NFT_NAME_MAXLEN #define NFT_USERDATA_MAXLEN 256 /** diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index e6a07f27b1a3..149785ff1c7b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4402,15 +4402,21 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk, goto err1; } obj->table = table; - nla_strlcpy(obj->name, nla[NFTA_OBJ_NAME], NFT_OBJ_MAXNAMELEN); + obj->name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL); + if (!obj->name) { + err = -ENOMEM; + goto err2; + } err = nft_trans_obj_add(&ctx, NFT_MSG_NEWOBJ, obj); if (err < 0) - goto err2; + goto err3; list_add_tail_rcu(&obj->list, &table->objects); table->use++; return 0; +err3: + kfree(obj->name); err2: if (obj->type->destroy) obj->type->destroy(obj); @@ -4626,6 +4632,7 @@ static void nft_obj_destroy(struct nft_object *obj) obj->type->destroy(obj); module_put(obj->type->owner); + kfree(obj->name); kfree(obj); } -- cgit v1.2.3-55-g7522 From 9b7e26aee7cf27ffb37bb2f17229cecc89a833bd Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 28 Jul 2017 10:34:42 +0200 Subject: netfilter: nft_set_rbtree: use seqcount to avoid lock in most cases switch to lockless lockup. write side now also increments sequence counter. On lookup, sample counter value and only take the lock if we did not find a match and the counter has changed. This avoids need to write to private area in normal (lookup) cases. In case we detect a writer (seqretry is true) we fall back to taking the readlock. The readlock is also used during dumps to ensure we get a consistent tree walk. Similar technique (rbtree+seqlock) was used by David Howells in rxrpc. 
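The read side therefore follows the usual seqcount pattern: try a lockless tree walk first, and only fall back to the read lock when no match was found and a writer raced with the walk. In outline (condensed from the new nft_rbtree_lookup() in the diff below, not a drop-in replacement):

	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;		/* fast path: match found or no writer seen */

	read_lock_bh(&priv->lock);	/* slow path: a writer interfered */
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;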
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_set_rbtree.c | 49 +++++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index bce5382f1d49..d83a4ec5900d 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -19,8 +19,9 @@ #include struct nft_rbtree { - rwlock_t lock; struct rb_root root; + rwlock_t lock; + seqcount_t count; }; struct nft_rbtree_elem { @@ -40,8 +41,9 @@ static bool nft_rbtree_equal(const struct nft_set *set, const void *this, return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0; } -static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, - const u32 *key, const struct nft_set_ext **ext) +static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext, + unsigned int seq) { struct nft_rbtree *priv = nft_set_priv(set); const struct nft_rbtree_elem *rbe, *interval = NULL; @@ -50,15 +52,17 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, const void *this; int d; - read_lock_bh(&priv->lock); - parent = priv->root.rb_node; + parent = rcu_dereference_raw(priv->root.rb_node); while (parent != NULL) { + if (read_seqcount_retry(&priv->count, seq)) + return false; + rbe = rb_entry(parent, struct nft_rbtree_elem, node); this = nft_set_ext_key(&rbe->ext); d = memcmp(this, key, set->klen); if (d < 0) { - parent = parent->rb_left; + parent = rcu_dereference_raw(parent->rb_left); if (interval && nft_rbtree_equal(set, this, interval) && nft_rbtree_interval_end(this) && @@ -66,15 +70,14 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, continue; interval = rbe; } else if (d > 0) - parent = parent->rb_right; + parent = rcu_dereference_raw(parent->rb_right); else { if (!nft_set_elem_active(&rbe->ext, genmask)) { - parent = parent->rb_left; + parent = rcu_dereference_raw(parent->rb_left); continue; } if (nft_rbtree_interval_end(rbe)) goto out; - read_unlock_bh(&priv->lock); *ext = &rbe->ext; return true; @@ -84,15 +87,32 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, if (set->flags & NFT_SET_INTERVAL && interval != NULL && nft_set_elem_active(&interval->ext, genmask) && !nft_rbtree_interval_end(interval)) { - read_unlock_bh(&priv->lock); *ext = &interval->ext; return true; } out: - read_unlock_bh(&priv->lock); return false; } +static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, + const u32 *key, const struct nft_set_ext **ext) +{ + struct nft_rbtree *priv = nft_set_priv(set); + unsigned int seq = read_seqcount_begin(&priv->count); + bool ret; + + ret = __nft_rbtree_lookup(net, set, key, ext, seq); + if (ret || !read_seqcount_retry(&priv->count, seq)) + return ret; + + read_lock_bh(&priv->lock); + seq = read_seqcount_begin(&priv->count); + ret = __nft_rbtree_lookup(net, set, key, ext, seq); + read_unlock_bh(&priv->lock); + + return ret; +} + static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, struct nft_rbtree_elem *new, struct nft_set_ext **ext) @@ -130,7 +150,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, } } } - rb_link_node(&new->node, parent, p); + rb_link_node_rcu(&new->node, parent, p); rb_insert_color(&new->node, &priv->root); return 0; } @@ -144,7 +164,9 @@ static int nft_rbtree_insert(const struct net 
*net, const struct nft_set *set, int err; write_lock_bh(&priv->lock); + write_seqcount_begin(&priv->count); err = __nft_rbtree_insert(net, set, rbe, ext); + write_seqcount_end(&priv->count); write_unlock_bh(&priv->lock); return err; @@ -158,7 +180,9 @@ static void nft_rbtree_remove(const struct net *net, struct nft_rbtree_elem *rbe = elem->priv; write_lock_bh(&priv->lock); + write_seqcount_begin(&priv->count); rb_erase(&rbe->node, &priv->root); + write_seqcount_end(&priv->count); write_unlock_bh(&priv->lock); } @@ -264,6 +288,7 @@ static int nft_rbtree_init(const struct nft_set *set, struct nft_rbtree *priv = nft_set_priv(set); rwlock_init(&priv->lock); + seqcount_init(&priv->count); priv->root = RB_ROOT; return 0; } -- cgit v1.2.3-55-g7522 From 4d3a57f23dec59f0a2362e63540b2d01b37afe0a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 28 Jul 2017 11:22:04 +0200 Subject: netfilter: conntrack: do not enable connection tracking unless needed Discussion during NFWS 2017 in Faro has shown that the current conntrack behaviour is unreasonable. Even if conntrack module is loaded on behalf of a single net namespace, its turned on for all namespaces, which is expensive. Commit 481fa373476 ("netfilter: conntrack: add nf_conntrack_default_on sysctl") attempted to provide an alternative to the 'default on' behaviour by adding a sysctl to change it. However, as Eric points out, the sysctl only becomes available once the module is loaded, and then its too late. So we either have to move the sysctl to the core, or, alternatively, change conntrack to become active only once the rule set requires this. This does the latter, conntrack is only enabled when a rule needs it. Reported-by: Eric Dumazet Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- Documentation/networking/nf_conntrack-sysctl.txt | 11 --------- include/net/netfilter/nf_conntrack_l3proto.h | 15 ------------ net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 16 ++----------- net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 17 ++------------ net/netfilter/nf_conntrack_proto.c | 29 ------------------------ net/netfilter/nf_conntrack_standalone.c | 10 -------- 6 files changed, 4 insertions(+), 94 deletions(-) diff --git a/Documentation/networking/nf_conntrack-sysctl.txt b/Documentation/networking/nf_conntrack-sysctl.txt index 497d668288f9..433b6724797a 100644 --- a/Documentation/networking/nf_conntrack-sysctl.txt +++ b/Documentation/networking/nf_conntrack-sysctl.txt @@ -96,17 +96,6 @@ nf_conntrack_max - INTEGER Size of connection tracking table. Default value is nf_conntrack_buckets value * 4. -nf_conntrack_default_on - BOOLEAN - 0 - don't register conntrack in new net namespaces - 1 - register conntrack in new net namespaces (default) - - This controls wheter newly created network namespaces have connection - tracking enabled by default. It will be enabled automatically - regardless of this setting if the new net namespace requires - connection tracking, e.g. when NAT rules are created. - This setting is only visible in initial user namespace, it has no - effect on existing namespaces. 
- nf_conntrack_tcp_be_liberal - BOOLEAN 0 - disabled (default) not 0 - enabled diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index 6d14b36e3a49..1b8de164d744 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -73,21 +73,6 @@ struct nf_conntrack_l3proto { extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO]; -#ifdef CONFIG_SYSCTL -/* Protocol pernet registration. */ -int nf_ct_l3proto_pernet_register(struct net *net, - struct nf_conntrack_l3proto *proto); -#else -static inline int nf_ct_l3proto_pernet_register(struct net *n, - struct nf_conntrack_l3proto *p) -{ - return 0; -} -#endif - -void nf_ct_l3proto_pernet_unregister(struct net *net, - struct nf_conntrack_l3proto *proto); - /* Protocol global registration. */ int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto); void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto); diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index 63e4ea0e01f8..de5f0e6ddd1b 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -398,24 +398,12 @@ static struct nf_conntrack_l4proto *builtin_l4proto4[] = { static int ipv4_net_init(struct net *net) { - int ret = 0; - - ret = nf_ct_l4proto_pernet_register(net, builtin_l4proto4, - ARRAY_SIZE(builtin_l4proto4)); - if (ret < 0) - return ret; - ret = nf_ct_l3proto_pernet_register(net, &nf_conntrack_l3proto_ipv4); - if (ret < 0) { - pr_err("nf_conntrack_ipv4: pernet registration failed\n"); - nf_ct_l4proto_pernet_unregister(net, builtin_l4proto4, - ARRAY_SIZE(builtin_l4proto4)); - } - return ret; + return nf_ct_l4proto_pernet_register(net, builtin_l4proto4, + ARRAY_SIZE(builtin_l4proto4)); } static void ipv4_net_exit(struct net *net) { - nf_ct_l3proto_pernet_unregister(net, &nf_conntrack_l3proto_ipv4); nf_ct_l4proto_pernet_unregister(net, builtin_l4proto4, ARRAY_SIZE(builtin_l4proto4)); } diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index f2d2f4a9294b..ddef5ee9e0a8 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -398,25 +398,12 @@ static struct nf_conntrack_l4proto *builtin_l4proto6[] = { static int ipv6_net_init(struct net *net) { - int ret = 0; - - ret = nf_ct_l4proto_pernet_register(net, builtin_l4proto6, - ARRAY_SIZE(builtin_l4proto6)); - if (ret < 0) - return ret; - - ret = nf_ct_l3proto_pernet_register(net, &nf_conntrack_l3proto_ipv6); - if (ret < 0) { - pr_err("nf_conntrack_ipv6: pernet registration failed.\n"); - nf_ct_l4proto_pernet_unregister(net, builtin_l4proto6, - ARRAY_SIZE(builtin_l4proto6)); - } - return ret; + return nf_ct_l4proto_pernet_register(net, builtin_l4proto6, + ARRAY_SIZE(builtin_l4proto6)); } static void ipv6_net_exit(struct net *net) { - nf_ct_l3proto_pernet_unregister(net, &nf_conntrack_l3proto_ipv6); nf_ct_l4proto_pernet_unregister(net, builtin_l4proto6, ARRAY_SIZE(builtin_l4proto6)); } diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 1dcad229c3cc..7c89dade6fd3 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -238,20 +238,6 @@ out_unlock: } EXPORT_SYMBOL_GPL(nf_ct_l3proto_register); -#ifdef CONFIG_SYSCTL -extern unsigned int nf_conntrack_default_on; - -int nf_ct_l3proto_pernet_register(struct net 
*net, - struct nf_conntrack_l3proto *proto) -{ - if (nf_conntrack_default_on == 0) - return 0; - - return proto->net_ns_get ? proto->net_ns_get(net) : 0; -} -EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_register); -#endif - void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto) { BUG_ON(proto->l3proto >= NFPROTO_NUMPROTO); @@ -270,21 +256,6 @@ void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto) } EXPORT_SYMBOL_GPL(nf_ct_l3proto_unregister); -void nf_ct_l3proto_pernet_unregister(struct net *net, - struct nf_conntrack_l3proto *proto) -{ - /* - * nf_conntrack_default_on *might* have registered hooks. - * ->net_ns_put must cope with more puts() than get(), i.e. - * if nf_conntrack_default_on was 0 at time of - * nf_ct_l3proto_pernet_register invocation this net_ns_put() - * should be a noop. - */ - if (proto->net_ns_put) - proto->net_ns_put(net); -} -EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister); - static struct nf_proto_net *nf_ct_l4proto_net(struct net *net, struct nf_conntrack_l4proto *l4proto) { diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index ccb5cb9043e0..5b6c675d55b1 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -452,9 +452,6 @@ static int log_invalid_proto_max __read_mostly = 255; /* size the user *wants to set */ static unsigned int nf_conntrack_htable_size_user __read_mostly; -extern unsigned int nf_conntrack_default_on; -unsigned int nf_conntrack_default_on __read_mostly = 1; - static int nf_conntrack_hash_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -520,13 +517,6 @@ static struct ctl_table nf_ct_sysctl_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, - { - .procname = "nf_conntrack_default_on", - .data = &nf_conntrack_default_on, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, { } }; -- cgit v1.2.3-55-g7522 From e7942d0633c47c791ece6afa038be9cf977226de Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 30 Jul 2017 03:57:18 +0200 Subject: tcp: remove prequeue support prequeue is a tcp receive optimization that moves part of rx processing from bh to process context. This only works if the socket being processed belongs to a process that is blocked in recv on that socket. In practice, this doesn't happen anymore that often because nowadays servers tend to use an event driven (epoll) model. Even normal client applications (web browsers) commonly use many tcp connections in parallel. This has measureable impact only in netperf (which uses plain recv and thus allows prequeue use) from host to locally running vm (~4%), however, there were no changes when using netperf between two physical hosts with ixgbe interfaces. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- include/linux/tcp.h | 9 ---- include/net/tcp.h | 11 ----- net/ipv4/tcp.c | 105 ----------------------------------------------- net/ipv4/tcp_input.c | 62 ---------------------------- net/ipv4/tcp_ipv4.c | 61 +-------------------------- net/ipv4/tcp_minisocks.c | 1 - net/ipv4/tcp_timer.c | 12 ------ net/ipv6/tcp_ipv6.c | 3 +- 8 files changed, 2 insertions(+), 262 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 542ca1ae02c4..32fb37cfb0d1 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -192,15 +192,6 @@ struct tcp_sock { struct list_head tsq_node; /* anchor in tsq_tasklet.head list */ - /* Data for direct copy to user */ - struct { - struct sk_buff_head prequeue; - struct task_struct *task; - struct msghdr *msg; - int memory; - int len; - } ucopy; - u32 snd_wl1; /* Sequence for window update */ u32 snd_wnd; /* The window we expect to receive */ u32 max_window; /* Maximal window ever seen from peer */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 12d68335acd4..93f115cfc8f8 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1244,17 +1244,6 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb) __tcp_checksum_complete(skb); } -/* Prequeue for VJ style copy to user, combined with checksumming. */ - -static inline void tcp_prequeue_init(struct tcp_sock *tp) -{ - tp->ucopy.task = NULL; - tp->ucopy.len = 0; - tp->ucopy.memory = 0; - skb_queue_head_init(&tp->ucopy.prequeue); -} - -bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); int tcp_filter(struct sock *sk, struct sk_buff *skb); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 71ce33decd97..62018ea6f45f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -400,7 +400,6 @@ void tcp_init_sock(struct sock *sk) tp->out_of_order_queue = RB_ROOT; tcp_init_xmit_timers(sk); - tcp_prequeue_init(tp); INIT_LIST_HEAD(&tp->tsq_node); icsk->icsk_rto = TCP_TIMEOUT_INIT; @@ -1525,20 +1524,6 @@ static void tcp_cleanup_rbuf(struct sock *sk, int copied) tcp_send_ack(sk); } -static void tcp_prequeue_process(struct sock *sk) -{ - struct sk_buff *skb; - struct tcp_sock *tp = tcp_sk(sk); - - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED); - - while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) - sk_backlog_rcv(sk, skb); - - /* Clear memory counter. */ - tp->ucopy.memory = 0; -} - static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off) { struct sk_buff *skb; @@ -1671,7 +1656,6 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int err; int target; /* Read at least this many bytes */ long timeo; - struct task_struct *user_recv = NULL; struct sk_buff *skb, *last; u32 urg_hole = 0; @@ -1806,51 +1790,6 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, tcp_cleanup_rbuf(sk, copied); - if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { - /* Install new reader */ - if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { - user_recv = current; - tp->ucopy.task = user_recv; - tp->ucopy.msg = msg; - } - - tp->ucopy.len = len; - - WARN_ON(tp->copied_seq != tp->rcv_nxt && - !(flags & (MSG_PEEK | MSG_TRUNC))); - - /* Ugly... If prequeue is not empty, we have to - * process it before releasing socket, otherwise - * order will be broken at second iteration. - * More elegant solution is required!!! - * - * Look: we have the following (pseudo)queues: - * - * 1. packets in flight - * 2. backlog - * 3. prequeue - * 4. 
receive_queue - * - * Each queue can be processed only if the next ones - * are empty. At this point we have empty receive_queue. - * But prequeue _can_ be not empty after 2nd iteration, - * when we jumped to start of loop because backlog - * processing added something to receive_queue. - * We cannot release_sock(), because backlog contains - * packets arrived _after_ prequeued ones. - * - * Shortly, algorithm is clear --- to process all - * the queues in order. We could make it more directly, - * requeueing packets from backlog to prequeue, if - * is not empty. It is more elegant, but eats cycles, - * unfortunately. - */ - if (!skb_queue_empty(&tp->ucopy.prequeue)) - goto do_prequeue; - - /* __ Set realtime policy in scheduler __ */ - } - if (copied >= target) { /* Do not sleep, just process backlog. */ release_sock(sk); @@ -1859,31 +1798,6 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, sk_wait_data(sk, &timeo, last); } - if (user_recv) { - int chunk; - - /* __ Restore normal policy in scheduler __ */ - - chunk = len - tp->ucopy.len; - if (chunk != 0) { - NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); - len -= chunk; - copied += chunk; - } - - if (tp->rcv_nxt == tp->copied_seq && - !skb_queue_empty(&tp->ucopy.prequeue)) { -do_prequeue: - tcp_prequeue_process(sk); - - chunk = len - tp->ucopy.len; - if (chunk != 0) { - NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); - len -= chunk; - copied += chunk; - } - } - } if ((flags & MSG_PEEK) && (peek_seq - copied - urg_hole != tp->copied_seq)) { net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", @@ -1955,25 +1869,6 @@ skip_copy: break; } while (len > 0); - if (user_recv) { - if (!skb_queue_empty(&tp->ucopy.prequeue)) { - int chunk; - - tp->ucopy.len = copied > 0 ? len : 0; - - tcp_prequeue_process(sk); - - if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { - NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); - len -= chunk; - copied += chunk; - } - } - - tp->ucopy.task = NULL; - tp->ucopy.len = 0; - } - /* According to UNIX98, msg_name/msg_namelen are ignored * on connected socket. I was just happy when found this 8) --ANK */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index adc3f3e9468c..770ce6cb3eca 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4611,22 +4611,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) goto out_of_window; /* Ok. In sequence. In window. 
*/ - if (tp->ucopy.task == current && - tp->copied_seq == tp->rcv_nxt && tp->ucopy.len && - sock_owned_by_user(sk) && !tp->urg_data) { - int chunk = min_t(unsigned int, skb->len, - tp->ucopy.len); - - __set_current_state(TASK_RUNNING); - - if (!skb_copy_datagram_msg(skb, 0, tp->ucopy.msg, chunk)) { - tp->ucopy.len -= chunk; - tp->copied_seq += chunk; - eaten = (chunk == skb->len); - tcp_rcv_space_adjust(sk); - } - } - if (eaten <= 0) { queue_and_out: if (eaten < 0) { @@ -5186,26 +5170,6 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t } } -static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) -{ - struct tcp_sock *tp = tcp_sk(sk); - int chunk = skb->len - hlen; - int err; - - if (skb_csum_unnecessary(skb)) - err = skb_copy_datagram_msg(skb, hlen, tp->ucopy.msg, chunk); - else - err = skb_copy_and_csum_datagram_msg(skb, hlen, tp->ucopy.msg); - - if (!err) { - tp->ucopy.len -= chunk; - tp->copied_seq += chunk; - tcp_rcv_space_adjust(sk); - } - - return err; -} - /* Accept RST for rcv_nxt - 1 after a FIN. * When tcp connections are abruptly terminated from Mac OSX (via ^C), a * FIN is sent followed by a RST packet. The RST is sent with the same @@ -5446,32 +5410,6 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, int eaten = 0; bool fragstolen = false; - if (tp->ucopy.task == current && - tp->copied_seq == tp->rcv_nxt && - len - tcp_header_len <= tp->ucopy.len && - sock_owned_by_user(sk)) { - __set_current_state(TASK_RUNNING); - - if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) { - /* Predicted packet is in window by definition. - * seq == rcv_nxt and rcv_wup <= rcv_nxt. - * Hence, check seq<=rcv_wup reduces to: - */ - if (tcp_header_len == - (sizeof(struct tcphdr) + - TCPOLEN_TSTAMP_ALIGNED) && - tp->rcv_nxt == tp->rcv_wup) - tcp_store_ts_recent(tp); - - tcp_rcv_rtt_measure_ts(sk, skb); - - __skb_pull(skb, tcp_header_len); - tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); - NET_INC_STATS(sock_net(sk), - LINUX_MIB_TCPHPHITSTOUSER); - eaten = 1; - } - } if (!eaten) { if (tcp_checksum_complete(skb)) goto csum_error; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 3a19ea28339f..a68eb4577d36 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1541,61 +1541,6 @@ void tcp_v4_early_demux(struct sk_buff *skb) } } -/* Packet is added to VJ-style prequeue for processing in process - * context, if a reader task is waiting. Apparently, this exciting - * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93) - * failed somewhere. Latency? Burstiness? Well, at least now we will - * see, why it failed. 8)8) --ANK - * - */ -bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) -{ - struct tcp_sock *tp = tcp_sk(sk); - - if (sysctl_tcp_low_latency || !tp->ucopy.task) - return false; - - if (skb->len <= tcp_hdrlen(skb) && - skb_queue_len(&tp->ucopy.prequeue) == 0) - return false; - - /* Before escaping RCU protected region, we need to take care of skb - * dst. Prequeue is only enabled for established sockets. - * For such sockets, we might need the skb dst only to set sk->sk_rx_dst - * Instead of doing full sk_rx_dst validity here, let's perform - * an optimistic check. 
- */ - if (likely(sk->sk_rx_dst)) - skb_dst_drop(skb); - else - skb_dst_force_safe(skb); - - __skb_queue_tail(&tp->ucopy.prequeue, skb); - tp->ucopy.memory += skb->truesize; - if (skb_queue_len(&tp->ucopy.prequeue) >= 32 || - tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) { - struct sk_buff *skb1; - - BUG_ON(sock_owned_by_user(sk)); - __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED, - skb_queue_len(&tp->ucopy.prequeue)); - - while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) - sk_backlog_rcv(sk, skb1); - - tp->ucopy.memory = 0; - } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { - wake_up_interruptible_sync_poll(sk_sleep(sk), - POLLIN | POLLRDNORM | POLLRDBAND); - if (!inet_csk_ack_scheduled(sk)) - inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, - (3 * tcp_rto_min(sk)) / 4, - TCP_RTO_MAX); - } - return true; -} -EXPORT_SYMBOL(tcp_prequeue); - bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) { u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf; @@ -1770,8 +1715,7 @@ process: tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { - if (!tcp_prequeue(sk, skb)) - ret = tcp_v4_do_rcv(sk, skb); + ret = tcp_v4_do_rcv(sk, skb); } else if (tcp_add_backlog(sk, skb)) { goto discard_and_relse; } @@ -1936,9 +1880,6 @@ void tcp_v4_destroy_sock(struct sock *sk) } #endif - /* Clean prequeue, it must be empty really */ - __skb_queue_purge(&tp->ucopy.prequeue); - /* Clean up a referenced TCP bind bucket. */ if (inet_csk(sk)->icsk_bind_hash) inet_put_port(sk); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 0ff83c1637d8..188a6f31356d 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -445,7 +445,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1; - tcp_prequeue_init(newtp); INIT_LIST_HEAD(&newtp->tsq_node); tcp_init_wl(newtp, treq->rcv_isn); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index c0feeeef962a..f753f9d2fee3 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -239,7 +239,6 @@ static int tcp_write_timeout(struct sock *sk) /* Called with BH disabled */ void tcp_delack_timer_handler(struct sock *sk) { - struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); sk_mem_reclaim_partial(sk); @@ -254,17 +253,6 @@ void tcp_delack_timer_handler(struct sock *sk) } icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; - if (!skb_queue_empty(&tp->ucopy.prequeue)) { - struct sk_buff *skb; - - __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED); - - while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) - sk_backlog_rcv(sk, skb); - - tp->ucopy.memory = 0; - } - if (inet_csk_ack_scheduled(sk)) { if (!icsk->icsk_ack.pingpong) { /* Delayed ACK missed: inflate ATO. */ diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 2968a33cca7d..39ee8e7fc4bd 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1505,8 +1505,7 @@ process: tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { - if (!tcp_prequeue(sk, skb)) - ret = tcp_v6_do_rcv(sk, skb); + ret = tcp_v6_do_rcv(sk, skb); } else if (tcp_add_backlog(sk, skb)) { goto discard_and_relse; } -- cgit v1.2.3-55-g7522 From c13ee2a4f03ff6e15102b7731258681913e551a5 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 30 Jul 2017 03:57:19 +0200 Subject: tcp: reindent two spots after prequeue removal These two branches are now always true, remove the conditional. objdiff shows no changes. 
Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 50 +++++++++++++++++++++++--------------------------- 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 770ce6cb3eca..87efde9f5a90 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4611,16 +4611,14 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) goto out_of_window; /* Ok. In sequence. In window. */ - if (eaten <= 0) { queue_and_out: - if (eaten < 0) { - if (skb_queue_len(&sk->sk_receive_queue) == 0) - sk_forced_mem_schedule(sk, skb->truesize); - else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) - goto drop; - } - eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); + if (eaten < 0) { + if (skb_queue_len(&sk->sk_receive_queue) == 0) + sk_forced_mem_schedule(sk, skb->truesize); + else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) + goto drop; } + eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); if (skb->len) tcp_event_data_recv(sk, skb); @@ -5410,30 +5408,28 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, int eaten = 0; bool fragstolen = false; - if (!eaten) { - if (tcp_checksum_complete(skb)) - goto csum_error; + if (tcp_checksum_complete(skb)) + goto csum_error; - if ((int)skb->truesize > sk->sk_forward_alloc) - goto step5; + if ((int)skb->truesize > sk->sk_forward_alloc) + goto step5; - /* Predicted packet is in window by definition. - * seq == rcv_nxt and rcv_wup <= rcv_nxt. - * Hence, check seq<=rcv_wup reduces to: - */ - if (tcp_header_len == - (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && - tp->rcv_nxt == tp->rcv_wup) - tcp_store_ts_recent(tp); + /* Predicted packet is in window by definition. + * seq == rcv_nxt and rcv_wup <= rcv_nxt. + * Hence, check seq<=rcv_wup reduces to: + */ + if (tcp_header_len == + (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && + tp->rcv_nxt == tp->rcv_wup) + tcp_store_ts_recent(tp); - tcp_rcv_rtt_measure_ts(sk, skb); + tcp_rcv_rtt_measure_ts(sk, skb); - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); - /* Bulk data transfer: receiver */ - eaten = tcp_queue_rcv(sk, skb, tcp_header_len, - &fragstolen); - } + /* Bulk data transfer: receiver */ + eaten = tcp_queue_rcv(sk, skb, tcp_header_len, + &fragstolen); tcp_event_data_recv(sk, skb); -- cgit v1.2.3-55-g7522 From b6690b14386698ce2c19309abad3f17656bdfaea Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 30 Jul 2017 03:57:20 +0200 Subject: tcp: remove low_latency sysctl Was only checked by the removed prequeue code. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- Documentation/networking/ip-sysctl.txt | 7 +------ include/net/tcp.h | 1 - net/ipv4/sysctl_net_ipv4.c | 3 +++ net/ipv4/tcp_ipv4.c | 2 -- 4 files changed, 4 insertions(+), 9 deletions(-) diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index f485d553e65c..84c9b8cee780 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -353,12 +353,7 @@ tcp_l3mdev_accept - BOOLEAN compiled with CONFIG_NET_L3_MASTER_DEV. tcp_low_latency - BOOLEAN - If set, the TCP stack makes decisions that prefer lower - latency as opposed to higher throughput. By default, this - option is not set meaning that higher throughput is preferred. - An example of an application where this default should be - changed would be a Beowulf compute cluster. 
- Default: 0 + This is a legacy option, it has no effect anymore. tcp_max_orphans - INTEGER Maximal number of TCP sockets not attached to any user file handle, diff --git a/include/net/tcp.h b/include/net/tcp.h index 93f115cfc8f8..8507c81fb0e9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -256,7 +256,6 @@ extern int sysctl_tcp_rmem[3]; extern int sysctl_tcp_app_win; extern int sysctl_tcp_adv_win_scale; extern int sysctl_tcp_frto; -extern int sysctl_tcp_low_latency; extern int sysctl_tcp_nometrics_save; extern int sysctl_tcp_moderate_rcvbuf; extern int sysctl_tcp_tso_win_divisor; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 9bf809726066..0d3c038d7b04 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -45,6 +45,9 @@ static int tcp_syn_retries_max = MAX_TCP_SYNCNT; static int ip_ping_group_range_min[] = { 0, 0 }; static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; +/* obsolete */ +static int sysctl_tcp_low_latency __read_mostly; + /* Update system visible IP port range */ static void set_local_port_range(struct net *net, int range[2]) { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index a68eb4577d36..9b51663cd5a4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -85,8 +85,6 @@ #include #include -int sysctl_tcp_low_latency __read_mostly; - #ifdef CONFIG_TCP_MD5SIG static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th); -- cgit v1.2.3-55-g7522 From 45f119bf936b1f9f546a0b139c5b56f9bb2bdc78 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 30 Jul 2017 03:57:21 +0200 Subject: tcp: remove header prediction Like prequeue, I am not sure this is overly useful nowadays. If we receive a train of packets, GRO will aggregate them if the headers are the same (HP predates GRO by several years) so we don't get a per-packet benefit, only a per-aggregated-packet one. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- include/linux/tcp.h | 6 -- include/net/tcp.h | 23 ------ net/ipv4/tcp.c | 4 +- net/ipv4/tcp_input.c | 192 +++-------------------------------------------- net/ipv4/tcp_minisocks.c | 2 - net/ipv4/tcp_output.c | 2 - 6 files changed, 10 insertions(+), 219 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 32fb37cfb0d1..d7389ea36e10 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -147,12 +147,6 @@ struct tcp_sock { u16 tcp_header_len; /* Bytes of tcp header to send */ u16 gso_segs; /* Max number of segs per GSO packet */ -/* - * Header prediction flags - * 0x5?10 << 16 + snd_wnd in net byte order - */ - __be32 pred_flags; - /* * RFC793 variables by their proper names. This means you can * read the code and the spec side by side (and laugh ...) 
diff --git a/include/net/tcp.h b/include/net/tcp.h index 8507c81fb0e9..8f11b82b5b5a 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -631,29 +631,6 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp) return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us); } -static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) -{ - tp->pred_flags = htonl((tp->tcp_header_len << 26) | - ntohl(TCP_FLAG_ACK) | - snd_wnd); -} - -static inline void tcp_fast_path_on(struct tcp_sock *tp) -{ - __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale); -} - -static inline void tcp_fast_path_check(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - if (RB_EMPTY_ROOT(&tp->out_of_order_queue) && - tp->rcv_wnd && - atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && - !tp->urg_data) - tcp_fast_path_on(tp); -} - /* Compute the actual rto_min value */ static inline u32 tcp_rto_min(struct sock *sk) { diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 62018ea6f45f..e022874d509f 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1848,10 +1848,8 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, tcp_rcv_space_adjust(sk); skip_copy: - if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { + if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) tp->urg_data = 0; - tcp_fast_path_check(sk); - } if (used + offset < skb->len) continue; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 87efde9f5a90..bfde9d7d210e 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -103,7 +103,6 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; #define FLAG_DATA_SACKED 0x20 /* New SACK. */ #define FLAG_ECE 0x40 /* ECE in this ACK */ #define FLAG_LOST_RETRANS 0x80 /* This ACK marks some retransmission lost */ -#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ #define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */ #define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ @@ -3367,12 +3366,6 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 if (tp->snd_wnd != nwin) { tp->snd_wnd = nwin; - /* Note, it is the only place, where - * fast path is recovered for sending TCP. - */ - tp->pred_flags = 0; - tcp_fast_path_check(sk); - if (tcp_send_head(sk)) tcp_slow_start_after_idle_check(sk); @@ -3597,19 +3590,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (flag & FLAG_UPDATE_TS_RECENT) tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { - /* Window is constant, pure forward advance. - * No more checks are required. - * Note, we use the fact that SND.UNA>=SND.WL2. - */ - tcp_update_wl(tp, ack_seq); - tcp_snd_una_update(tp, ack); - flag |= FLAG_WIN_UPDATE; - - tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); - - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); - } else { + { u32 ack_ev_flags = CA_ACK_SLOWPATH; if (ack_seq != TCP_SKB_CB(skb)->end_seq) @@ -4398,8 +4379,6 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) return; } - /* Disable header prediction. 
*/ - tp->pred_flags = 0; inet_csk_schedule_ack(sk); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); @@ -4638,8 +4617,6 @@ queue_and_out: if (tp->rx_opt.num_sacks) tcp_sack_remove(tp); - tcp_fast_path_check(sk); - if (eaten > 0) kfree_skb_partial(skb, fragstolen); if (!sock_flag(sk, SOCK_DEAD)) @@ -4965,7 +4942,6 @@ static int tcp_prune_queue(struct sock *sk) NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED); /* Massive buffer overcommit. */ - tp->pred_flags = 0; return -1; } @@ -5137,9 +5113,6 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) tp->urg_data = TCP_URG_NOTYET; tp->urg_seq = ptr; - - /* Disable header prediction. */ - tp->pred_flags = 0; } /* This is the 'fast' part of urgent handling. */ @@ -5298,26 +5271,6 @@ discard: /* * TCP receive function for the ESTABLISHED state. - * - * It is split into a fast path and a slow path. The fast path is - * disabled when: - * - A zero window was announced from us - zero window probing - * is only handled properly in the slow path. - * - Out of order segments arrived. - * - Urgent data is expected. - * - There is no buffer space left - * - Unexpected TCP flags/window values/header lengths are received - * (detected by checking the TCP header against pred_flags) - * - Data is sent in both directions. Fast path only supports pure senders - * or pure receivers (this means either the sequence number or the ack - * value must stay constant) - * - Unexpected TCP option. - * - * When these conditions are not satisfied it drops into a standard - * receive procedure patterned after RFC793 to handle all cases. - * The first three cases are guaranteed by proper pred_flags setting, - * the rest is checked inline. Fast processing is turned on in - * tcp_data_queue when everything is OK. */ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) @@ -5328,144 +5281,19 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb, tcp_mstamp_refresh(tp); if (unlikely(!sk->sk_rx_dst)) inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); - /* - * Header prediction. - * The code loosely follows the one in the famous - * "30 instruction TCP receive" Van Jacobson mail. - * - * Van's trick is to deposit buffers into socket queue - * on a device interrupt, to call tcp_recv function - * on the receive process context and checksum and copy - * the buffer to user space. smart... - * - * Our current scheme is not silly either but we take the - * extra cost of the net_bh soft interrupt processing... - * We do checksum and copy also but from device to kernel. - */ tp->rx_opt.saw_tstamp = 0; - /* pred_flags is 0xS?10 << 16 + snd_wnd - * if header_prediction is to be made - * 'S' will always be tp->tcp_header_len >> 2 - * '?' will be 0 for the fast path, otherwise pred_flags is 0 to - * turn it off (when there are holes in the receive - * space for instance) - * PSH flag is ignored. - */ - - if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && - TCP_SKB_CB(skb)->seq == tp->rcv_nxt && - !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { - int tcp_header_len = tp->tcp_header_len; - - /* Timestamp header prediction: tcp_header_len - * is automatically equal to th->doff*4 due to pred_flags - * match. - */ - - /* Check timestamp */ - if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) { - /* No? Slow path! 
*/ - if (!tcp_parse_aligned_timestamp(tp, th)) - goto slow_path; - - /* If PAWS failed, check it more carefully in slow path */ - if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) - goto slow_path; - - /* DO NOT update ts_recent here, if checksum fails - * and timestamp was corrupted part, it will result - * in a hung connection since we will drop all - * future packets due to the PAWS test. - */ - } - - if (len <= tcp_header_len) { - /* Bulk data transfer: sender */ - if (len == tcp_header_len) { - /* Predicted packet is in window by definition. - * seq == rcv_nxt and rcv_wup <= rcv_nxt. - * Hence, check seq<=rcv_wup reduces to: - */ - if (tcp_header_len == - (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && - tp->rcv_nxt == tp->rcv_wup) - tcp_store_ts_recent(tp); - - /* We know that such packets are checksummed - * on entry. - */ - tcp_ack(sk, skb, 0); - __kfree_skb(skb); - tcp_data_snd_check(sk); - return; - } else { /* Header too small */ - TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); - goto discard; - } - } else { - int eaten = 0; - bool fragstolen = false; - - if (tcp_checksum_complete(skb)) - goto csum_error; - - if ((int)skb->truesize > sk->sk_forward_alloc) - goto step5; - - /* Predicted packet is in window by definition. - * seq == rcv_nxt and rcv_wup <= rcv_nxt. - * Hence, check seq<=rcv_wup reduces to: - */ - if (tcp_header_len == - (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) && - tp->rcv_nxt == tp->rcv_wup) - tcp_store_ts_recent(tp); - - tcp_rcv_rtt_measure_ts(sk, skb); - - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); - - /* Bulk data transfer: receiver */ - eaten = tcp_queue_rcv(sk, skb, tcp_header_len, - &fragstolen); - - tcp_event_data_recv(sk, skb); - - if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { - /* Well, only one small jumplet in fast path... */ - tcp_ack(sk, skb, FLAG_DATA); - tcp_data_snd_check(sk); - if (!inet_csk_ack_scheduled(sk)) - goto no_ack; - } - - __tcp_ack_snd_check(sk, 0); -no_ack: - if (eaten) - kfree_skb_partial(skb, fragstolen); - sk->sk_data_ready(sk); - return; - } - } - -slow_path: if (len < (th->doff << 2) || tcp_checksum_complete(skb)) goto csum_error; if (!th->ack && !th->rst && !th->syn) goto discard; - /* - * Standard slow path. - */ - if (!tcp_validate_incoming(sk, skb, th, 1)) return; -step5: - if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) + if (tcp_ack(sk, skb, FLAG_UPDATE_TS_RECENT) < 0) goto discard; tcp_rcv_rtt_measure_ts(sk, skb); @@ -5519,11 +5347,10 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) if (sock_flag(sk, SOCK_KEEPOPEN)) inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); - if (!tp->rx_opt.snd_wscale) - __tcp_fast_path_on(tp, tp->snd_wnd); - else - tp->pred_flags = 0; - + if (!sock_flag(sk, SOCK_DEAD)) { + sk->sk_state_change(sk); + sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); + } } static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, @@ -5652,7 +5479,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tcp_ecn_rcv_synack(tp, th); tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); - tcp_ack(sk, skb, FLAG_SLOWPATH); + tcp_ack(sk, skb, 0); /* Ok.. it's good. Set up sequence numbers and * move to established. 
@@ -5888,8 +5715,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) return 0; /* step 5: check the ACK field */ - acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | - FLAG_UPDATE_TS_RECENT | + + acceptable = tcp_ack(sk, skb, FLAG_UPDATE_TS_RECENT | FLAG_NO_CHALLENGE_ACK) > 0; if (!acceptable) { @@ -5957,7 +5784,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tp->lsndtime = tcp_jiffies32; tcp_initialize_rcv_mss(sk); - tcp_fast_path_on(tp); break; case TCP_FIN_WAIT1: { diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 188a6f31356d..1537b87c657f 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -436,8 +436,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, struct tcp_sock *newtp = tcp_sk(newsk); /* Now setup tcp_sock */ - newtp->pred_flags = 0; - newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1; newtp->segs_in = 1; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 886d874775df..8380464aead1 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -295,9 +295,7 @@ static u16 tcp_select_window(struct sock *sk) /* RFC1323 scaling applied */ new_win >>= tp->rx_opt.rcv_wscale; - /* If we advertise zero window, disable fast path. */ if (new_win == 0) { - tp->pred_flags = 0; if (old_win) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTOZEROWINDOWADV); -- cgit v1.2.3-55-g7522 From 573aeb0492be3d0e5be9796a0c91abde794c1e36 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 30 Jul 2017 03:57:22 +0200 Subject: tcp: remove CA_ACK_SLOWPATH re-indent tcp_ack, and remove CA_ACK_SLOWPATH; it is always set now. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- include/net/tcp.h | 5 ++--- net/ipv4/tcp_input.c | 35 ++++++++++++++++------------------- net/ipv4/tcp_westwood.c | 31 ++++--------------------------- 3 files changed, 22 insertions(+), 49 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index 8f11b82b5b5a..3ecb62811004 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -880,9 +880,8 @@ enum tcp_ca_event { /* Information about inbound ACK, passed to cong_ops->in_ack_event() */ enum tcp_ca_ack_event_flags { - CA_ACK_SLOWPATH = (1 << 0), /* In slow path processing */ - CA_ACK_WIN_UPDATE = (1 << 1), /* ACK updated window */ - CA_ACK_ECE = (1 << 2), /* ECE bit is set on ack */ + CA_ACK_WIN_UPDATE = (1 << 0), /* ACK updated window */ + CA_ACK_ECE = (1 << 1), /* ECE bit is set on ack */ }; /* diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index bfde9d7d210e..af0a98d54b62 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3547,6 +3547,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) u32 lost = tp->lost; int acked = 0; /* Number of packets newly acked */ int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */ + u32 ack_ev_flags = 0; sack_state.first_sackt = 0; sack_state.rate = &rs; @@ -3590,30 +3591,26 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) if (flag & FLAG_UPDATE_TS_RECENT) tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - { - u32 ack_ev_flags = CA_ACK_SLOWPATH; - - if (ack_seq != TCP_SKB_CB(skb)->end_seq) - flag |= FLAG_DATA; - else - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); + if (ack_seq != TCP_SKB_CB(skb)->end_seq) + flag |= FLAG_DATA; + else + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS); - flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); + flag |= tcp_ack_update_window(sk, skb, ack, ack_seq); - if 
(TCP_SKB_CB(skb)->sacked) - flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, - &sack_state); + if (TCP_SKB_CB(skb)->sacked) + flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, + &sack_state); - if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { - flag |= FLAG_ECE; - ack_ev_flags |= CA_ACK_ECE; - } + if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { + flag |= FLAG_ECE; + ack_ev_flags = CA_ACK_ECE; + } - if (flag & FLAG_WIN_UPDATE) - ack_ev_flags |= CA_ACK_WIN_UPDATE; + if (flag & FLAG_WIN_UPDATE) + ack_ev_flags |= CA_ACK_WIN_UPDATE; - tcp_in_ack_event(sk, ack_ev_flags); - } + tcp_in_ack_event(sk, ack_ev_flags); /* We passed data and got it acked, remove any soft error * log. Something worked... diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c index bec9cafbe3f9..e5de84310949 100644 --- a/net/ipv4/tcp_westwood.c +++ b/net/ipv4/tcp_westwood.c @@ -153,24 +153,6 @@ static inline void update_rtt_min(struct westwood *w) w->rtt_min = min(w->rtt, w->rtt_min); } -/* - * @westwood_fast_bw - * It is called when we are in fast path. In particular it is called when - * header prediction is successful. In such case in fact update is - * straight forward and doesn't need any particular care. - */ -static inline void westwood_fast_bw(struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - struct westwood *w = inet_csk_ca(sk); - - westwood_update_window(sk); - - w->bk += tp->snd_una - w->snd_una; - w->snd_una = tp->snd_una; - update_rtt_min(w); -} - /* * @westwood_acked_count * This function evaluates cumul_ack for evaluating bk in case of @@ -223,17 +205,12 @@ static u32 tcp_westwood_bw_rttmin(const struct sock *sk) static void tcp_westwood_ack(struct sock *sk, u32 ack_flags) { - if (ack_flags & CA_ACK_SLOWPATH) { - struct westwood *w = inet_csk_ca(sk); - - westwood_update_window(sk); - w->bk += westwood_acked_count(sk); + struct westwood *w = inet_csk_ca(sk); - update_rtt_min(w); - return; - } + westwood_update_window(sk); + w->bk += westwood_acked_count(sk); - westwood_fast_bw(sk); + update_rtt_min(w); } static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event) -- cgit v1.2.3-55-g7522 From 3282e65558b3651e230ee985c174c35cb2fedaf1 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 30 Jul 2017 03:57:23 +0200 Subject: tcp: remove unused mib counters was used by tcp prequeue and header prediction. TCPFORWARDRETRANS use was removed in january. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- include/uapi/linux/snmp.h | 9 --------- net/ipv4/proc.c | 9 --------- 2 files changed, 18 deletions(-) diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index d85693295798..b3f346fb9fe3 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h @@ -184,14 +184,7 @@ enum LINUX_MIB_DELAYEDACKLOST, /* DelayedACKLost */ LINUX_MIB_LISTENOVERFLOWS, /* ListenOverflows */ LINUX_MIB_LISTENDROPS, /* ListenDrops */ - LINUX_MIB_TCPPREQUEUED, /* TCPPrequeued */ - LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, /* TCPDirectCopyFromBacklog */ - LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, /* TCPDirectCopyFromPrequeue */ - LINUX_MIB_TCPPREQUEUEDROPPED, /* TCPPrequeueDropped */ - LINUX_MIB_TCPHPHITS, /* TCPHPHits */ - LINUX_MIB_TCPHPHITSTOUSER, /* TCPHPHitsToUser */ LINUX_MIB_TCPPUREACKS, /* TCPPureAcks */ - LINUX_MIB_TCPHPACKS, /* TCPHPAcks */ LINUX_MIB_TCPRENORECOVERY, /* TCPRenoRecovery */ LINUX_MIB_TCPSACKRECOVERY, /* TCPSackRecovery */ LINUX_MIB_TCPSACKRENEGING, /* TCPSACKReneging */ @@ -208,14 +201,12 @@ enum LINUX_MIB_TCPSACKFAILURES, /* TCPSackFailures */ LINUX_MIB_TCPLOSSFAILURES, /* TCPLossFailures */ LINUX_MIB_TCPFASTRETRANS, /* TCPFastRetrans */ - LINUX_MIB_TCPFORWARDRETRANS, /* TCPForwardRetrans */ LINUX_MIB_TCPSLOWSTARTRETRANS, /* TCPSlowStartRetrans */ LINUX_MIB_TCPTIMEOUTS, /* TCPTimeouts */ LINUX_MIB_TCPLOSSPROBES, /* TCPLossProbes */ LINUX_MIB_TCPLOSSPROBERECOVERY, /* TCPLossProbeRecovery */ LINUX_MIB_TCPRENORECOVERYFAIL, /* TCPRenoRecoveryFail */ LINUX_MIB_TCPSACKRECOVERYFAIL, /* TCPSackRecoveryFail */ - LINUX_MIB_TCPSCHEDULERFAILED, /* TCPSchedulerFailed */ LINUX_MIB_TCPRCVCOLLAPSED, /* TCPRcvCollapsed */ LINUX_MIB_TCPDSACKOLDSENT, /* TCPDSACKOldSent */ LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */ diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 43eb6567b3a0..b6d3fe03feb3 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -206,14 +206,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("DelayedACKLost", LINUX_MIB_DELAYEDACKLOST), SNMP_MIB_ITEM("ListenOverflows", LINUX_MIB_LISTENOVERFLOWS), SNMP_MIB_ITEM("ListenDrops", LINUX_MIB_LISTENDROPS), - SNMP_MIB_ITEM("TCPPrequeued", LINUX_MIB_TCPPREQUEUED), - SNMP_MIB_ITEM("TCPDirectCopyFromBacklog", LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG), - SNMP_MIB_ITEM("TCPDirectCopyFromPrequeue", LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE), - SNMP_MIB_ITEM("TCPPrequeueDropped", LINUX_MIB_TCPPREQUEUEDROPPED), - SNMP_MIB_ITEM("TCPHPHits", LINUX_MIB_TCPHPHITS), - SNMP_MIB_ITEM("TCPHPHitsToUser", LINUX_MIB_TCPHPHITSTOUSER), SNMP_MIB_ITEM("TCPPureAcks", LINUX_MIB_TCPPUREACKS), - SNMP_MIB_ITEM("TCPHPAcks", LINUX_MIB_TCPHPACKS), SNMP_MIB_ITEM("TCPRenoRecovery", LINUX_MIB_TCPRENORECOVERY), SNMP_MIB_ITEM("TCPSackRecovery", LINUX_MIB_TCPSACKRECOVERY), SNMP_MIB_ITEM("TCPSACKReneging", LINUX_MIB_TCPSACKRENEGING), @@ -230,14 +223,12 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES), SNMP_MIB_ITEM("TCPLossFailures", LINUX_MIB_TCPLOSSFAILURES), SNMP_MIB_ITEM("TCPFastRetrans", LINUX_MIB_TCPFASTRETRANS), - SNMP_MIB_ITEM("TCPForwardRetrans", LINUX_MIB_TCPFORWARDRETRANS), SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS), SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS), SNMP_MIB_ITEM("TCPLossProbes", LINUX_MIB_TCPLOSSPROBES), SNMP_MIB_ITEM("TCPLossProbeRecovery", LINUX_MIB_TCPLOSSPROBERECOVERY), SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL), SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL), - 
SNMP_MIB_ITEM("TCPSchedulerFailed", LINUX_MIB_TCPSCHEDULERFAILED), SNMP_MIB_ITEM("TCPRcvCollapsed", LINUX_MIB_TCPRCVCOLLAPSED), SNMP_MIB_ITEM("TCPDSACKOldSent", LINUX_MIB_TCPDSACKOLDSENT), SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT), -- cgit v1.2.3-55-g7522 From 69a60b0579a4bf63871dfcfaca44a4e20c7d05f8 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 31 Jul 2017 12:04:22 -0700 Subject: net: phy: mdio-bcm-unimac: factor busy polling loop Factor the code that does the busy polling on the MDIO_BUSY bit since we will have different code-paths for for completion depending on whether we are using interrupts or polling. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio-bcm-unimac.c | 43 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 34395230ce70..226fdccfa1a8 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -57,10 +57,26 @@ static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv) return __raw_readl(priv->base + MDIO_CMD) & MDIO_START_BUSY; } +static int unimac_mdio_poll(struct unimac_mdio_priv *priv) +{ + unsigned int timeout = 1000; + + do { + if (!unimac_mdio_busy(priv)) + return 0; + + usleep_range(1000, 2000); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + + return 0; +} + static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct unimac_mdio_priv *priv = bus->priv; - unsigned int timeout = 1000; u32 cmd; /* Prepare the read operation */ @@ -70,15 +86,9 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) /* Start MDIO transaction */ unimac_mdio_start(priv); - do { - if (!unimac_mdio_busy(priv)) - break; - - usleep_range(1000, 2000); - } while (timeout--); - - if (!timeout) - return -ETIMEDOUT; + ret = unimac_mdio_poll(priv); + if (ret) + return ret; cmd = __raw_readl(priv->base + MDIO_CMD); @@ -97,7 +107,6 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) { struct unimac_mdio_priv *priv = bus->priv; - unsigned int timeout = 1000; u32 cmd; /* Prepare the write operation */ @@ -107,17 +116,7 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id, unimac_mdio_start(priv); - do { - if (!unimac_mdio_busy(priv)) - break; - - usleep_range(1000, 2000); - } while (timeout--); - - if (!timeout) - return -ETIMEDOUT; - - return 0; + return unimac_mdio_poll(priv); } /* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with -- cgit v1.2.3-55-g7522 From d782f7c935123ad99582af15c862bc7b0331d205 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 31 Jul 2017 12:04:23 -0700 Subject: net: phy: mdio-bcm-unimac: create unique bus names In preparation for having multiple GENET instances in a system (up to 3), make sure that we do include the bus instance number in the name of the MDIO bus such that we change it from "unimac-mdio" to "unimac-mdio-0" for instance. So far, the only user of this driver is using Device Tree, which uses a lookup/parenting based technique to map PHY devices to their respective MDIO bus controllers, hence causing no additional changes. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/mdio-bcm-unimac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 226fdccfa1a8..97cac10bd0d5 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -197,7 +197,7 @@ static int unimac_mdio_probe(struct platform_device *pdev) bus->read = unimac_mdio_read; bus->write = unimac_mdio_write; bus->reset = unimac_mdio_reset; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); ret = of_mdiobus_register(bus, np); if (ret) { -- cgit v1.2.3-55-g7522 From e23597f7524ba11ddecc6bb3d9b31e5285fd3b45 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 31 Jul 2017 12:04:24 -0700 Subject: net: phy: mdio-bcm-unimac: Add debug print for PHY workaround In order to be strictly identical to what bcmgenet does, add a debug print when a PHY workaround during bus->reset() is executed. Preliminary change to moving bcmgenet towards mdio-bcm-unimac. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/mdio-bcm-unimac.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 97cac10bd0d5..4e52692f9eea 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -154,8 +154,10 @@ static int unimac_mdio_reset(struct mii_bus *bus) } for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - if (read_mask & 1 << addr) + if (read_mask & 1 << addr) { + dev_dbg(&bus->dev, "Workaround for PHY @ %d\n", addr); mdiobus_read(bus, addr, MII_BMSR); + } } return 0; -- cgit v1.2.3-55-g7522 From f248aff86d1fd6e60b656c2af278f1723c3b84c2 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 31 Jul 2017 12:04:25 -0700 Subject: net: phy: mdio-bcm-unimac: Allow specifying platform data In preparation for having the bcmgenet driver migrate over to the mdio-bcm-unimac driver, add a platform data structure which allows passing integration-specific details like bus name, wait function to complete MDIO operations and PHY mask. We also define what the platform device name contract is by defining UNIMAC_MDIO_DRV_NAME and moving it to the platform_data header. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- MAINTAINERS | 1 + drivers/net/phy/mdio-bcm-unimac.c | 28 +++++++++++++++++++++------ include/linux/platform_data/mdio-bcm-unimac.h | 13 +++++++++++++ 3 files changed, 36 insertions(+), 6 deletions(-) create mode 100644 include/linux/platform_data/mdio-bcm-unimac.h diff --git a/MAINTAINERS b/MAINTAINERS index 297e610c9163..7d72fdbed6e6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5140,6 +5140,7 @@ L: netdev@vger.kernel.org S: Maintained F: include/linux/phy.h F: include/linux/phy_fixed.h +F: include/linux/platform_data/mdio-bcm-unimac.h F: drivers/net/phy/ F: Documentation/networking/phy.txt F: drivers/of/of_mdio.c diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 4e52692f9eea..89425ca48412 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -21,6 +21,8 @@ #include #include +#include + #define MDIO_CMD 0x00 #define MDIO_START_BUSY (1 << 29) #define MDIO_READ_FAIL (1 << 28) @@ -41,6 +43,8 @@ struct unimac_mdio_priv { struct mii_bus *mii_bus; void __iomem *base; + int (*wait_func) (void *wait_func_data); + void *wait_func_data; }; static inline void unimac_mdio_start(struct unimac_mdio_priv *priv) @@ -57,8 +61,9 @@ static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv) return __raw_readl(priv->base + MDIO_CMD) & MDIO_START_BUSY; } -static int unimac_mdio_poll(struct unimac_mdio_priv *priv) +static int unimac_mdio_poll(void *wait_func_data) { + struct unimac_mdio_priv *priv = wait_func_data; unsigned int timeout = 1000; do { @@ -77,6 +82,7 @@ static int unimac_mdio_poll(struct unimac_mdio_priv *priv) static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct unimac_mdio_priv *priv = bus->priv; + int ret; u32 cmd; /* Prepare the read operation */ @@ -86,7 +92,7 @@ static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg) /* Start MDIO transaction */ unimac_mdio_start(priv); - ret = unimac_mdio_poll(priv); + ret = priv->wait_func(priv->wait_func_data); if (ret) return ret; @@ -116,7 +122,7 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id, unimac_mdio_start(priv); - return unimac_mdio_poll(priv); + return priv->wait_func(priv->wait_func_data); } /* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with @@ -165,6 +171,7 @@ static int unimac_mdio_reset(struct mii_bus *bus) static int unimac_mdio_probe(struct platform_device *pdev) { + struct unimac_mdio_pdata *pdata = pdev->dev.platform_data; struct unimac_mdio_priv *priv; struct device_node *np; struct mii_bus *bus; @@ -194,7 +201,16 @@ static int unimac_mdio_probe(struct platform_device *pdev) bus = priv->mii_bus; bus->priv = priv; - bus->name = "unimac MII bus"; + if (pdata) { + bus->name = pdata->bus_name; + priv->wait_func = pdata->wait_func; + priv->wait_func_data = pdata->wait_func_data; + bus->phy_mask = ~pdata->phy_mask; + } else { + bus->name = "unimac MII bus"; + priv->wait_func_data = priv; + priv->wait_func = unimac_mdio_poll; + } bus->parent = &pdev->dev; bus->read = unimac_mdio_read; bus->write = unimac_mdio_write; @@ -241,7 +257,7 @@ MODULE_DEVICE_TABLE(of, unimac_mdio_ids); static struct platform_driver unimac_mdio_driver = { .driver = { - .name = "unimac-mdio", + .name = UNIMAC_MDIO_DRV_NAME, .of_match_table = unimac_mdio_ids, }, .probe = unimac_mdio_probe, @@ -252,4 +268,4 @@ module_platform_driver(unimac_mdio_driver); MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom UniMAC MDIO bus controller"); MODULE_LICENSE("GPL"); 
-MODULE_ALIAS("platform:unimac-mdio"); +MODULE_ALIAS("platform:" UNIMAC_MDIO_DRV_NAME); diff --git a/include/linux/platform_data/mdio-bcm-unimac.h b/include/linux/platform_data/mdio-bcm-unimac.h new file mode 100644 index 000000000000..8a5f9f0b2c52 --- /dev/null +++ b/include/linux/platform_data/mdio-bcm-unimac.h @@ -0,0 +1,13 @@ +#ifndef __MDIO_BCM_UNIMAC_PDATA_H +#define __MDIO_BCM_UNIMAC_PDATA_H + +struct unimac_mdio_pdata { + u32 phy_mask; + int (*wait_func)(void *data); + void *wait_func_data; + const char *bus_name; +}; + +#define UNIMAC_MDIO_DRV_NAME "unimac-mdio" + +#endif /* __MDIO_BCM_UNIMAC_PDATA_H */ -- cgit v1.2.3-55-g7522 From 9a4e79697009ddd0d1af52053c830f6e60e1c771 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 31 Jul 2017 12:04:26 -0700 Subject: net: bcmgenet: utilize generic Broadcom UniMAC MDIO controller driver Update the GENET driver to register an UniMAC MDIO bus controller for the GENET internal MDIO bus, update the platform data code to attach the PHY to the correct MDIO bus controller. The Device Tree portion of the code is mostly left unmodified since the lookup/binding is done via phandles and Device Tree nodes which are much more flexible in locating and binding PHYs to their respective MDIO bus controllers. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/Kconfig | 1 + drivers/net/ethernet/broadcom/genet/bcmgenet.h | 1 + drivers/net/ethernet/broadcom/genet/bcmmii.c | 148 +++++++++++++++++++------ 3 files changed, 116 insertions(+), 34 deletions(-) diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 285f8bc25682..ec7a798c6bd1 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -65,6 +65,7 @@ config BCMGENET select PHYLIB select FIXED_PHY select BCM7XXX_PHY + select MDIO_BCM_UNIMAC help This driver supports the built-in Ethernet MACs found in the Broadcom BCM7xxx Set Top Box family chipset. 
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b9344de669f8..4775999ee016 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -657,6 +657,7 @@ struct bcmgenet_priv { struct clk *clk; struct platform_device *pdev; + struct platform_device *mii_pdev; /* WOL */ struct clk *clk_wol; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 071fcbd14e6a..368d5eab306b 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "bcmgenet.h" @@ -464,31 +465,120 @@ static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) return 0; } -static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) { struct device_node *dn = priv->pdev->dev.of_node; struct device *kdev = &priv->pdev->dev; - struct phy_device *phydev = NULL; char *compat; - int phy_mode; - int ret; compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version); if (!compat) - return -ENOMEM; + return NULL; priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); kfree(compat); if (!priv->mdio_dn) { dev_err(kdev, "unable to find MDIO bus node\n"); - return -ENODEV; + return NULL; } - ret = of_mdiobus_register(priv->mii_bus, priv->mdio_dn); - if (ret) { - dev_err(kdev, "failed to register MDIO bus\n"); - return ret; + return priv->mdio_dn; +} + +static void bcmgenet_mii_pdata_init(struct bcmgenet_priv *priv, + struct unimac_mdio_pdata *ppd) +{ + struct device *kdev = &priv->pdev->dev; + struct bcmgenet_platform_data *pd = kdev->platform_data; + + if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + /* + * Internal or external PHY with MDIO access + */ + if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) + ppd->phy_mask = 1 << pd->phy_address; + else + ppd->phy_mask = 0; } +} + +static int bcmgenet_mii_wait(void *wait_func_data) +{ + struct bcmgenet_priv *priv = wait_func_data; + + wait_event_timeout(priv->wq, + !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) + & MDIO_START_BUSY), + HZ / 100); + return 0; +} + +static int bcmgenet_mii_register(struct bcmgenet_priv *priv) +{ + struct platform_device *pdev = priv->pdev; + struct bcmgenet_platform_data *pdata = pdev->dev.platform_data; + struct device_node *dn = pdev->dev.of_node; + struct unimac_mdio_pdata ppd; + struct platform_device *ppdev; + struct resource *pres, res; + int id, ret; + + pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + memset(&res, 0, sizeof(res)); + memset(&ppd, 0, sizeof(ppd)); + + ppd.wait_func = bcmgenet_mii_wait; + ppd.wait_func_data = priv; + ppd.bus_name = "bcmgenet MII bus"; + + /* Unimac MDIO bus controller starts at UniMAC offset + MDIO_CMD + * and is 2 * 32-bits word long, 8 bytes total. 
+ */ + res.start = pres->start + GENET_UMAC_OFF + UMAC_MDIO_CMD; + res.end = res.start + 8; + res.flags = IORESOURCE_MEM; + + if (dn) + id = of_alias_get_id(dn, "eth"); + else + id = pdev->id; + + ppdev = platform_device_alloc(UNIMAC_MDIO_DRV_NAME, id); + if (!ppdev) + return -ENOMEM; + + /* Retain this platform_device pointer for later cleanup */ + priv->mii_pdev = ppdev; + ppdev->dev.parent = &pdev->dev; + ppdev->dev.of_node = bcmgenet_mii_of_find_mdio(priv); + if (pdata) + bcmgenet_mii_pdata_init(priv, &ppd); + + ret = platform_device_add_resources(ppdev, &res, 1); + if (ret) + goto out; + + ret = platform_device_add_data(ppdev, &ppd, sizeof(ppd)); + if (ret) + goto out; + + ret = platform_device_add(ppdev); + if (ret) + goto out; + + return 0; +out: + platform_device_put(ppdev); + return ret; +} + +static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct device *kdev = &priv->pdev->dev; + struct phy_device *phydev; + int phy_mode; + int ret; /* Fetch the PHY phandle */ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0); @@ -536,33 +626,23 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) { struct device *kdev = &priv->pdev->dev; struct bcmgenet_platform_data *pd = kdev->platform_data; - struct mii_bus *mdio = priv->mii_bus; + char phy_name[MII_BUS_ID_SIZE + 3]; + char mdio_bus_id[MII_BUS_ID_SIZE]; struct phy_device *phydev; - int ret; + + snprintf(mdio_bus_id, MII_BUS_ID_SIZE, "%s-%d", + UNIMAC_MDIO_DRV_NAME, priv->pdev->id); if (pd->phy_interface != PHY_INTERFACE_MODE_MOCA && pd->mdio_enabled) { + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, + mdio_bus_id, pd->phy_address); + /* * Internal or external PHY with MDIO access */ - if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - mdio->phy_mask = ~(1 << pd->phy_address); - else - mdio->phy_mask = 0; - - ret = mdiobus_register(mdio); - if (ret) { - dev_err(kdev, "failed to register MDIO bus\n"); - return ret; - } - - if (pd->phy_address >= 0 && pd->phy_address < PHY_MAX_ADDR) - phydev = mdiobus_get_phy(mdio, pd->phy_address); - else - phydev = phy_find_first(mdio); - + phydev = phy_attach(priv->dev, phy_name, pd->phy_interface); if (!phydev) { dev_err(kdev, "failed to register PHY device\n"); - mdiobus_unregister(mdio); return -ENODEV; } } else { @@ -611,7 +691,7 @@ int bcmgenet_mii_init(struct net_device *dev) struct device_node *dn = priv->pdev->dev.of_node; int ret; - ret = bcmgenet_mii_alloc(priv); + ret = bcmgenet_mii_register(priv); if (ret) return ret; @@ -625,8 +705,8 @@ out: if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); + platform_device_unregister(priv->mii_pdev); + platform_device_put(priv->mii_pdev); return ret; } @@ -638,6 +718,6 @@ void bcmgenet_mii_exit(struct net_device *dev) if (of_phy_is_fixed_link(dn)) of_phy_deregister_fixed_link(dn); of_node_put(priv->phy_dn); - mdiobus_unregister(priv->mii_bus); - mdiobus_free(priv->mii_bus); + platform_device_unregister(priv->mii_pdev); + platform_device_put(priv->mii_pdev); } -- cgit v1.2.3-55-g7522 From 2b13c3ae098142b9a7da0b6d989081f0b563b0dd Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 31 Jul 2017 12:04:27 -0700 Subject: net: bcmgenet: Drop legacy MDIO code Now that we have fully migrated to the mdio-bcm-unimac driver, drop the legacy MDIO bus code which did duplicate a fair amount of code. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmmii.c | 125 --------------------------- 1 file changed, 125 deletions(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 368d5eab306b..7fdc352628f9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -28,59 +28,6 @@ #include "bcmgenet.h" -/* read a value from the MII */ -static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location) -{ - int ret; - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; - - bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | - (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD); - /* Start MDIO transaction*/ - reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - reg |= MDIO_START_BUSY; - bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); - wait_event_timeout(priv->wq, - !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) - & MDIO_START_BUSY), - HZ / 100); - ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - - /* Some broken devices are known not to release the line during - * turn-around, e.g: Broadcom BCM53125 external switches, so check for - * that condition here and ignore the MDIO controller read failure - * indication. - */ - if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (ret & MDIO_READ_FAIL)) - return -EIO; - - return ret & 0xffff; -} - -/* write a value to the MII */ -static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id, - int location, u16 val) -{ - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - u32 reg; - - bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) | - (location << MDIO_REG_SHIFT) | (0xffff & val)), - UMAC_MDIO_CMD); - reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD); - reg |= MDIO_START_BUSY; - bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD); - wait_event_timeout(priv->wq, - !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) & - MDIO_START_BUSY), - HZ / 100); - - return 0; -} - /* setup netdev link state when PHY link status change and * update UMAC and RGMII block when link up */ @@ -393,78 +340,6 @@ int bcmgenet_mii_probe(struct net_device *dev) return 0; } -/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with - * their internal MDIO management controller making them fail to successfully - * be read from or written to for the first transaction. We insert a dummy - * BMSR read here to make sure that phy_get_device() and get_phy_id() can - * correctly read the PHY MII_PHYSID1/2 registers and successfully register a - * PHY device for this peripheral. - * - * Once the PHY driver is registered, we can workaround subsequent reads from - * there (e.g: during system-wide power management). - * - * bus->reset is invoked before mdiobus_scan during mdiobus_register and is - * therefore the right location to stick that workaround. Since we do not want - * to read from non-existing PHYs, we either use bus->phy_mask or do a manual - * Device Tree scan to limit the search area. 
- */ -static int bcmgenet_mii_bus_reset(struct mii_bus *bus) -{ - struct net_device *dev = bus->priv; - struct bcmgenet_priv *priv = netdev_priv(dev); - struct device_node *np = priv->mdio_dn; - struct device_node *child = NULL; - u32 read_mask = 0; - int addr = 0; - - if (!np) { - read_mask = 1 << priv->phy_addr; - } else { - for_each_available_child_of_node(np, child) { - addr = of_mdio_parse_addr(&dev->dev, child); - if (addr < 0) - continue; - - read_mask |= 1 << addr; - } - } - - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - if (read_mask & 1 << addr) { - dev_dbg(&dev->dev, "Workaround for PHY @ %d\n", addr); - mdiobus_read(bus, addr, MII_BMSR); - } - } - - return 0; -} - -static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) -{ - struct mii_bus *bus; - - if (priv->mii_bus) - return 0; - - priv->mii_bus = mdiobus_alloc(); - if (!priv->mii_bus) { - pr_err("failed to allocate\n"); - return -ENOMEM; - } - - bus = priv->mii_bus; - bus->priv = priv->dev; - bus->name = "bcmgenet MII bus"; - bus->parent = &priv->pdev->dev; - bus->read = bcmgenet_mii_read; - bus->write = bcmgenet_mii_write; - bus->reset = bcmgenet_mii_bus_reset; - snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", - priv->pdev->name, priv->pdev->id); - - return 0; -} - static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv) { struct device_node *dn = priv->pdev->dev.of_node; -- cgit v1.2.3-55-g7522 From 6f24b85e265a29f570f83c1c3694444ca0e07a56 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 31 Jul 2017 12:04:28 -0700 Subject: net: bcmgenet: Utilize bcmgenet_mii_exit() for error path bcmgenet_mii_init() has an error path which is strictly identical to the unwinding that bcmgenet_mii_exit() does, so have bcmgenet_mii_init() utilize bcmgenet_mii_exit() for that. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/genet/bcmmii.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 7fdc352628f9..33d3f60ac74b 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -563,7 +563,6 @@ static int bcmgenet_mii_bus_init(struct bcmgenet_priv *priv) int bcmgenet_mii_init(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct device_node *dn = priv->pdev->dev.of_node; int ret; ret = bcmgenet_mii_register(priv); @@ -577,11 +576,7 @@ int bcmgenet_mii_init(struct net_device *dev) return 0; out: - if (of_phy_is_fixed_link(dn)) - of_phy_deregister_fixed_link(dn); - of_node_put(priv->phy_dn); - platform_device_unregister(priv->mii_pdev); - platform_device_put(priv->mii_pdev); + bcmgenet_mii_exit(dev); return ret; } -- cgit v1.2.3-55-g7522 From 4bb51bd64f65acfb6bb63fea69fec8cbb39a33ad Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 31 Jul 2017 09:27:23 +0200 Subject: mlxsw: spectrum_acl: Fix a typo Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 01a1501b56ca..508b5fcacd77 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -369,7 +369,7 @@ int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp, local_port = mlxsw_sp_port->local_port; in_port = false; } else { - /* If out_dev is NULL, the called wants to + /* If out_dev is NULL, the caller wants to * set forward to ingress port. */ local_port = 0; -- cgit v1.2.3-55-g7522 From 806a1c1ab1b9bba06e690b306f4db5fab0ebf3dd Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 31 Jul 2017 09:27:24 +0200 Subject: mlxsw: reg.h: Fix a typo Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index c6c508941d23..b4ea8cbc1f7d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4718,7 +4718,7 @@ MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8); /* reg_ralue_dip* * The prefix of the route or of the marker that the object of the LPM * is compared with. The most significant bits of the dip are the prefix. - * The list significant bits must be '0' if the prefix_len is smaller + * The least significant bits must be '0' if the prefix_len is smaller * than 128 for IPv6 or smaller than 32 for IPv4. * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved. * Access: Index -- cgit v1.2.3-55-g7522 From 8de3c17819ef5b3ee1c6e75f7ad98e90ff421063 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 31 Jul 2017 09:27:25 +0200 Subject: mlxsw: spectrum_router: Fix a typo Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 8bf076d22fb6..b6df3c3b5290 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1804,7 +1804,7 @@ static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp, return 0; /* Take a reference of neigh here ensuring that neigh would - * not be detructed before the nexthop entry is finished. + * not be destructed before the nexthop entry is finished. * The reference is taken either in neigh_lookup() or * in neigh_create() in case n is not found. */ -- cgit v1.2.3-55-g7522 From 78676ad4fb4e4f6345385e09a315707d7c355a52 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 31 Jul 2017 09:27:26 +0200 Subject: mlxsw: Update specification of reg_ritr_type The comments really belong to the individual enumerators. The comment at the register should instead reference the enum. Signed-off-by: Petr Machata Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index b4ea8cbc1f7d..a671438562fd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -3992,16 +3992,16 @@ MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1); MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1); enum mlxsw_reg_ritr_if_type { + /* VLAN interface. */ MLXSW_REG_RITR_VLAN_IF, + /* FID interface. */ MLXSW_REG_RITR_FID_IF, + /* Sub-port interface. */ MLXSW_REG_RITR_SP_IF, }; /* reg_ritr_type - * Router interface type. - * 0 - VLAN interface. - * 1 - FID interface. - * 2 - Sub-port interface. + * Router interface type as per enum mlxsw_reg_ritr_if_type. * Access: RW */ MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3); -- cgit v1.2.3-55-g7522 From 83930cd76abc81c23e355a7df8b1c0646a55df43 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 31 Jul 2017 09:27:27 +0200 Subject: mlxsw: reg.h: Namespace IP2ME registers This renames IP2ME-specific registers reg_ralue_v and reg_ralue_tunnel_ptr to reg_ralue_ip2me_*. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index a671438562fd..7e8ba546c3a4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4813,7 +4813,7 @@ MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13); */ MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16); -/* reg_ralue_v +/* reg_ralue_ip2me_v * Valid bit for the tunnel_ptr field. * If valid = 0 then trap to CPU as IP2ME trap ID. * If valid = 1 and the packet format allows NVE or IPinIP tunnel @@ -4823,15 +4823,15 @@ MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16); * Only relevant in case of IP2ME action. * Access: RW */ -MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1); +MLXSW_ITEM32(reg, ralue, ip2me_v, 0x24, 31, 1); -/* reg_ralue_tunnel_ptr +/* reg_ralue_ip2me_tunnel_ptr * Tunnel Pointer for NVE or IPinIP tunnel decapsulation. * For Spectrum, pointer to KVD Linear. * Only relevant in case of IP2ME action. * Access: RW */ -MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24); +MLXSW_ITEM32(reg, ralue, ip2me_tunnel_ptr, 0x24, 0, 24); static inline void mlxsw_reg_ralue_pack(char *payload, enum mlxsw_reg_ralxx_protocol protocol, -- cgit v1.2.3-55-g7522 From f1b1f273aec0478c17ead69c5f285edd495d3d58 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 31 Jul 2017 09:27:28 +0200 Subject: mlxsw: spectrum_router: Simplify a piece of code Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index b6df3c3b5290..600268cf4fa1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3124,9 +3124,7 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev, switch (event) { case NETDEV_UP: - if (!rif) - return true; - return false; + return rif == NULL; case NETDEV_DOWN: idev = __in_dev_get_rtnl(dev); if (idev && idev->ifa_list) -- cgit v1.2.3-55-g7522 From 56b8a9ed276bc1771deb4efd5f409749ababc961 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 31 Jul 2017 09:27:29 +0200 Subject: mlxsw: spectrum_router: Clarify a piece of code Prefer logical operator that expresses the intent to bitwise one that happens to give the same result. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 600268cf4fa1..c69efbd955c5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1668,7 +1668,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; - if (nh->should_offload ^ nh->offloaded) { + if (nh->should_offload != nh->offloaded) { offload_change = true; if (nh->should_offload) nh->update = 1; -- cgit v1.2.3-55-g7522 From 213666a3563f71b4a168b1fdc8c64115218e7758 Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Mon, 31 Jul 2017 09:27:30 +0200 Subject: mlxsw: spectrum_router: Simplify a piece of code Express the same logic more succinctly. Signed-off-by: Petr Machata Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index c69efbd955c5..802f5b8d8761 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1752,9 +1752,9 @@ set_trap: static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh, bool removing) { - if (!removing && !nh->should_offload) + if (!removing) nh->should_offload = 1; - else if (removing && nh->offloaded) + else if (nh->offloaded) nh->should_offload = 0; nh->update = 1; } -- cgit v1.2.3-55-g7522 From 6a95befc8d0346d6cb3b4646c761e8b42e66a4df Mon Sep 17 00:00:00 2001 From: Marc Gonzalez Date: Fri, 28 Jul 2017 13:18:30 +0200 Subject: net: phy: Log only PHY state transitions In the current code, old and new PHY states are always logged. From now on, log only PHY state transitions. Signed-off-by: Marc Gonzalez Signed-off-by: David S.
Miller --- drivers/net/phy/phy.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index ac1dcf0289fa..3aedf415908b 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1225,9 +1225,10 @@ void phy_state_machine(struct work_struct *work) if (err < 0) phy_error(phydev); - phydev_dbg(phydev, "PHY state change %s -> %s\n", - phy_state_to_str(old_state), - phy_state_to_str(phydev->state)); + if (old_state != phydev->state) + phydev_dbg(phydev, "PHY state change %s -> %s\n", + phy_state_to_str(old_state), + phy_state_to_str(phydev->state)); /* Only re-schedule a PHY state machine change if we are polling the * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving -- cgit v1.2.3-55-g7522 From 0263598c774250f72b275f7f44f93dfd85b88f2b Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Fri, 28 Jul 2017 10:28:20 -0700 Subject: tcp: extract the function to compute delivery rate Refactor the code to extract the function to compute delivery rate. This function will be used in later commit. Signed-off-by: Wei Wang Acked-by: Yuchung Cheng Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller --- net/ipv4/tcp.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e022874d509f..acee7acdcba6 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -388,6 +388,19 @@ static int retrans_to_secs(u8 retrans, int timeout, int rto_max) return period; } +static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp) +{ + u32 rate = READ_ONCE(tp->rate_delivered); + u32 intv = READ_ONCE(tp->rate_interval_us); + u64 rate64 = 0; + + if (rate && intv) { + rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC; + do_div(rate64, intv); + } + return rate64; +} + /* Address-family independent initialization for a tcp_sock. * * NOTE: A lot of things set to zero explicitly by call to @@ -2716,7 +2729,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) { const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ const struct inet_connection_sock *icsk = inet_csk(sk); - u32 now, intv; + u32 now; u64 rate64; bool slow; u32 rate; @@ -2815,13 +2828,9 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_data_segs_out = tp->data_segs_out; info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0; - rate = READ_ONCE(tp->rate_delivered); - intv = READ_ONCE(tp->rate_interval_us); - if (rate && intv) { - rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC; - do_div(rate64, intv); + rate64 = tcp_compute_delivery_rate(tp); + if (rate64) info->tcpi_delivery_rate = rate64; - } unlock_sock_fast(sk, slow); } EXPORT_SYMBOL_GPL(tcp_get_info); -- cgit v1.2.3-55-g7522 From bb7c19f96012720b895111300b9d9f3f858c3a69 Mon Sep 17 00:00:00 2001 From: Wei Wang Date: Fri, 28 Jul 2017 10:28:21 -0700 Subject: tcp: add related fields into SCM_TIMESTAMPING_OPT_STATS Add the following stats into SCM_TIMESTAMPING_OPT_STATS control msg: TCP_NLA_PACING_RATE TCP_NLA_DELIVERY_RATE TCP_NLA_SND_CWND TCP_NLA_REORDERING TCP_NLA_MIN_RTT TCP_NLA_RECUR_RETRANS TCP_NLA_DELIVERY_RATE_APP_LMT Signed-off-by: Wei Wang Acked-by: Yuchung Cheng Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. 
Miller --- include/uapi/linux/tcp.h | 8 ++++++++ net/ipv4/tcp.c | 20 +++++++++++++++++++- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index a5507c977497..030e594bab45 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -231,6 +231,14 @@ enum { TCP_NLA_SNDBUF_LIMITED, /* Time (usec) limited by send buffer */ TCP_NLA_DATA_SEGS_OUT, /* Data pkts sent including retransmission */ TCP_NLA_TOTAL_RETRANS, /* Data pkts retransmitted */ + TCP_NLA_PACING_RATE, /* Pacing rate in bytes per second */ + TCP_NLA_DELIVERY_RATE, /* Delivery rate in bytes per second */ + TCP_NLA_SND_CWND, /* Sending congestion window */ + TCP_NLA_REORDERING, /* Reordering metric */ + TCP_NLA_MIN_RTT, /* minimum RTT */ + TCP_NLA_RECUR_RETRANS, /* Recurring retransmits for the current pkt */ + TCP_NLA_DELIVERY_RATE_APP_LMT, /* delivery rate application limited ? */ + }; /* for TCP_MD5SIG socket option */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index acee7acdcba6..5326b50a3450 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2840,8 +2840,12 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *stats; struct tcp_info info; + u64 rate64; + u32 rate; - stats = alloc_skb(5 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC); + stats = alloc_skb(7 * nla_total_size_64bit(sizeof(u64)) + + 3 * nla_total_size(sizeof(u32)) + + 2 * nla_total_size(sizeof(u8)), GFP_ATOMIC); if (!stats) return NULL; @@ -2856,6 +2860,20 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk) tp->data_segs_out, TCP_NLA_PAD); nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS, tp->total_retrans, TCP_NLA_PAD); + + rate = READ_ONCE(sk->sk_pacing_rate); + rate64 = rate != ~0U ? rate : ~0ULL; + nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD); + + rate64 = tcp_compute_delivery_rate(tp); + nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); + + nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd); + nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); + nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); + + nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits); + nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited); return stats; } -- cgit v1.2.3-55-g7522 From 5af74bb4fcf8935cff38757400c25ae10eed4fd6 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 31 Jul 2017 17:53:07 -0700 Subject: net: bcmgenet: Add dependency on HAS_IOMEM && OF The driver needs CONFIG_HAS_IOMEM and OF to be functional, but we still let it build with COMPILE_TEST. This fixes the unmet dependency after selecting MDIO_BCM_UNIMAC in commit mentioned below: warning: (NET_DSA_BCM_SF2 && BCMGENET) selects MDIO_BCM_UNIMAC which has unmet direct dependencies (NETDEVICES && MDIO_DEVICE && HAS_IOMEM && OF_MDIO) Fixes: 9a4e79697009 ("net: bcmgenet: utilize generic Broadcom UniMAC MDIO controller driver") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index ec7a798c6bd1..45775399cab6 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -61,6 +61,7 @@ config BCM63XX_ENET config BCMGENET tristate "Broadcom GENET internal MAC support" + depends on (OF && HAS_IOMEM) || COMPILE_TEST select MII select PHYLIB select FIXED_PHY -- cgit v1.2.3-55-g7522 From 8cf8b87b73993980d703d71bd84bd8ab7c2898e4 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 30 Jul 2017 22:41:44 +0200 Subject: net: phy: marvell: tabification Convert spaces to tabs where appropriate, and fix up some otherwise odd indentation. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 5d314f143aea..6a5256ceb11e 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -108,24 +108,24 @@ #define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) /* Copper Specific Interrupt Enable Register */ -#define MII_88E1318S_PHY_CSIER 0x12 +#define MII_88E1318S_PHY_CSIER 0x12 /* WOL Event Interrupt Enable */ -#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7) +#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7) /* LED Timer Control Register */ -#define MII_88E1318S_PHY_LED_TCR 0x12 -#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15) -#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7) -#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11) +#define MII_88E1318S_PHY_LED_TCR 0x12 +#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15) +#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7) +#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11) /* Magic Packet MAC address registers */ -#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17 -#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18 -#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19 +#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17 +#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18 +#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19 -#define MII_88E1318S_PHY_WOL_CTRL 0x10 -#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) -#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) +#define MII_88E1318S_PHY_WOL_CTRL 0x10 +#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) +#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) #define MII_88E1121_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_DEF 0x0030 @@ -152,7 +152,7 @@ #define LPA_FIBER_1000HALF 0x40 #define LPA_FIBER_1000FULL 0x20 -#define LPA_PAUSE_FIBER 0x180 +#define LPA_PAUSE_FIBER 0x180 #define LPA_PAUSE_ASYM_FIBER 0x100 #define ADVERTISE_FIBER_1000HALF 0x40 @@ -596,7 +596,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev) if (changed == 0) { /* Advertisement hasn't changed, but maybe aneg was never on to - * begin with? Or maybe phy was isolated? + * begin with? Or maybe phy was isolated? 
*/ int ctl = phy_read(phydev, MII_BMCR); @@ -1515,7 +1515,7 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data) } #ifndef UINT64_MAX -#define UINT64_MAX (u64)(~((u64)0)) +#define UINT64_MAX (u64)(~((u64)0)) #endif static u64 marvell_get_stat(struct phy_device *phydev, int i) { -- cgit v1.2.3-55-g7522 From 3438634456c40985bc12eafbd7aacb648ac2d454 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 30 Jul 2017 22:41:45 +0200 Subject: net: phy: marvell: Use core genphy_soft_reset() Rather than using an open coded equivalent, use the core genphy_soft_reset() function. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 47 ++++++++++++----------------------------------- 1 file changed, 12 insertions(+), 35 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 6a5256ceb11e..33a52532fac6 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -292,17 +292,11 @@ static int marvell_config_aneg(struct phy_device *phydev) return err; if (phydev->autoneg != AUTONEG_ENABLE) { - int bmcr; - /* A write to speed/duplex bits (that is performed by * genphy_config_aneg() call above) must be followed by * a software reset. Otherwise, the write has no effect. */ - bmcr = phy_read(phydev, MII_BMCR); - if (bmcr < 0) - return bmcr; - - err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; } @@ -318,8 +312,7 @@ static int m88e1101_config_aneg(struct phy_device *phydev) * that certain registers get written in order * to restart autonegotiation */ - err = phy_write(phydev, MII_BMCR, BMCR_RESET); - + err = genphy_soft_reset(phydev); if (err < 0) return err; @@ -354,7 +347,7 @@ static int m88e1111_config_aneg(struct phy_device *phydev) * that certain registers get written in order * to restart autonegotiation */ - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) @@ -370,17 +363,11 @@ static int m88e1111_config_aneg(struct phy_device *phydev) return err; if (phydev->autoneg != AUTONEG_ENABLE) { - int bmcr; - /* A write to speed/duplex bits (that is performed by * genphy_config_aneg() call above) must be followed by * a software reset. Otherwise, the write has no effect. 
*/ - bmcr = phy_read(phydev, MII_BMCR); - if (bmcr < 0) - return bmcr; - - err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; } @@ -493,7 +480,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev) marvell_set_page(phydev, oldpage); - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; @@ -656,9 +643,7 @@ static int m88e1116r_config_init(struct phy_device *phydev) int temp; int err; - temp = phy_read(phydev, MII_BMCR); - temp |= BMCR_RESET; - err = phy_write(phydev, MII_BMCR, temp); + err = genphy_soft_reset(phydev); if (err < 0) return err; @@ -689,14 +674,10 @@ static int m88e1116r_config_init(struct phy_device *phydev) if (err < 0) return err; - temp = phy_read(phydev, MII_BMCR); - temp |= BMCR_RESET; - err = phy_write(phydev, MII_BMCR, temp); + err = genphy_soft_reset(phydev); if (err < 0) return err; - mdelay(500); - return marvell_config_init(phydev); } @@ -804,14 +785,10 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev) return err; /* soft reset */ - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; - do - temp = phy_read(phydev, MII_BMCR); - while (temp & BMCR_RESET); - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); if (temp < 0) return temp; @@ -850,7 +827,7 @@ static int m88e1111_config_init(struct phy_device *phydev) if (err < 0) return err; - return phy_write(phydev, MII_BMCR, BMCR_RESET); + return genphy_soft_reset(phydev); } static int m88e1121_config_init(struct phy_device *phydev) @@ -912,7 +889,7 @@ static int m88e1118_config_aneg(struct phy_device *phydev) { int err; - err = phy_write(phydev, MII_BMCR, BMCR_RESET); + err = genphy_soft_reset(phydev); if (err < 0) return err; @@ -961,7 +938,7 @@ static int m88e1118_config_init(struct phy_device *phydev) if (err < 0) return err; - return phy_write(phydev, MII_BMCR, BMCR_RESET); + return genphy_soft_reset(phydev); } static int m88e1149_config_init(struct phy_device *phydev) @@ -987,7 +964,7 @@ static int m88e1149_config_init(struct phy_device *phydev) if (err < 0) return err; - return phy_write(phydev, MII_BMCR, BMCR_RESET); + return genphy_soft_reset(phydev); } static int m88e1145_config_init_rgmii(struct phy_device *phydev) -- cgit v1.2.3-55-g7522 From 61111598b08bcdc84688de81c59605d56b197fbf Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 30 Jul 2017 22:41:46 +0200 Subject: net: phy: marvell: consolidate RGMII delay code The same code is repeated for different PHY versions. Put it into a helper and call it when needed. Signed-off-by: Andrew Lunn Signed-off-by: David S.
Miller --- drivers/net/phy/marvell.c | 54 +++++++++++++++++++---------------------------- 1 file changed, 22 insertions(+), 32 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 33a52532fac6..c1b724ab5f25 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -61,13 +61,6 @@ #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 #define MII_M1145_PHY_EXT_SR 0x1b -#define MII_M1145_PHY_EXT_CR 0x14 -#define MII_M1145_RGMII_RX_DELAY 0x0080 -#define MII_M1145_RGMII_TX_DELAY 0x0002 -#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 -#define MII_M1145_HWCFG_MODE_MASK 0xf -#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 - #define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 #define MII_M1145_HWCFG_MODE_MASK 0xf #define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 @@ -76,8 +69,8 @@ #define MII_M1111_PHY_LED_DIRECT 0x4100 #define MII_M1111_PHY_LED_COMBINE 0x411c #define MII_M1111_PHY_EXT_CR 0x14 -#define MII_M1111_RX_DELAY 0x80 -#define MII_M1111_TX_DELAY 0x2 +#define MII_M1111_RGMII_RX_DELAY BIT(7) +#define MII_M1111_RGMII_TX_DELAY BIT(1) #define MII_M1111_PHY_EXT_SR 0x1b #define MII_M1111_HWCFG_MODE_MASK 0xf @@ -700,9 +693,8 @@ static int m88e3016_config_init(struct phy_device *phydev) return marvell_config_init(phydev); } -static int m88e1111_config_init_rgmii(struct phy_device *phydev) +static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev) { - int err; int temp; temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); @@ -710,16 +702,24 @@ static int m88e1111_config_init_rgmii(struct phy_device *phydev) return temp; if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) { - temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY); + temp |= (MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY); } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) { - temp &= ~MII_M1111_TX_DELAY; - temp |= MII_M1111_RX_DELAY; + temp &= ~MII_M1111_RGMII_TX_DELAY; + temp |= MII_M1111_RGMII_RX_DELAY; } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) { - temp &= ~MII_M1111_RX_DELAY; - temp |= MII_M1111_TX_DELAY; + temp &= ~MII_M1111_RGMII_RX_DELAY; + temp |= MII_M1111_RGMII_TX_DELAY; } - err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); + return phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); +} + +static int m88e1111_config_init_rgmii(struct phy_device *phydev) +{ + int temp; + int err; + + err = m88e1111_config_init_rgmii_delays(phydev); if (err < 0) return err; @@ -760,16 +760,11 @@ static int m88e1111_config_init_sgmii(struct phy_device *phydev) static int m88e1111_config_init_rtbi(struct phy_device *phydev) { - int err; int temp; + int err; - temp = phy_read(phydev, MII_M1111_PHY_EXT_CR); - if (temp < 0) - return temp; - - temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY); - err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp); - if (err < 0) + err = m88e1111_config_init_rgmii_delays(phydev); + if (err) return err; temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); @@ -969,15 +964,10 @@ static int m88e1149_config_init(struct phy_device *phydev) static int m88e1145_config_init_rgmii(struct phy_device *phydev) { + int temp; int err; - int temp = phy_read(phydev, MII_M1145_PHY_EXT_CR); - - if (temp < 0) - return temp; - - temp |= (MII_M1145_RGMII_RX_DELAY | MII_M1145_RGMII_TX_DELAY); - err = phy_write(phydev, MII_M1145_PHY_EXT_CR, temp); + err = m88e1111_config_init_rgmii_delays(phydev); if (err < 0) return err; -- cgit v1.2.3-55-g7522 From 865b813aa273cd411066d05c47154d5d1490eac7 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 30 Jul 2017 22:41:47 +0200 
Subject: net: phy: marvell: Consolidate setting the phy-mode The same code is repeated a few times. Refactor it into a helper. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 88 +++++++++++++++++++++-------------------------- 1 file changed, 40 insertions(+), 48 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index c1b724ab5f25..275647ebaa81 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -60,11 +60,6 @@ #define MII_M1011_PHY_SCR_MDI_X 0x0020 #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 -#define MII_M1145_PHY_EXT_SR 0x1b -#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4 -#define MII_M1145_HWCFG_MODE_MASK 0xf -#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000 - #define MII_M1111_PHY_LED_CONTROL 0x18 #define MII_M1111_PHY_LED_DIRECT 0x4100 #define MII_M1111_PHY_LED_COMBINE 0x411c @@ -74,12 +69,13 @@ #define MII_M1111_PHY_EXT_SR 0x1b #define MII_M1111_HWCFG_MODE_MASK 0xf -#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb #define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3 #define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4 +#define MII_M1111_HWCFG_MODE_RTBI 0x7 #define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9 -#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000 -#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000 +#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb +#define MII_M1111_HWCFG_FIBER_COPPER_RES BIT(13) +#define MII_M1111_HWCFG_FIBER_COPPER_AUTO BIT(15) #define MII_88E1121_PHY_MSCR_REG 21 #define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5) @@ -693,6 +689,27 @@ static int m88e3016_config_init(struct phy_device *phydev) return marvell_config_init(phydev); } +static int m88e1111_config_init_hwcfg_mode(struct phy_device *phydev, + u16 mode, + int fibre_copper_auto) +{ + int temp; + + temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); + if (temp < 0) + return temp; + + temp &= ~(MII_M1111_HWCFG_MODE_MASK | + MII_M1111_HWCFG_FIBER_COPPER_AUTO | + MII_M1111_HWCFG_FIBER_COPPER_RES); + temp |= mode; + + if (fibre_copper_auto) + temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; + + return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); +} + static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev) { int temp; @@ -740,17 +757,11 @@ static int m88e1111_config_init_rgmii(struct phy_device *phydev) static int m88e1111_config_init_sgmii(struct phy_device *phydev) { int err; - int temp; - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK); - temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK; - temp |= MII_M1111_HWCFG_FIBER_COPPER_AUTO; - - err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + err = m88e1111_config_init_hwcfg_mode( + phydev, + MII_M1111_HWCFG_MODE_SGMII_NO_CLK, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); if (err < 0) return err; @@ -760,22 +771,16 @@ static int m88e1111_config_init_sgmii(struct phy_device *phydev) static int m88e1111_config_init_rtbi(struct phy_device *phydev) { - int temp; int err; err = m88e1111_config_init_rgmii_delays(phydev); if (err) return err; - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK | - MII_M1111_HWCFG_FIBER_COPPER_RES); - temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO; - - err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + err = m88e1111_config_init_hwcfg_mode( + phydev, + MII_M1111_HWCFG_MODE_RTBI, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); if (err < 0) return err; @@ -784,16 +789,10 @@ static int m88e1111_config_init_rtbi(struct phy_device *phydev) if (err < 0) return
err; - temp = phy_read(phydev, MII_M1111_PHY_EXT_SR); - if (temp < 0) - return temp; - - temp &= ~(MII_M1111_HWCFG_MODE_MASK | - MII_M1111_HWCFG_FIBER_COPPER_RES); - temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | - MII_M1111_HWCFG_FIBER_COPPER_AUTO; - - return phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); + return m88e1111_config_init_hwcfg_mode( + phydev, + MII_M1111_HWCFG_MODE_RTBI, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); } static int m88e1111_config_init(struct phy_device *phydev) @@ -999,16 +998,9 @@ static int m88e1145_config_init_rgmii(struct phy_device *phydev) static int m88e1145_config_init_sgmii(struct phy_device *phydev) { - int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR); - - if (temp < 0) - return temp; - - temp &= ~MII_M1145_HWCFG_MODE_MASK; - temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK; - temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO; - - return phy_write(phydev, MII_M1145_PHY_EXT_SR, temp); + return m88e1111_config_init_hwcfg_mode( + phydev, MII_M1111_HWCFG_MODE_SGMII_NO_CLK, + MII_M1111_HWCFG_FIBER_COPPER_AUTO); } static int m88e1145_config_init(struct phy_device *phydev) -- cgit v1.2.3-55-g7522 From 864dc729d528560e8d204267d66dae59972f462c Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 30 Jul 2017 22:41:48 +0200 Subject: net: phy: marvell: Refactor m88e1121 RGMII delay configuration Turns out that MII_M1116R_CONTROL_REG_MAC is the same as MII_88E1121_PHY_MSCR_REG. Refactor the code to set the RGMII delays into a shared helper. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 62 +++++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 29 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 275647ebaa81..408442bdef0a 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -80,7 +80,7 @@ #define MII_88E1121_PHY_MSCR_REG 21 #define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5) #define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) -#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4)) +#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(BIT(5) || BIT(4))) #define MII_88E1121_MISC_TEST 0x1a #define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00 @@ -127,8 +127,6 @@ #define MII_M1011_PHY_STATUS_RESOLVED 0x0800 #define MII_M1011_PHY_STATUS_LINK 0x0400 -#define MII_M1116R_CONTROL_REG_MAC 21 - #define MII_88E3016_PHY_SPEC_CTRL 0x10 #define MII_88E3016_DISABLE_SCRAMBLER 0x0200 #define MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030 @@ -442,7 +440,7 @@ static int marvell_of_reg_init(struct phy_device *phydev) } #endif /* CONFIG_OF_MDIO */ -static int m88e1121_config_aneg(struct phy_device *phydev) +static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev) { int err, oldpage, mscr; @@ -450,25 +448,40 @@ static int m88e1121_config_aneg(struct phy_device *phydev) if (oldpage < 0) return oldpage; - if (phy_interface_is_rgmii(phydev)) { - mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) & - MII_88E1121_PHY_MSCR_DELAY_MASK; - - if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) - mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY | - MII_88E1121_PHY_MSCR_TX_DELAY); - else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) - mscr |= MII_88E1121_PHY_MSCR_RX_DELAY; - else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) - mscr |= MII_88E1121_PHY_MSCR_TX_DELAY; - - err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); - if (err < 0) - return err; + mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG); + if (mscr < 0) { + err = mscr; + goto out; } + mscr &= MII_88E1121_PHY_MSCR_DELAY_MASK; + + if 
(phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) + mscr |= (MII_88E1121_PHY_MSCR_RX_DELAY | + MII_88E1121_PHY_MSCR_TX_DELAY); + else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + mscr |= MII_88E1121_PHY_MSCR_RX_DELAY; + else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + mscr |= MII_88E1121_PHY_MSCR_TX_DELAY; + + err = phy_write(phydev, MII_88E1121_PHY_MSCR_REG, mscr); + +out: marvell_set_page(phydev, oldpage); + return err; +} + +static int m88e1121_config_aneg(struct phy_device *phydev) +{ + int err = 0; + + if (phy_interface_is_rgmii(phydev)) { + err = m88e1121_config_aneg_rgmii_delays(phydev); + if (err) + return err; + } + err = genphy_soft_reset(phydev); if (err < 0) return err; @@ -650,16 +663,7 @@ static int m88e1116r_config_init(struct phy_device *phydev) if (err < 0) return err; - err = marvell_set_page(phydev, MII_MARVELL_MSCR_PAGE); - if (err < 0) - return err; - temp = phy_read(phydev, MII_M1116R_CONTROL_REG_MAC); - temp |= (1 << 5); - temp |= (1 << 4); - err = phy_write(phydev, MII_M1116R_CONTROL_REG_MAC, temp); - if (err < 0) - return err; - err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); + err = m88e1121_config_aneg_rgmii_delays(phydev); if (err < 0) return err; -- cgit v1.2.3-55-g7522 From fecd5e910ebed27fa68c34bf7ecc300a12950897 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 30 Jul 2017 22:41:49 +0200 Subject: net: phy: marvell: Use the set_polarity helper Some of the init functions unilaterally enable set auto cross over without using the helper. Make use of the helper, and respect the phydev MDI configuration. Clean up the #define used while setting polarity, and the other functions of the bits in the register. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 408442bdef0a..34fd15b904e7 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -55,10 +55,12 @@ #define MII_M1011_IMASK_INIT 0x6400 #define MII_M1011_IMASK_CLEAR 0x0000 -#define MII_M1011_PHY_SCR 0x10 -#define MII_M1011_PHY_SCR_MDI 0x0000 -#define MII_M1011_PHY_SCR_MDI_X 0x0020 -#define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 +#define MII_M1011_PHY_SCR 0x10 +#define MII_M1011_PHY_SCR_DOWNSHIFT_EN BIT(11) +#define MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT 12 +#define MII_M1011_PHY_SCR_MDI (0x0 << 5) +#define MII_M1011_PHY_SCR_MDI_X (0x1 << 5) +#define MII_M1011_PHY_SCR_AUTO_CROSS (0x3 << 5) #define MII_M1111_PHY_LED_CONTROL 0x18 #define MII_M1111_PHY_LED_DIRECT 0x4100 @@ -486,8 +488,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_AUTO_CROSS); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; @@ -655,10 +656,13 @@ static int m88e1116r_config_init(struct phy_device *phydev) if (err < 0) return err; + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); + if (err < 0) + return err; + temp = phy_read(phydev, MII_M1011_PHY_SCR); - temp |= (7 << 12); /* max number of gigabit attempts */ - temp |= (1 << 11); /* enable downshift */ - temp |= MII_M1011_PHY_SCR_AUTO_CROSS; + temp |= (7 << MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT); + temp |= MII_M1011_PHY_SCR_DOWNSHIFT_EN; err = phy_write(phydev, MII_M1011_PHY_SCR, temp); if (err < 0) return err; @@ -891,8 +895,7 @@ static int m88e1118_config_aneg(struct phy_device *phydev) if (err < 0) return err; - err = 
phy_write(phydev, MII_M1011_PHY_SCR, - MII_M1011_PHY_SCR_AUTO_CROSS); + err = marvell_set_polarity(phydev, phydev->mdix_ctrl); if (err < 0) return err; -- cgit v1.2.3-55-g7522 From 6ef05eb73c8f623aec11449c9c763f19fb8a8fb1 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sun, 30 Jul 2017 22:41:50 +0200 Subject: net: phy: marvell: Refactor setting downshift into a helper The 1116r has code to set downshift. Refactor this into a helper, so in future other marvell PHYs can use it. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 34fd15b904e7..361fe9927ef2 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -58,6 +58,7 @@ #define MII_M1011_PHY_SCR 0x10 #define MII_M1011_PHY_SCR_DOWNSHIFT_EN BIT(11) #define MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT 12 +#define MII_M1011_PHY_SRC_DOWNSHIFT_MASK 0x7800 #define MII_M1011_PHY_SCR_MDI (0x0 << 5) #define MII_M1011_PHY_SCR_MDI_X (0x1 << 5) #define MII_M1011_PHY_SCR_AUTO_CROSS (0x3 << 5) @@ -263,6 +264,23 @@ static int marvell_set_polarity(struct phy_device *phydev, int polarity) return 0; } +static int marvell_set_downshift(struct phy_device *phydev, bool enable, + u8 retries) +{ + int reg; + + reg = phy_read(phydev, MII_M1011_PHY_SCR); + if (reg < 0) + return reg; + + reg &= MII_M1011_PHY_SRC_DOWNSHIFT_MASK; + reg |= ((retries - 1) << MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT); + if (enable) + reg |= MII_M1011_PHY_SCR_DOWNSHIFT_EN; + + return phy_write(phydev, MII_M1011_PHY_SCR, reg); +} + static int marvell_config_aneg(struct phy_device *phydev) { int err; @@ -643,7 +661,6 @@ static int marvell_config_init(struct phy_device *phydev) static int m88e1116r_config_init(struct phy_device *phydev) { - int temp; int err; err = genphy_soft_reset(phydev); @@ -660,10 +677,7 @@ static int m88e1116r_config_init(struct phy_device *phydev) if (err < 0) return err; - temp = phy_read(phydev, MII_M1011_PHY_SCR); - temp |= (7 << MII_M1011_PHY_SCR_DOWNSHIFT_SHIFT); - temp |= MII_M1011_PHY_SCR_DOWNSHIFT_EN; - err = phy_write(phydev, MII_M1011_PHY_SCR, temp); + err = marvell_set_downshift(phydev, true, 8); if (err < 0) return err; -- cgit v1.2.3-55-g7522 From 1f139ed9ec40521b4497aa34e107d38bb082b0e0 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Mon, 31 Jul 2017 10:09:41 +0200 Subject: ipv6: Avoid going through ->sk_net to access the netns There is no need to go through sk->sk_net to access the net namespace and its sysctl variables because we allocate the sock and initialize sk_net just a few lines earlier in the same routine. Signed-off-by: Jakub Sitnicki Signed-off-by: David S. 
Miller --- net/ipv6/af_inet6.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index a88b5b5b7955..0a7c74049a0c 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -210,7 +210,7 @@ lookup_protocol: np->mcast_hops = IPV6_DEFAULT_MCASTHOPS; np->mc_loop = 1; np->pmtudisc = IPV6_PMTUDISC_WANT; - np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk)); + np->autoflowlabel = ip6_default_np_autolabel(net); sk->sk_ipv6only = net->ipv6.sysctl.bindv6only; /* Init the ipv4 part of the socket since we can have sockets -- cgit v1.2.3-55-g7522 From d829b9e230f4138fb6194e854e1bb46f737f1c3d Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 31 Jul 2017 17:59:39 -0700 Subject: Bluetooth: btusb: add ID for LiteOn 04ca:3016 Contains a QCA6174A-5 chipset, with USB BT. Let's support loading firmware on it. From usb-devices: T: Bus=02 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=12 MxCh= 0 D: Ver= 2.01 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=04ca ProdID=3016 Rev=00.01 C: #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I: If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb I: If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb Signed-off-by: Brian Norris Signed-off-by: Johan Hedberg --- drivers/bluetooth/btusb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1cefff772cd0..24cc8383fdd4 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -269,6 +269,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, -- cgit v1.2.3-55-g7522 From 235acb1894321f91c771464d90c4010a9bef90bb Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 1 Jun 2017 12:10:32 +0200 Subject: iwlwifi: refactor out paging code Refactor the paging code from mvm to be used by different opmodes. 
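For illustration, a minimal sketch of how an opmode is expected to drive the shared paging helpers once they live under fw/ (only iwl_fw_runtime_init(), iwl_init_paging() and iwl_free_fw_paging() come from the code below; the local variables, call sites and error handling here are assumptions, not part of the patch):

	struct iwl_fw_runtime fwrt;
	int ret;

	/* bind the transport and firmware image to the shared runtime state */
	iwl_fw_runtime_init(&fwrt, trans, fw);

	/*
	 * Allocate the DRAM blocks, copy the CPU2 paging image into them and
	 * send the paging layout to the firmware; 'type' is the enum
	 * iwl_ucode_type image being started.
	 */
	ret = iwl_init_paging(&fwrt, type);
	if (ret)
		return ret;

	/* ... firmware runs with paging enabled ... */

	/* release the paging blocks on stop or error */
	iwl_free_fw_paging(&fwrt);
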
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/Makefile | 1 + drivers/net/wireless/intel/iwlwifi/fw/api.h | 43 +++ drivers/net/wireless/intel/iwlwifi/fw/paging.c | 417 ++++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 99 ++++++ drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 44 --- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 12 +- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 344 +------------------ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 12 +- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 2 + 9 files changed, 572 insertions(+), 402 deletions(-) create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/paging.c create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/runtime.h diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 20bd261223af..186a5b2fb6e3 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -11,6 +11,7 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o +iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api.h b/drivers/net/wireless/intel/iwlwifi/fw/api.h index 0e107f916ce3..f9bcbaf87ce2 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api.h @@ -226,4 +226,47 @@ struct iwl_phy_db_cmd { u8 data[]; } __packed; +#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ + +/** + * struct iwl_fw_paging_cmd - paging layout + * + * Send to FW the paging layout in the driver. + * + * @flags: various flags for the command + * @block_size: the block size in powers of 2 + * @block_num: number of blocks specified in the command. + * @device_phy_addr: virtual addresses from device side + */ +struct iwl_fw_paging_cmd { + __le32 flags; + __le32 block_size; + __le32 block_num; + __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; +} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ + +/** + * enum iwl_fw_item_id - FW item IDs + * + * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload + * download + */ +enum iwl_fw_item_id { + IWL_FW_ITEM_ID_PAGING = 3, +}; + +/** + * struct iwl_fw_get_item_cmd - get an item from the fw + * @item_id: ID of item to obtain, see &enum iwl_fw_item_id + */ +struct iwl_fw_get_item_cmd { + __le32 item_id; +} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ + +struct iwl_fw_get_item_resp { + __le32 item_id; + __le32 item_byte_cnt; + __le32 item_val; +} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ + #endif /* __iwl_fw_api_h__*/ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c new file mode 100644 index 000000000000..2f4044922be3 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -0,0 +1,417 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" + +/* FIXME */ +#define FW_PAGING_BLOCK_CMD 0x4f +#define FW_GET_ITEM_CMD 0x1a + +void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt) +{ + int i; + + if (!fwrt->fw_paging_db[0].fw_paging_block) + return; + + for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { + struct iwl_fw_paging *paging = &fwrt->fw_paging_db[i]; + + if (!paging->fw_paging_block) { + IWL_DEBUG_FW(fwrt, + "Paging: block %d already freed, continue to next page\n", + i); + + continue; + } + dma_unmap_page(fwrt->trans->dev, paging->fw_paging_phys, + paging->fw_paging_size, DMA_BIDIRECTIONAL); + + __free_pages(paging->fw_paging_block, + get_order(paging->fw_paging_size)); + paging->fw_paging_block = NULL; + } + kfree(fwrt->trans->paging_download_buf); + fwrt->trans->paging_download_buf = NULL; + fwrt->trans->paging_db = NULL; + + memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db)); +} +IWL_EXPORT_SYMBOL(iwl_free_fw_paging); + +static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt, + const struct fw_img *image) +{ + struct page *block; + dma_addr_t phys = 0; + int blk_idx, order, num_of_pages, size, dma_enabled; + + if (fwrt->fw_paging_db[0].fw_paging_block) + return 0; + + dma_enabled = is_device_dma_capable(fwrt->trans->dev); + + /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */ + BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); + + num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; + fwrt->num_of_paging_blk = + DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP); + fwrt->num_of_pages_in_last_blk = + num_of_pages - + NUM_OF_PAGE_PER_GROUP * (fwrt->num_of_paging_blk - 1); + + IWL_DEBUG_FW(fwrt, + "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", + fwrt->num_of_paging_blk, + fwrt->num_of_pages_in_last_blk); + + /* + * Allocate CSS and paging blocks in dram. + */ + for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) { + /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */ + size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE; + order = get_order(size); + block = alloc_pages(GFP_KERNEL, order); + if (!block) { + /* free all the previous pages since we failed */ + iwl_free_fw_paging(fwrt); + return -ENOMEM; + } + + fwrt->fw_paging_db[blk_idx].fw_paging_block = block; + fwrt->fw_paging_db[blk_idx].fw_paging_size = size; + + if (dma_enabled) { + phys = dma_map_page(fwrt->trans->dev, block, 0, + PAGE_SIZE << order, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(fwrt->trans->dev, phys)) { + /* + * free the previous pages and the current one + * since we failed to map_page. 
+ */ + iwl_free_fw_paging(fwrt); + return -ENOMEM; + } + fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys; + } else { + fwrt->fw_paging_db[blk_idx].fw_paging_phys = + PAGING_ADDR_SIG | + blk_idx << BLOCK_2_EXP_SIZE; + } + + if (!blk_idx) + IWL_DEBUG_FW(fwrt, + "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", + order); + else + IWL_DEBUG_FW(fwrt, + "Paging: allocated 32K bytes (order %d) for firmware paging.\n", + order); + } + + return 0; +} + +static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt, + const struct fw_img *image) +{ + int sec_idx, idx; + u32 offset = 0; + + /* + * find where is the paging image start point: + * if CPU2 exist and it's in paging format, then the image looks like: + * CPU1 sections (2 or more) + * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2 + * CPU2 sections (not paged) + * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2 + * non paged to CPU2 paging sec + * CPU2 paging CSS + * CPU2 paging image (including instruction and data) + */ + for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) { + if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { + sec_idx++; + break; + } + } + + /* + * If paging is enabled there should be at least 2 more sections left + * (one for CSS and one for Paging data) + */ + if (sec_idx >= image->num_sec - 1) { + IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n"); + iwl_free_fw_paging(fwrt); + return -EINVAL; + } + + /* copy the CSS block to the dram */ + IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n", + sec_idx); + + memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block), + image->sec[sec_idx].data, + fwrt->fw_paging_db[0].fw_paging_size); + dma_sync_single_for_device(fwrt->trans->dev, + fwrt->fw_paging_db[0].fw_paging_phys, + fwrt->fw_paging_db[0].fw_paging_size, + DMA_BIDIRECTIONAL); + + IWL_DEBUG_FW(fwrt, + "Paging: copied %d CSS bytes to first block\n", + fwrt->fw_paging_db[0].fw_paging_size); + + sec_idx++; + + /* + * copy the paging blocks to the dram + * loop index start from 1 since that CSS block already copied to dram + * and CSS index is 0. + * loop stop at num_of_paging_blk since that last block is not full. 
+ */ + for (idx = 1; idx < fwrt->num_of_paging_blk; idx++) { + struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + + memcpy(page_address(block->fw_paging_block), + image->sec[sec_idx].data + offset, + block->fw_paging_size); + dma_sync_single_for_device(fwrt->trans->dev, + block->fw_paging_phys, + block->fw_paging_size, + DMA_BIDIRECTIONAL); + + IWL_DEBUG_FW(fwrt, + "Paging: copied %d paging bytes to block %d\n", + fwrt->fw_paging_db[idx].fw_paging_size, + idx); + + offset += fwrt->fw_paging_db[idx].fw_paging_size; + } + + /* copy the last paging block */ + if (fwrt->num_of_pages_in_last_blk > 0) { + struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx]; + + memcpy(page_address(block->fw_paging_block), + image->sec[sec_idx].data + offset, + FW_PAGING_SIZE * fwrt->num_of_pages_in_last_blk); + dma_sync_single_for_device(fwrt->trans->dev, + block->fw_paging_phys, + block->fw_paging_size, + DMA_BIDIRECTIONAL); + + IWL_DEBUG_FW(fwrt, + "Paging: copied %d pages in the last block %d\n", + fwrt->num_of_pages_in_last_blk, idx); + } + + return 0; +} + +static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt, + const struct fw_img *fw) +{ + int ret; + + ret = iwl_alloc_fw_paging_mem(fwrt, fw); + if (ret) + return ret; + + return iwl_fill_paging_mem(fwrt, fw); +} + +/* send paging cmd to FW in case CPU2 has paging image */ +static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt, + const struct fw_img *fw) +{ + struct iwl_fw_paging_cmd paging_cmd = { + .flags = cpu_to_le32(PAGING_CMD_IS_SECURED | + PAGING_CMD_IS_ENABLED | + (fwrt->num_of_pages_in_last_blk << + PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), + .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), + .block_num = cpu_to_le32(fwrt->num_of_paging_blk), + }; + struct iwl_host_cmd hcmd = { + .id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .len = { sizeof(paging_cmd), }, + .data = { &paging_cmd, }, + }; + int blk_idx; + + /* loop for for all paging blocks + CSS block */ + for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) { + dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys; + __le32 phy_addr; + + addr = addr >> PAGE_2_EXP_SIZE; + phy_addr = cpu_to_le32(addr); + paging_cmd.device_phy_addr[blk_idx] = phy_addr; + } + + return iwl_trans_send_cmd(fwrt->trans, &hcmd); +} + +/* + * Send paging item cmd to FW in case CPU2 has paging image + */ +static int iwl_trans_get_paging_item(struct iwl_fw_runtime *fwrt) +{ + int ret; + struct iwl_fw_get_item_cmd fw_get_item_cmd = { + .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING), + }; + struct iwl_fw_get_item_resp *item_resp; + struct iwl_host_cmd cmd = { + .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0), + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &fw_get_item_cmd, }, + .len = { sizeof(fw_get_item_cmd), }, + }; + + ret = iwl_trans_send_cmd(fwrt->trans, &cmd); + if (ret) { + IWL_ERR(fwrt, + "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n", + ret); + return ret; + } + + item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data; + if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) { + IWL_ERR(fwrt, + "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n", + le32_to_cpu(item_resp->item_id)); + ret = -EIO; + goto exit; + } + + /* Add an extra page for headers */ + fwrt->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE + + FW_PAGING_SIZE, + GFP_KERNEL); + if (!fwrt->trans->paging_download_buf) { + ret = -ENOMEM; + goto exit; + } + fwrt->trans->paging_req_addr = le32_to_cpu(item_resp->item_val); + 
fwrt->trans->paging_db = fwrt->fw_paging_db; + IWL_DEBUG_FW(fwrt, + "Paging: got paging request address (paging_req_addr 0x%08x)\n", + fwrt->trans->paging_req_addr); + +exit: + iwl_free_resp(&cmd); + + return ret; +} + +int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type) +{ + const struct fw_img *fw = &fwrt->fw->img[type]; + int ret; + + if (fwrt->trans->cfg->gen2) + return 0; + + /* + * Configure and operate fw paging mechanism. + * The driver configures the paging flow only once. + * The CPU2 paging image is included in the IWL_UCODE_INIT image. + */ + if (!fw->paging_mem_size) + return 0; + + /* + * When dma is not enabled, the driver needs to copy / write + * the downloaded / uploaded page to / from the smem. + * This gets the location of the place were the pages are + * stored. + */ + if (!is_device_dma_capable(fwrt->trans->dev)) { + ret = iwl_trans_get_paging_item(fwrt); + if (ret) { + IWL_ERR(fwrt, "failed to get FW paging item\n"); + return ret; + } + } + + ret = iwl_save_fw_paging(fwrt, fw); + if (ret) { + IWL_ERR(fwrt, "failed to save the FW paging image\n"); + return ret; + } + + ret = iwl_send_paging_cmd(fwrt, fw); + if (ret) { + IWL_ERR(fwrt, "failed to send the paging cmd\n"); + iwl_free_fw_paging(fwrt); + return ret; + } + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_init_paging); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h new file mode 100644 index 000000000000..c5d564305d30 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -0,0 +1,99 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_runtime_h__ +#define __iwl_fw_runtime_h__ + +#include "iwl-config.h" +#include "iwl-trans.h" +#include "img.h" +#include "api.h" + +/** + * struct iwl_fw_runtime - runtime data for firmware + * @fw: firmware image + * @cfg: NIC configuration + * @dev: device pointer + * @fw_paging_db: paging database + * @num_of_paging_blk: number of paging blocks + * @num_of_pages_in_last_blk: number of pages in the last block + */ +struct iwl_fw_runtime { + struct iwl_trans *trans; + const struct iwl_fw *fw; + struct device *dev; + + /* Paging */ + struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; + u16 num_of_paging_blk; + u16 num_of_pages_in_last_blk; +}; + +static inline void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, + struct iwl_trans *trans, + const struct iwl_fw *fw) +{ + memset(fwrt, 0, sizeof(*fwrt)); + fwrt->trans = trans; + fwrt->fw = fw; + fwrt->dev = trans->dev; +} + +int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type); +void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); + +#endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index aad265dcfaf5..e1a29cde92d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -1000,44 +1000,6 @@ struct iwl_nvm_access_cmd { u8 data[]; } __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ -#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ - -/** - * struct iwl_fw_paging_cmd - paging layout - * - * (FW_PAGING_BLOCK_CMD = 0x4f) - * - * Send to FW the paging layout in the driver. - * - * @flags: various flags for the command - * @block_size: the block size in powers of 2 - * @block_num: number of blocks specified in the command. 
- * @device_phy_addr: virtual addresses from device side - */ -struct iwl_fw_paging_cmd { - __le32 flags; - __le32 block_size; - __le32 block_num; - __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; -} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ - -/* - * Fw items ID's - * - * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload - * download - */ -enum iwl_fw_item_id { - IWL_FW_ITEM_ID_PAGING = 3, -}; - -/* - * struct iwl_fw_get_item_cmd - get an item from the fw - */ -struct iwl_fw_get_item_cmd { - __le32 item_id; -} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ - #define CONT_REC_COMMAND_SIZE 80 #define ENABLE_CONT_RECORDING 0x15 #define DISABLE_CONT_RECORDING 0x16 @@ -1058,12 +1020,6 @@ struct iwl_continuous_record_cmd { sizeof(struct iwl_continuous_record_mode)]; } __packed; -struct iwl_fw_get_item_resp { - __le32 item_id; - __le32 item_byte_cnt; - __le32 item_val; -} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ - /** * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD * @offset: offset in bytes into the section diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 1602b360353c..ba32753fda5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -694,8 +694,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) /* Make room for fw's virtual image pages, if it exists */ if (!mvm->trans->cfg->gen2 && mvm->fw->img[mvm->cur_ucode].paging_mem_size && - mvm->fw_paging_db[0].fw_paging_block) - file_len += mvm->num_of_paging_blk * + mvm->fwrt.fw_paging_db[0].fw_paging_block) + file_len += mvm->fwrt.num_of_paging_blk * (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + PAGING_BLOCK_SIZE); @@ -833,12 +833,12 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) /* Dump fw's virtual image */ if (!mvm->trans->cfg->gen2 && mvm->fw->img[mvm->cur_ucode].paging_mem_size && - mvm->fw_paging_db[0].fw_paging_block) { - for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { + mvm->fwrt.fw_paging_db[0].fw_paging_block) { + for (i = 1; i < mvm->fwrt.num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; struct page *pages = - mvm->fw_paging_db[i].fw_paging_block; - dma_addr_t addr = mvm->fw_paging_db[i].fw_paging_phys; + mvm->fwrt.fw_paging_db[i].fw_paging_block; + dma_addr_t addr = mvm->fwrt.fw_paging_db[i].fw_paging_phys; dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); dump_data->len = cpu_to_le32(sizeof(*paging) + diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 79e7a7a285dc..47715eec22e6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -144,134 +144,6 @@ static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) return ret; } -void iwl_free_fw_paging(struct iwl_mvm *mvm) -{ - int i; - - if (!mvm->fw_paging_db[0].fw_paging_block) - return; - - for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) { - struct iwl_fw_paging *paging = &mvm->fw_paging_db[i]; - - if (!paging->fw_paging_block) { - IWL_DEBUG_FW(mvm, - "Paging: block %d already freed, continue to next page\n", - i); - - continue; - } - dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys, - paging->fw_paging_size, DMA_BIDIRECTIONAL); - - __free_pages(paging->fw_paging_block, - get_order(paging->fw_paging_size)); - paging->fw_paging_block = NULL; - } - kfree(mvm->trans->paging_download_buf); - mvm->trans->paging_download_buf = NULL; - mvm->trans->paging_db = NULL; - 
- memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); -} - -static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image) -{ - int sec_idx, idx; - u32 offset = 0; - - /* - * find where is the paging image start point: - * if CPU2 exist and it's in paging format, then the image looks like: - * CPU1 sections (2 or more) - * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2 - * CPU2 sections (not paged) - * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2 - * non paged to CPU2 paging sec - * CPU2 paging CSS - * CPU2 paging image (including instruction and data) - */ - for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) { - if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) { - sec_idx++; - break; - } - } - - /* - * If paging is enabled there should be at least 2 more sections left - * (one for CSS and one for Paging data) - */ - if (sec_idx >= image->num_sec - 1) { - IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n"); - iwl_free_fw_paging(mvm); - return -EINVAL; - } - - /* copy the CSS block to the dram */ - IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n", - sec_idx); - - memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block), - image->sec[sec_idx].data, - mvm->fw_paging_db[0].fw_paging_size); - dma_sync_single_for_device(mvm->trans->dev, - mvm->fw_paging_db[0].fw_paging_phys, - mvm->fw_paging_db[0].fw_paging_size, - DMA_BIDIRECTIONAL); - - IWL_DEBUG_FW(mvm, - "Paging: copied %d CSS bytes to first block\n", - mvm->fw_paging_db[0].fw_paging_size); - - sec_idx++; - - /* - * copy the paging blocks to the dram - * loop index start from 1 since that CSS block already copied to dram - * and CSS index is 0. - * loop stop at num_of_paging_blk since that last block is not full. 
- */ - for (idx = 1; idx < mvm->num_of_paging_blk; idx++) { - struct iwl_fw_paging *block = &mvm->fw_paging_db[idx]; - - memcpy(page_address(block->fw_paging_block), - image->sec[sec_idx].data + offset, - block->fw_paging_size); - dma_sync_single_for_device(mvm->trans->dev, - block->fw_paging_phys, - block->fw_paging_size, - DMA_BIDIRECTIONAL); - - - IWL_DEBUG_FW(mvm, - "Paging: copied %d paging bytes to block %d\n", - mvm->fw_paging_db[idx].fw_paging_size, - idx); - - offset += mvm->fw_paging_db[idx].fw_paging_size; - } - - /* copy the last paging block */ - if (mvm->num_of_pages_in_last_blk > 0) { - struct iwl_fw_paging *block = &mvm->fw_paging_db[idx]; - - memcpy(page_address(block->fw_paging_block), - image->sec[sec_idx].data + offset, - FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk); - dma_sync_single_for_device(mvm->trans->dev, - block->fw_paging_phys, - block->fw_paging_size, - DMA_BIDIRECTIONAL); - - IWL_DEBUG_FW(mvm, - "Paging: copied %d pages in the last block %d\n", - mvm->num_of_pages_in_last_blk, idx); - } - - return 0; -} - void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { @@ -293,178 +165,6 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm, le32_to_cpu(dump_data[i])); } -static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm, - const struct fw_img *image) -{ - struct page *block; - dma_addr_t phys = 0; - int blk_idx, order, num_of_pages, size, dma_enabled; - - if (mvm->fw_paging_db[0].fw_paging_block) - return 0; - - dma_enabled = is_device_dma_capable(mvm->trans->dev); - - /* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */ - BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE); - - num_of_pages = image->paging_mem_size / FW_PAGING_SIZE; - mvm->num_of_paging_blk = - DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP); - mvm->num_of_pages_in_last_blk = - num_of_pages - - NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1); - - IWL_DEBUG_FW(mvm, - "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n", - mvm->num_of_paging_blk, - mvm->num_of_pages_in_last_blk); - - /* - * Allocate CSS and paging blocks in dram. - */ - for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - /* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */ - size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE; - order = get_order(size); - block = alloc_pages(GFP_KERNEL, order); - if (!block) { - /* free all the previous pages since we failed */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - - mvm->fw_paging_db[blk_idx].fw_paging_block = block; - mvm->fw_paging_db[blk_idx].fw_paging_size = size; - - if (dma_enabled) { - phys = dma_map_page(mvm->trans->dev, block, 0, - PAGE_SIZE << order, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(mvm->trans->dev, phys)) { - /* - * free the previous pages and the current one - * since we failed to map_page. 
- */ - iwl_free_fw_paging(mvm); - return -ENOMEM; - } - mvm->fw_paging_db[blk_idx].fw_paging_phys = phys; - } else { - mvm->fw_paging_db[blk_idx].fw_paging_phys = - PAGING_ADDR_SIG | - blk_idx << BLOCK_2_EXP_SIZE; - } - - if (!blk_idx) - IWL_DEBUG_FW(mvm, - "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n", - order); - else - IWL_DEBUG_FW(mvm, - "Paging: allocated 32K bytes (order %d) for firmware paging.\n", - order); - } - - return 0; -} - -static int iwl_save_fw_paging(struct iwl_mvm *mvm, - const struct fw_img *fw) -{ - int ret; - - ret = iwl_alloc_fw_paging_mem(mvm, fw); - if (ret) - return ret; - - return iwl_fill_paging_mem(mvm, fw); -} - -/* send paging cmd to FW in case CPU2 has paging image */ -static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw) -{ - struct iwl_fw_paging_cmd paging_cmd = { - .flags = cpu_to_le32(PAGING_CMD_IS_SECURED | - PAGING_CMD_IS_ENABLED | - (mvm->num_of_pages_in_last_blk << - PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)), - .block_size = cpu_to_le32(BLOCK_2_EXP_SIZE), - .block_num = cpu_to_le32(mvm->num_of_paging_blk), - }; - int blk_idx; - - /* loop for for all paging blocks + CSS block */ - for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) { - dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys; - __le32 phy_addr; - - addr = addr >> PAGE_2_EXP_SIZE; - phy_addr = cpu_to_le32(addr); - paging_cmd.device_phy_addr[blk_idx] = phy_addr; - } - - return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD, - IWL_ALWAYS_LONG_GROUP, 0), - 0, sizeof(paging_cmd), &paging_cmd); -} - -/* - * Send paging item cmd to FW in case CPU2 has paging image - */ -static int iwl_trans_get_paging_item(struct iwl_mvm *mvm) -{ - int ret; - struct iwl_fw_get_item_cmd fw_get_item_cmd = { - .item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING), - }; - - struct iwl_fw_get_item_resp *item_resp; - struct iwl_host_cmd cmd = { - .id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0), - .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, - .data = { &fw_get_item_cmd, }, - }; - - cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd); - - ret = iwl_mvm_send_cmd(mvm, &cmd); - if (ret) { - IWL_ERR(mvm, - "Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n", - ret); - return ret; - } - - item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data; - if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) { - IWL_ERR(mvm, - "Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n", - le32_to_cpu(item_resp->item_id)); - ret = -EIO; - goto exit; - } - - /* Add an extra page for headers */ - mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE + - FW_PAGING_SIZE, - GFP_KERNEL); - if (!mvm->trans->paging_download_buf) { - ret = -ENOMEM; - goto exit; - } - mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val); - mvm->trans->paging_db = mvm->fw_paging_db; - IWL_DEBUG_FW(mvm, - "Paging: got paging request address (paging_req_addr 0x%08x)\n", - mvm->trans->paging_req_addr); - -exit: - iwl_free_resp(&cmd); - - return ret; -} - static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, struct iwl_rx_packet *pkt, void *data) { @@ -544,48 +244,6 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait, return false; } -static int iwl_mvm_init_paging(struct iwl_mvm *mvm) -{ - const struct fw_img *fw = &mvm->fw->img[mvm->cur_ucode]; - int ret; - - /* - * Configure and operate fw paging mechanism. - * The driver configures the paging flow only once. 
- * The CPU2 paging image is included in the IWL_UCODE_INIT image. - */ - if (!fw->paging_mem_size) - return 0; - - /* - * When dma is not enabled, the driver needs to copy / write - * the downloaded / uploaded page to / from the smem. - * This gets the location of the place were the pages are - * stored. - */ - if (!is_device_dma_capable(mvm->trans->dev)) { - ret = iwl_trans_get_paging_item(mvm); - if (ret) { - IWL_ERR(mvm, "failed to get FW paging item\n"); - return ret; - } - } - - ret = iwl_save_fw_paging(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to save the FW paging image\n"); - return ret; - } - - ret = iwl_send_paging_cmd(mvm, fw); - if (ret) { - IWL_ERR(mvm, "failed to send the paging cmd\n"); - iwl_free_fw_paging(mvm); - return ret; - } - - return 0; -} static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type) { @@ -1495,7 +1153,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (ret) return ret; - return iwl_mvm_init_paging(mvm); + return iwl_init_paging(&mvm->fwrt, mvm->cur_ucode); } int iwl_mvm_up(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index eaacfaf37206..f8fe15cd80c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -87,6 +87,7 @@ #include "fw-api.h" #include "constants.h" #include "tof.h" +#include "fw/runtime.h" #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ @@ -815,10 +816,7 @@ struct iwl_mvm { /* NVM sections */ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS]; - /* Paging section */ - struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; - u16 num_of_paging_blk; - u16 num_of_pages_in_last_blk; + struct iwl_fw_runtime fwrt; /* EEPROM MAC addresses */ struct mac_address addresses[IWL_MVM_MAX_ADDRESSES]; @@ -1571,9 +1569,6 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); -/* Paging */ -void iwl_free_fw_paging(struct iwl_mvm *mvm); - /* MVM debugfs */ #ifdef CONFIG_IWLWIFI_DEBUGFS int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); @@ -1798,8 +1793,7 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { - if (!iwl_mvm_has_new_tx_api(mvm)) - iwl_free_fw_paging(mvm); + iwl_free_fw_paging(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); mvm->fw_dbg_conf = FW_DBG_INVALID; iwl_trans_stop_device(mvm->trans); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 4d1188b8736a..8c49ac903c8c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -580,6 +580,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw = fw; mvm->hw = hw; + iwl_fw_runtime_init(&mvm->fwrt, trans, fw); + mvm->init_status = 0; if (iwl_mvm_has_new_rx_api(mvm)) { -- cgit v1.2.3-55-g7522 From d0b813fcdc22d091f8c9eee3a1484dca162667c8 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 1 Jun 2017 14:44:40 +0200 Subject: iwlwifi: refactor shared mem parsing Refactor the shared memory command parsing into common code. 
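A rough sketch of how an op-mode is expected to consume the refactored parser: it fills its embedded struct iwl_fw_runtime once and then reads the parsed sizes from fwrt->smem_cfg. iwl_get_shared_mem_conf() and the iwl_fwrt_shared_mem_cfg fields are taken from the fw/smem.c and fw/runtime.h hunks below; the summing helper itself is only illustrative and not part of the patch.

/* illustrative only -- not part of this patch */
static u32 example_total_txfifo_bytes(struct iwl_fw_runtime *fwrt)
{
	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
	u32 total = 0;
	int lmac, i;

	/* sends the shared-mem config command and fills fwrt->smem_cfg */
	iwl_get_shared_mem_conf(fwrt);

	for (lmac = 0; lmac < cfg->num_lmacs; lmac++)
		for (i = 0; i < cfg->num_txfifo_entries; i++)
			total += cfg->lmac[lmac].txfifo_size[i];

	return total;
}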
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/Makefile | 2 +- drivers/net/wireless/intel/iwlwifi/fw/api.h | 87 +++++++++++++ drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 19 +++ drivers/net/wireless/intel/iwlwifi/fw/smem.c | 156 ++++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 87 ------------- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 18 +-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 91 +------------- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 14 --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 3 +- 9 files changed, 275 insertions(+), 202 deletions(-) create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/smem.c diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 186a5b2fb6e3..ddc2bfb501c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -11,7 +11,7 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o -iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o +iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api.h b/drivers/net/wireless/intel/iwlwifi/fw/api.h index f9bcbaf87ce2..afd333e57790 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api.h @@ -269,4 +269,91 @@ struct iwl_fw_get_item_resp { __le32 item_val; } __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ +#define TX_FIFO_MAX_NUM_9000 8 +#define TX_FIFO_MAX_NUM 15 +#define RX_FIFO_MAX_NUM 2 +#define TX_FIFO_INTERNAL_MAX_NUM 6 + +/** + * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information + * + * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not + * accessible) + * @shared_mem_size: shared memory size + * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to + * 0x0 as accessible only via DBGM RDAT) + * @sample_buff_size: internal sample buff size + * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre + * 8000 HW set to 0x0 as not accessible) + * @txfifo_size: size of TXF0 ... TXF7 + * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0 + * @page_buff_addr: used by UMAC and performance debug (page miss analysis), + * when paging is not supported this should be 0 + * @page_buff_size: size of %page_buff_addr + * @rxfifo_addr: Start address of rxFifo + * @internal_txfifo_addr: start address of internalFifo + * @internal_txfifo_size: internal fifos' size + * + * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG + * set, the last 3 members don't exist. 
+ */ +struct iwl_shared_mem_cfg_v2 { + __le32 shared_mem_addr; + __le32 shared_mem_size; + __le32 sample_buff_addr; + __le32 sample_buff_size; + __le32 txfifo_addr; + __le32 txfifo_size[TX_FIFO_MAX_NUM_9000]; + __le32 rxfifo_size[RX_FIFO_MAX_NUM]; + __le32 page_buff_addr; + __le32 page_buff_size; + __le32 rxfifo_addr; + __le32 internal_txfifo_addr; + __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ + +/** + * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration + * + * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) + * @txfifo_size: size of TX FIFOs + * @rxfifo1_addr: RXF1 addr + * @rxfifo1_size: RXF1 size + */ +struct iwl_shared_mem_lmac_cfg { + __le32 txfifo_addr; + __le32 txfifo_size[TX_FIFO_MAX_NUM]; + __le32 rxfifo1_addr; + __le32 rxfifo1_size; + +} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ + +/** + * struct iwl_shared_mem_cfg - Shared memory configuration information + * + * @shared_mem_addr: shared memory address + * @shared_mem_size: shared memory size + * @sample_buff_addr: internal sample (mon/adc) buff addr + * @sample_buff_size: internal sample buff size + * @rxfifo2_addr: start addr of RXF2 + * @rxfifo2_size: size of RXF2 + * @page_buff_addr: used by UMAC and performance debug (page miss analysis), + * when paging is not supported this should be 0 + * @page_buff_size: size of %page_buff_addr + * @lmac_num: number of LMACs (1 or 2) + * @lmac_smem: per - LMAC smem data + */ +struct iwl_shared_mem_cfg { + __le32 shared_mem_addr; + __le32 shared_mem_size; + __le32 sample_buff_addr; + __le32 sample_buff_size; + __le32 rxfifo2_addr; + __le32 rxfifo2_size; + __le32 page_buff_addr; + __le32 page_buff_size; + __le32 lmac_num; + struct iwl_shared_mem_lmac_cfg lmac_smem[2]; +} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ + #endif /* __iwl_fw_api_h__*/ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index c5d564305d30..185902ce7e13 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -63,6 +63,19 @@ #include "img.h" #include "api.h" +#define MAX_NUM_LMAC 2 +struct iwl_fwrt_shared_mem_cfg { + int num_lmacs; + int num_txfifo_entries; + struct { + u32 txfifo_size[TX_FIFO_MAX_NUM]; + u32 rxfifo1_size; + } lmac[MAX_NUM_LMAC]; + u32 rxfifo2_size; + u32 internal_txfifo_addr; + u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +}; + /** * struct iwl_fw_runtime - runtime data for firmware * @fw: firmware image @@ -71,6 +84,7 @@ * @fw_paging_db: paging database * @num_of_paging_blk: number of paging blocks * @num_of_pages_in_last_blk: number of pages in the last block + * @smem_cfg: saved firmware SMEM configuration */ struct iwl_fw_runtime { struct iwl_trans *trans; @@ -81,6 +95,9 @@ struct iwl_fw_runtime { struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; u16 num_of_paging_blk; u16 num_of_pages_in_last_blk; + + /* memory configuration */ + struct iwl_fwrt_shared_mem_cfg smem_cfg; }; static inline void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, @@ -96,4 +113,6 @@ static inline void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type); void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); +void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); + #endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c new 
file mode 100644 index 000000000000..053993bf00f9 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -0,0 +1,156 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" + +/* FIXME */ +#define SHARED_MEM_CFG_CMD 0x00 +#define SYSTEM_GROUP 0x2 +#define SHARED_MEM_CFG 0x25 + +static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt, + struct iwl_rx_packet *pkt) +{ + struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; + int i, lmac; + int lmac_num = le32_to_cpu(mem_cfg->lmac_num); + + if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) + return; + + fwrt->smem_cfg.num_lmacs = lmac_num; + fwrt->smem_cfg.num_txfifo_entries = + ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size); + fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size); + + for (lmac = 0; lmac < lmac_num; lmac++) { + struct iwl_shared_mem_lmac_cfg *lmac_cfg = + &mem_cfg->lmac_smem[lmac]; + + for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) + fwrt->smem_cfg.lmac[lmac].txfifo_size[i] = + le32_to_cpu(lmac_cfg->txfifo_size[i]); + fwrt->smem_cfg.lmac[lmac].rxfifo1_size = + le32_to_cpu(lmac_cfg->rxfifo1_size); + } +} + +static void iwl_parse_shared_mem(struct iwl_fw_runtime *fwrt, + struct iwl_rx_packet *pkt) +{ + struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data; + int i; + + fwrt->smem_cfg.num_lmacs = 1; + + fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size); + for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) + fwrt->smem_cfg.lmac[0].txfifo_size[i] = + le32_to_cpu(mem_cfg->txfifo_size[i]); + + fwrt->smem_cfg.lmac[0].rxfifo1_size = + le32_to_cpu(mem_cfg->rxfifo_size[0]); + fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]); + + /* new API has more data, from rxfifo_addr field and on */ + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + BUILD_BUG_ON(sizeof(fwrt->smem_cfg.internal_txfifo_size) != + sizeof(mem_cfg->internal_txfifo_size)); + + for (i = 0; + i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); + i++) + fwrt->smem_cfg.internal_txfifo_size[i] = + le32_to_cpu(mem_cfg->internal_txfifo_size[i]); + } +} + +void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) +{ + struct iwl_host_cmd cmd = { + .flags = CMD_WANT_SKB, + .data = { NULL, }, + .len = { 0, }, + }; + struct iwl_rx_packet *pkt; + + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) + cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); + else + cmd.id = SHARED_MEM_CFG; + + if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd))) + return; + + pkt = cmd.resp_pkt; + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) + iwl_parse_shared_mem_a000(fwrt, pkt); + else + iwl_parse_shared_mem(fwrt, pkt); + + IWL_DEBUG_INFO(fwrt, "SHARED MEM CFG: got memory offsets/sizes\n"); + + iwl_free_resp(&cmd); +} +IWL_EXPORT_SYMBOL(iwl_get_shared_mem_conf); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index e1a29cde92d9..bfc865ad8904 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -2481,93 +2481,6 @@ struct iwl_tdls_config_res { struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; } __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ -#define TX_FIFO_MAX_NUM_9000 8 -#define TX_FIFO_MAX_NUM 15 -#define RX_FIFO_MAX_NUM 2 -#define TX_FIFO_INTERNAL_MAX_NUM 6 - -/** - * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information - * - * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not - * 
accessible) - * @shared_mem_size: shared memory size - * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to - * 0x0 as accessible only via DBGM RDAT) - * @sample_buff_size: internal sample buff size - * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre - * 8000 HW set to 0x0 as not accessible) - * @txfifo_size: size of TXF0 ... TXF7 - * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0 - * @page_buff_addr: used by UMAC and performance debug (page miss analysis), - * when paging is not supported this should be 0 - * @page_buff_size: size of %page_buff_addr - * @rxfifo_addr: Start address of rxFifo - * @internal_txfifo_addr: start address of internalFifo - * @internal_txfifo_size: internal fifos' size - * - * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG - * set, the last 3 members don't exist. - */ -struct iwl_shared_mem_cfg_v2 { - __le32 shared_mem_addr; - __le32 shared_mem_size; - __le32 sample_buff_addr; - __le32 sample_buff_size; - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM_9000]; - __le32 rxfifo_size[RX_FIFO_MAX_NUM]; - __le32 page_buff_addr; - __le32 page_buff_size; - __le32 rxfifo_addr; - __le32 internal_txfifo_addr; - __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ - -/** - * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration - * - * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) - * @txfifo_size: size of TX FIFOs - * @rxfifo1_addr: RXF1 addr - * @rxfifo1_size: RXF1 size - */ -struct iwl_shared_mem_lmac_cfg { - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM]; - __le32 rxfifo1_addr; - __le32 rxfifo1_size; - -} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ - -/** - * struct iwl_shared_mem_cfg - Shared memory configuration information - * - * @shared_mem_addr: shared memory address - * @shared_mem_size: shared memory size - * @sample_buff_addr: internal sample (mon/adc) buff addr - * @sample_buff_size: internal sample buff size - * @rxfifo2_addr: start addr of RXF2 - * @rxfifo2_size: size of RXF2 - * @page_buff_addr: used by UMAC and performance debug (page miss analysis), - * when paging is not supported this should be 0 - * @page_buff_size: size of %page_buff_addr - * @lmac_num: number of LMACs (1 or 2) - * @lmac_smem: per - LMAC smem data - */ -struct iwl_shared_mem_cfg { - __le32 shared_mem_addr; - __le32 shared_mem_size; - __le32 sample_buff_addr; - __le32 sample_buff_size; - __le32 rxfifo2_addr; - __le32 rxfifo2_size; - __le32 page_buff_addr; - __le32 page_buff_size; - __le32 lmac_num; - struct iwl_shared_mem_lmac_cfg lmac_smem[2]; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ - /** * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index ba32753fda5a..9d54005b6d50 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -212,7 +212,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, struct iwl_fw_error_dump_data **dump_data) { struct iwl_fw_error_dump_fifo *fifo_hdr; - struct iwl_mvm_shared_mem_cfg *cfg = &mvm->smem_cfg; + struct iwl_fwrt_shared_mem_cfg *cfg = &mvm->fwrt.smem_cfg; u32 *fifo_data; u32 fifo_len; unsigned long flags; @@ -227,12 +227,12 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size, 
RXF_DIFF_FROM_PREV, 1); /* Pull LMAC2 RXF1 */ - if (mvm->smem_cfg.num_lmacs > 1) + if (mvm->fwrt.smem_cfg.num_lmacs > 1) iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size, LMAC2_PRPH_OFFSET, 2); /* Pull TXF data from LMAC1 */ - for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) { + for (i = 0; i < mvm->fwrt.smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i); iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i], @@ -240,8 +240,8 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, } /* Pull TXF data from LMAC2 */ - if (mvm->smem_cfg.num_lmacs > 1) { - for (i = 0; i < mvm->smem_cfg.num_txfifo_entries; i++) { + if (mvm->fwrt.smem_cfg.num_lmacs > 1) { + for (i = 0; i < mvm->fwrt.smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM + LMAC2_PRPH_OFFSET, @@ -257,11 +257,11 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; - i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size); + i < ARRAY_SIZE(mvm->fwrt.smem_cfg.internal_txfifo_size); i++) { fifo_hdr = (void *)(*dump_data)->data; fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->smem_cfg.internal_txfifo_size[i]; + fifo_len = mvm->fwrt.smem_cfg.internal_txfifo_size[i]; /* No need to try to read the data if the length is 0 */ if (fifo_len == 0) @@ -277,7 +277,7 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, /* Mark the number of TXF we're pulling now */ iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i + - mvm->smem_cfg.num_txfifo_entries); + mvm->fwrt.smem_cfg.num_txfifo_entries); fifo_hdr->available_bytes = cpu_to_le32(iwl_trans_read_prph(mvm->trans, @@ -582,7 +582,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->smem_cfg; + struct iwl_fwrt_shared_mem_cfg *mem_cfg = &mvm->fwrt.smem_cfg; fifo_data_len = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 47715eec22e6..161b7452baa2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -568,95 +568,6 @@ out: return ret; } -static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data; - int i, lmac; - int lmac_num = le32_to_cpu(mem_cfg->lmac_num); - - if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem))) - return; - - mvm->smem_cfg.num_lmacs = lmac_num; - mvm->smem_cfg.num_txfifo_entries = - ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size); - mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size); - - for (lmac = 0; lmac < lmac_num; lmac++) { - struct iwl_shared_mem_lmac_cfg *lmac_cfg = - &mem_cfg->lmac_smem[lmac]; - - for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++) - mvm->smem_cfg.lmac[lmac].txfifo_size[i] = - le32_to_cpu(lmac_cfg->txfifo_size[i]); - mvm->smem_cfg.lmac[lmac].rxfifo1_size = - le32_to_cpu(lmac_cfg->rxfifo1_size); - } -} - -static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm, - struct iwl_rx_packet *pkt) -{ - struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data; - int i; - - mvm->smem_cfg.num_lmacs = 1; - - mvm->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size); - for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) 
- mvm->smem_cfg.lmac[0].txfifo_size[i] = - le32_to_cpu(mem_cfg->txfifo_size[i]); - - mvm->smem_cfg.lmac[0].rxfifo1_size = - le32_to_cpu(mem_cfg->rxfifo_size[0]); - mvm->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]); - - /* new API has more data, from rxfifo_addr field and on */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { - BUILD_BUG_ON(sizeof(mvm->smem_cfg.internal_txfifo_size) != - sizeof(mem_cfg->internal_txfifo_size)); - - for (i = 0; - i < ARRAY_SIZE(mvm->smem_cfg.internal_txfifo_size); - i++) - mvm->smem_cfg.internal_txfifo_size[i] = - le32_to_cpu(mem_cfg->internal_txfifo_size[i]); - } -} - -static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm) -{ - struct iwl_host_cmd cmd = { - .flags = CMD_WANT_SKB, - .data = { NULL, }, - .len = { 0, }, - }; - struct iwl_rx_packet *pkt; - - lockdep_assert_held(&mvm->mutex); - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) - cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0); - else - cmd.id = SHARED_MEM_CFG; - - if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd))) - return; - - pkt = cmd.resp_pkt; - if (iwl_mvm_has_new_tx_api(mvm)) - iwl_mvm_parse_shared_mem_a000(mvm, pkt); - else - iwl_mvm_parse_shared_mem(mvm, pkt); - - IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n"); - - iwl_free_resp(&cmd); -} - static int iwl_mvm_config_ltr(struct iwl_mvm *mvm) { struct iwl_ltr_config_cmd cmd = { @@ -1174,7 +1085,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } - iwl_mvm_get_shared_mem_conf(mvm); + iwl_get_shared_mem_conf(&mvm->fwrt); ret = iwl_mvm_sf_update(mvm, NULL, false); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index f8fe15cd80c5..739e7cbea82f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -607,19 +607,6 @@ enum iwl_mvm_tdls_cs_state { IWL_MVM_TDLS_SW_ACTIVE, }; -#define MAX_NUM_LMAC 2 -struct iwl_mvm_shared_mem_cfg { - int num_lmacs; - int num_txfifo_entries; - struct { - u32 txfifo_size[TX_FIFO_MAX_NUM]; - u32 rxfifo1_size; - } lmac[MAX_NUM_LMAC]; - u32 rxfifo2_size; - u32 internal_txfifo_addr; - u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; -}; - /** * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn @@ -1053,7 +1040,6 @@ struct iwl_mvm { } peer; } tdls_cs; - struct iwl_mvm_shared_mem_cfg smem_cfg; u32 ciphers[IWL_MVM_NUM_CIPHERS]; struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 60360ed73f26..c7ca6bd3129c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -761,7 +761,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * fifo to be able to send bursts. */ max_amsdu_len = min_t(unsigned int, max_amsdu_len, - mvm->smem_cfg.lmac[0].txfifo_size[txf] - 256); + mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] - + 256); if (unlikely(dbg_max_amsdu_len)) max_amsdu_len = min_t(unsigned int, max_amsdu_len, -- cgit v1.2.3-55-g7522 From 702e975d6a60027968423a4fdaaf0831da87b73b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 2 Jun 2017 11:56:58 +0200 Subject: iwlwifi: track current firmware image in common code Track the current firmware image in the common code instead of in the opmode so that later patches can access it there in a common way. 
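Minimal sketch of the pattern the op-mode follows after this change: record the image about to run through the new helper, and fall back to the previously tracked image if the load fails. iwl_fw_set_current_image() and fwrt->cur_fw_img come from the fw/runtime.h hunk below; example_do_load()/example_start_fw() are hypothetical stand-ins for the op-mode's real load path (iwl_mvm_load_ucode_wait_alive() in the mvm hunks).

/* illustrative only -- not part of this patch */
static int example_do_load(struct iwl_fw_runtime *fwrt,
			   enum iwl_ucode_type type)
{
	/* hypothetical stand-in for the op-mode's real firmware load */
	return 0;
}

static int example_start_fw(struct iwl_fw_runtime *fwrt,
			    enum iwl_ucode_type type)
{
	enum iwl_ucode_type old_type = fwrt->cur_fw_img;
	int ret;

	/* record the image we are about to run */
	iwl_fw_set_current_image(fwrt, type);

	ret = example_do_load(fwrt, type);
	if (ret)
		/* restore the previous image on failure, as mvm does */
		iwl_fw_set_current_image(fwrt, old_type);

	return ret;
}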
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 10 ++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 12 ++++++------ drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 6 +++--- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 14 +++++++------- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 - drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 12 ++++++------ drivers/net/wireless/intel/iwlwifi/mvm/power.c | 23 ++++++++++++----------- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 6 +++--- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 10 +++++----- 9 files changed, 52 insertions(+), 42 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 185902ce7e13..c483a76cb0e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -85,6 +85,8 @@ struct iwl_fwrt_shared_mem_cfg { * @num_of_paging_blk: number of paging blocks * @num_of_pages_in_last_blk: number of pages in the last block * @smem_cfg: saved firmware SMEM configuration + * @cur_fw_img: current firmware image, must be maintained by + * the driver by calling &iwl_fw_set_current_image() */ struct iwl_fw_runtime { struct iwl_trans *trans; @@ -96,6 +98,8 @@ struct iwl_fw_runtime { u16 num_of_paging_blk; u16 num_of_pages_in_last_blk; + enum iwl_ucode_type cur_fw_img; + /* memory configuration */ struct iwl_fwrt_shared_mem_cfg smem_cfg; }; @@ -110,6 +114,12 @@ static inline void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, fwrt->dev = trans->dev; } +static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, + enum iwl_ucode_type cur_fw_img) +{ + fwrt->cur_fw_img = cur_fw_img; +} + int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type); void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index c1c9c489edc9..9991494314e2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -84,7 +84,7 @@ static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, int pos, budget; if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); @@ -105,7 +105,7 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, int ret; if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; mutex_lock(&mvm->mutex); @@ -122,7 +122,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, u32 flush_arg; if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (kstrtou32(buf, 0, &flush_arg)) @@ -155,7 +155,7 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, int sta_id, drain, ret; if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) @@ -192,7 +192,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, return -EINVAL; /* default is to dump the entire data segment */ - img = &mvm->fw->img[mvm->cur_ucode]; + img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; 
len = img->sec[IWL_UCODE_SECTION_DATA].len; @@ -224,7 +224,7 @@ static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf, if (!iwl_mvm_firmware_running(mvm)) return -EINVAL; - img = &mvm->fw->img[mvm->cur_ucode]; + img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset; img_len = img->sec[IWL_UCODE_SECTION_DATA].len; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 9d54005b6d50..0c12e604f22b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -572,7 +572,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) { const struct fw_img *img; - img = &mvm->fw->img[mvm->cur_ucode]; + img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; } else { @@ -693,7 +693,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) /* Make room for fw's virtual image pages, if it exists */ if (!mvm->trans->cfg->gen2 && - mvm->fw->img[mvm->cur_ucode].paging_mem_size && + mvm->fw->img[mvm->fwrt.cur_fw_img].paging_mem_size && mvm->fwrt.fw_paging_db[0].fw_paging_block) file_len += mvm->fwrt.num_of_paging_blk * (sizeof(*dump_data) + @@ -832,7 +832,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) /* Dump fw's virtual image */ if (!mvm->trans->cfg->gen2 && - mvm->fw->img[mvm->cur_ucode].paging_mem_size && + mvm->fw->img[mvm->fwrt.cur_fw_img].paging_mem_size && mvm->fwrt.fw_paging_db[0].fw_paging_block) { for (i = 1; i < mvm->fwrt.num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 161b7452baa2..08108620b977 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -251,7 +251,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, struct iwl_mvm_alive_data alive_data; const struct fw_img *fw; int ret, i; - enum iwl_ucode_type old_type = mvm->cur_ucode; + enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img; static const u16 alive_cmd[] = { MVM_ALIVE }; struct iwl_sf_region st_fwrd_space; @@ -264,7 +264,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, fw = iwl_get_ucode_image(mvm->fw, ucode_type); if (WARN_ON(!fw)) return -EINVAL; - mvm->cur_ucode = ucode_type; + iwl_fw_set_current_image(&mvm->fwrt, ucode_type); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); iwl_init_notification_wait(&mvm->notif_wait, &alive_wait, @@ -273,7 +273,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT); if (ret) { - mvm->cur_ucode = old_type; + iwl_fw_set_current_image(&mvm->fwrt, old_type); iwl_remove_notification(&mvm->notif_wait, &alive_wait); return ret; } @@ -297,13 +297,13 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_prph(trans, SB_CPU_1_STATUS), iwl_read_prph(trans, SB_CPU_2_STATUS)); - mvm->cur_ucode = old_type; + iwl_fw_set_current_image(&mvm->fwrt, old_type); return ret; } if (!alive_data.valid) { IWL_ERR(mvm, "Loaded ucode is not valid!\n"); - mvm->cur_ucode = old_type; + iwl_fw_set_current_image(&mvm->fwrt, old_type); return -EIO; } @@ -432,7 +432,7 @@ error: static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) { struct iwl_phy_cfg_cmd 
phy_cfg_cmd; - enum iwl_ucode_type ucode_type = mvm->cur_ucode; + enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; /* Set parameters */ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); @@ -1064,7 +1064,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (ret) return ret; - return iwl_init_paging(&mvm->fwrt, mvm->cur_ucode); + return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img); } int iwl_mvm_up(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 739e7cbea82f..988f4c331d15 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -754,7 +754,6 @@ struct iwl_mvm { */ struct iwl_mvm_vif *bf_allowed_vif; - enum iwl_ucode_type cur_ucode; bool hw_registered; bool calibrating; u32 error_event_table[2]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 8c49ac903c8c..60f0c9975538 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -621,9 +621,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } mvm->sf_state = SF_UNINIT; if (iwl_mvm_has_new_tx_api(mvm)) - mvm->cur_ucode = IWL_UCODE_REGULAR; + iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); else - mvm->cur_ucode = IWL_UCODE_INIT; + iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); @@ -1133,7 +1133,7 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) * Stop the device if we run OPERATIONAL firmware or if we are in the * middle of the calibrations. */ - return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating); + return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating); } static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) @@ -1263,7 +1263,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) reprobe->dev = mvm->trans->dev; INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); - } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && + } else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR && mvm->hw_registered) { /* don't let the transport/FW power down */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -1441,7 +1441,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n"); - if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR)) + if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) return -EINVAL; set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status); @@ -1667,7 +1667,7 @@ int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm) IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n"); - if (WARN_ON_ONCE(mvm->cur_ucode != IWL_UCODE_REGULAR)) + if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)) return -EINVAL; mutex_lock(&mvm->d0i3_suspend_mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index e684811f8e8b..c05e5ac565ae 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -186,7 +186,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, if (!mvmvif->queue_params[ac].uapsd) continue; - if (mvm->cur_ucode != IWL_UCODE_WOWLAN) + if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN) cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK); @@ -220,14 +220,15 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, BIT(IEEE80211_AC_BK))) { cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK); cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL); - cmd->snooze_window = (mvm->cur_ucode == IWL_UCODE_WOWLAN) ? - cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : - cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); + cmd->snooze_window = + (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? + cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) : + cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW); } cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len; - if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags & + if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) { cmd->rx_data_timeout_uapsd = cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT); @@ -502,7 +503,7 @@ static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm, struct iwl_mac_power_cmd cmd = {}; iwl_mvm_power_build_cmd(mvm, vif, &cmd, - mvm->cur_ucode != IWL_UCODE_WOWLAN); + mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN); iwl_mvm_power_log(mvm, &cmd); #ifdef CONFIG_IWLWIFI_DEBUGFS memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd)); @@ -525,8 +526,8 @@ int iwl_mvm_power_update_device(struct iwl_mvm *mvm) cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #ifdef CONFIG_IWLWIFI_DEBUGFS - if ((mvm->cur_ucode == IWL_UCODE_WOWLAN) ? mvm->disable_power_off_d3 : - mvm->disable_power_off) + if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ? 
+ mvm->disable_power_off_d3 : mvm->disable_power_off) cmd.flags &= cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK); #endif @@ -933,7 +934,7 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, if (!mvmvif->bf_data.bf_enabled) return 0; - if (mvm->cur_ucode == IWL_UCODE_WOWLAN) + if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 453a785a3ea5..a638bd69a1f9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -629,7 +629,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device, mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) { + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto out; } @@ -680,7 +680,7 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) { + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto out; } @@ -795,7 +795,7 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, mutex_lock(&mvm->mutex); if (!iwl_mvm_firmware_running(mvm) || - mvm->cur_ucode != IWL_UCODE_REGULAR) { + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { ret = -EIO; goto unlock; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index fc5a490880d0..4e80c8fa4741 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -464,8 +464,8 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? "Init" : "RT"); + (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) + ? "Init" : "RT"); return; } @@ -500,7 +500,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) struct iwl_error_event_table table; u32 val; - if (mvm->cur_ucode == IWL_UCODE_INIT) { + if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) { if (!base) base = mvm->fw->init_errlog_ptr; } else { @@ -512,8 +512,8 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) IWL_ERR(mvm, "Not valid error log pointer 0x%08X for %s uCode\n", base, - (mvm->cur_ucode == IWL_UCODE_INIT) - ? "Init" : "RT"); + (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) + ? "Init" : "RT"); return; } -- cgit v1.2.3-55-g7522 From 7174beb60c53209c6e98c9cee20efdee97ef880f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 1 Jun 2017 16:03:19 +0200 Subject: iwlwifi: refactor firmware debug code Split out the firmware debug code to be more general, so that it can be used by different subdrivers. 
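A minimal sketch of the calling convention this refactor establishes: the dump helpers now take a struct iwl_fw_runtime and reach the hardware through fwrt->trans, so any subdriver that embeds an iwl_fw_runtime can host the common dump path. iwl_fwrt_dump_rxf() and the smem_cfg fields are taken from the fw/dbg.c and fw/runtime.h hunks below; example_dump_rxf2() is hypothetical and, since iwl_fwrt_dump_rxf() is file-local, would have to live in fw/dbg.c itself.

/* illustrative only -- not part of this patch */
static void example_dump_rxf2(struct iwl_fw_runtime *fwrt,
			      struct iwl_fw_error_dump_data **dump_data)
{
	/* mirrors the old mvm RXF2 call site, now fed from the runtime */
	iwl_fwrt_dump_rxf(fwrt, dump_data, fwrt->smem_cfg.rxfifo2_size,
			  RXF_DIFF_FROM_PREV, 1);
}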
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/Makefile | 2 +- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 1099 ++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 214 ++++ drivers/net/wireless/intel/iwlwifi/fw/init.c | 75 ++ drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 42 +- drivers/net/wireless/intel/iwlwifi/mvm/Makefile | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 1 - drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 9 +- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 1031 ------------------ drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h | 175 ---- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 8 +- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 7 +- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 72 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 40 +- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 99 +- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 11 +- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 7 +- .../net/wireless/intel/iwlwifi/mvm/time-event.c | 15 +- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 17 +- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 6 +- 20 files changed, 1541 insertions(+), 1391 deletions(-) create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/dbg.c create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/dbg.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/init.c delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index ddc2bfb501c1..fd12b7394c5c 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -11,7 +11,7 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o -iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o +iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c new file mode 100644 index 000000000000..77245fcba996 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -0,0 +1,1099 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include +#include "iwl-drv.h" +#include "runtime.h" +#include "dbg.h" +#include "iwl-io.h" +#include "iwl-prph.h" +#include "iwl-csr.h" + +/** + * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump + * + * @fwrt_ptr: pointer to the buffer coming from fwrt + * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the + * transport's data. 
+ * @trans_len: length of the valid data in trans_ptr + * @fwrt_len: length of the valid data in fwrt_ptr + */ +struct iwl_fw_dump_ptrs { + struct iwl_trans_dump_data *trans_ptr; + void *fwrt_ptr; + u32 fwrt_len; +}; + +#define RADIO_REG_MAX_READ 0x2ad +static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data) +{ + u8 *pos = (void *)(*dump_data)->data; + unsigned long flags; + int i; + + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) + return; + + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG); + (*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ); + + for (i = 0; i < RADIO_REG_MAX_READ; i++) { + u32 rd_cmd = RADIO_RSP_RD_CMD; + + rd_cmd |= i << RADIO_RSP_ADDR_POS; + iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd); + *pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT); + + pos++; + } + + *dump_data = iwl_fw_error_next_data(*dump_data); + + iwl_trans_release_nic_access(fwrt->trans, &flags); +} + +static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data, + int size, u32 offset, int fifo_num) +{ + struct iwl_fw_error_dump_fifo *fifo_hdr; + u32 *fifo_data; + u32 fifo_len; + int i; + + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = size; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + return; + + /* Add a TLV for the RXF */ + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); + (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(fifo_num); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + RXF_RD_D_SPACE + offset)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + RXF_RD_WR_PTR + offset)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + RXF_RD_RD_PTR + offset)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + RXF_RD_FENCE_PTR + offset)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + RXF_SET_FENCE_MODE + offset)); + + /* Lock fence */ + iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1); + /* Set fence pointer to the same place like WR pointer */ + iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1); + /* Set fence offset */ + iwl_trans_write_prph(fwrt->trans, + RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (i = 0; i < fifo_len; i++) + fifo_data[i] = iwl_trans_read_prph(fwrt->trans, + RXF_FIFO_RD_FENCE_INC + + offset); + *dump_data = iwl_fw_error_next_data(*dump_data); +} + +static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data, + int size, u32 offset, int fifo_num) +{ + struct iwl_fw_error_dump_fifo *fifo_hdr; + u32 *fifo_data; + u32 fifo_len; + int i; + + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = size; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + return; + + /* Add a TLV for the FIFO */ + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); + (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(fifo_num); + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + TXF_FIFO_ITEM_CNT + offset)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + TXF_WR_PTR + offset)); + fifo_hdr->rd_ptr = + 
cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + TXF_RD_PTR + offset)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + TXF_FENCE_PTR + offset)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + TXF_LOCK_FENCE + offset)); + + /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ + iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset, + TXF_WR_PTR + offset); + + /* Dummy-read to advance the read pointer to the head */ + iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (i = 0; i < fifo_len; i++) + fifo_data[i] = iwl_trans_read_prph(fwrt->trans, + TXF_READ_MODIFY_DATA + + offset); + *dump_data = iwl_fw_error_next_data(*dump_data); +} + +static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data) +{ + struct iwl_fw_error_dump_fifo *fifo_hdr; + struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; + u32 *fifo_data; + u32 fifo_len; + unsigned long flags; + int i, j; + + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) + return; + + /* Pull RXF1 */ + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); + /* Pull RXF2 */ + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, + RXF_DIFF_FROM_PREV, 1); + /* Pull LMAC2 RXF1 */ + if (fwrt->smem_cfg.num_lmacs > 1) + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, + LMAC2_PRPH_OFFSET, 2); + + /* Pull TXF data from LMAC1 */ + for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); + iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], + 0, i); + } + + /* Pull TXF data from LMAC2 */ + if (fwrt->smem_cfg.num_lmacs > 1) { + for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(fwrt->trans, + TXF_LARC_NUM + LMAC2_PRPH_OFFSET, + i); + iwl_fwrt_dump_txf(fwrt, dump_data, + cfg->lmac[1].txfifo_size[i], + LMAC2_PRPH_OFFSET, + i + cfg->num_txfifo_entries); + } + } + + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + /* Pull UMAC internal TXF data from all TXFs */ + for (i = 0; + i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); + i++) { + fifo_hdr = (void *)(*dump_data)->data; + fifo_data = (void *)fifo_hdr->data; + fifo_len = fwrt->smem_cfg.internal_txfifo_size[i]; + + /* No need to try to read the data if the length is 0 */ + if (fifo_len == 0) + continue; + + /* Add a TLV for the internal FIFOs */ + (*dump_data)->type = + cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF); + (*dump_data)->len = + cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); + + fifo_hdr->fifo_num = cpu_to_le32(i); + + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i + + fwrt->smem_cfg.num_txfifo_entries); + + fifo_hdr->available_bytes = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + TXF_CPU2_FIFO_ITEM_CNT)); + fifo_hdr->wr_ptr = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + TXF_CPU2_WR_PTR)); + fifo_hdr->rd_ptr = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + TXF_CPU2_RD_PTR)); + fifo_hdr->fence_ptr = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + TXF_CPU2_FENCE_PTR)); + fifo_hdr->fence_mode = + cpu_to_le32(iwl_trans_read_prph(fwrt->trans, + TXF_CPU2_LOCK_FENCE)); + + /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */ + iwl_trans_write_prph(fwrt->trans, + TXF_CPU2_READ_MODIFY_ADDR, + 
TXF_CPU2_WR_PTR); + + /* Dummy-read to advance the read pointer to head */ + iwl_trans_read_prph(fwrt->trans, + TXF_CPU2_READ_MODIFY_DATA); + + /* Read FIFO */ + fifo_len /= sizeof(u32); /* Size in DWORDS */ + for (j = 0; j < fifo_len; j++) + fifo_data[j] = + iwl_trans_read_prph(fwrt->trans, + TXF_CPU2_READ_MODIFY_DATA); + *dump_data = iwl_fw_error_next_data(*dump_data); + } + } + + iwl_trans_release_nic_access(fwrt->trans, &flags); +} + +#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ +#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */ + +struct iwl_prph_range { + u32 start, end; +}; + +static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = { + { .start = 0x00a00000, .end = 0x00a00000 }, + { .start = 0x00a0000c, .end = 0x00a00024 }, + { .start = 0x00a0002c, .end = 0x00a0003c }, + { .start = 0x00a00410, .end = 0x00a00418 }, + { .start = 0x00a00420, .end = 0x00a00420 }, + { .start = 0x00a00428, .end = 0x00a00428 }, + { .start = 0x00a00430, .end = 0x00a0043c }, + { .start = 0x00a00444, .end = 0x00a00444 }, + { .start = 0x00a004c0, .end = 0x00a004cc }, + { .start = 0x00a004d8, .end = 0x00a004d8 }, + { .start = 0x00a004e0, .end = 0x00a004f0 }, + { .start = 0x00a00840, .end = 0x00a00840 }, + { .start = 0x00a00850, .end = 0x00a00858 }, + { .start = 0x00a01004, .end = 0x00a01008 }, + { .start = 0x00a01010, .end = 0x00a01010 }, + { .start = 0x00a01018, .end = 0x00a01018 }, + { .start = 0x00a01024, .end = 0x00a01024 }, + { .start = 0x00a0102c, .end = 0x00a01034 }, + { .start = 0x00a0103c, .end = 0x00a01040 }, + { .start = 0x00a01048, .end = 0x00a01094 }, + { .start = 0x00a01c00, .end = 0x00a01c20 }, + { .start = 0x00a01c58, .end = 0x00a01c58 }, + { .start = 0x00a01c7c, .end = 0x00a01c7c }, + { .start = 0x00a01c28, .end = 0x00a01c54 }, + { .start = 0x00a01c5c, .end = 0x00a01c5c }, + { .start = 0x00a01c60, .end = 0x00a01cdc }, + { .start = 0x00a01ce0, .end = 0x00a01d0c }, + { .start = 0x00a01d18, .end = 0x00a01d20 }, + { .start = 0x00a01d2c, .end = 0x00a01d30 }, + { .start = 0x00a01d40, .end = 0x00a01d5c }, + { .start = 0x00a01d80, .end = 0x00a01d80 }, + { .start = 0x00a01d98, .end = 0x00a01d9c }, + { .start = 0x00a01da8, .end = 0x00a01da8 }, + { .start = 0x00a01db8, .end = 0x00a01df4 }, + { .start = 0x00a01dc0, .end = 0x00a01dfc }, + { .start = 0x00a01e00, .end = 0x00a01e2c }, + { .start = 0x00a01e40, .end = 0x00a01e60 }, + { .start = 0x00a01e68, .end = 0x00a01e6c }, + { .start = 0x00a01e74, .end = 0x00a01e74 }, + { .start = 0x00a01e84, .end = 0x00a01e90 }, + { .start = 0x00a01e9c, .end = 0x00a01ec4 }, + { .start = 0x00a01ed0, .end = 0x00a01ee0 }, + { .start = 0x00a01f00, .end = 0x00a01f1c }, + { .start = 0x00a01f44, .end = 0x00a01ffc }, + { .start = 0x00a02000, .end = 0x00a02048 }, + { .start = 0x00a02068, .end = 0x00a020f0 }, + { .start = 0x00a02100, .end = 0x00a02118 }, + { .start = 0x00a02140, .end = 0x00a0214c }, + { .start = 0x00a02168, .end = 0x00a0218c }, + { .start = 0x00a021c0, .end = 0x00a021c0 }, + { .start = 0x00a02400, .end = 0x00a02410 }, + { .start = 0x00a02418, .end = 0x00a02420 }, + { .start = 0x00a02428, .end = 0x00a0242c }, + { .start = 0x00a02434, .end = 0x00a02434 }, + { .start = 0x00a02440, .end = 0x00a02460 }, + { .start = 0x00a02468, .end = 0x00a024b0 }, + { .start = 0x00a024c8, .end = 0x00a024cc }, + { .start = 0x00a02500, .end = 0x00a02504 }, + { .start = 0x00a0250c, .end = 0x00a02510 }, + { .start = 0x00a02540, .end = 0x00a02554 }, + { .start = 0x00a02580, .end = 0x00a025f4 }, + { .start = 0x00a02600, .end = 0x00a0260c }, + { .start = 0x00a02648, .end 
= 0x00a02650 }, + { .start = 0x00a02680, .end = 0x00a02680 }, + { .start = 0x00a026c0, .end = 0x00a026d0 }, + { .start = 0x00a02700, .end = 0x00a0270c }, + { .start = 0x00a02804, .end = 0x00a02804 }, + { .start = 0x00a02818, .end = 0x00a0281c }, + { .start = 0x00a02c00, .end = 0x00a02db4 }, + { .start = 0x00a02df4, .end = 0x00a02fb0 }, + { .start = 0x00a03000, .end = 0x00a03014 }, + { .start = 0x00a0301c, .end = 0x00a0302c }, + { .start = 0x00a03034, .end = 0x00a03038 }, + { .start = 0x00a03040, .end = 0x00a03048 }, + { .start = 0x00a03060, .end = 0x00a03068 }, + { .start = 0x00a03070, .end = 0x00a03074 }, + { .start = 0x00a0307c, .end = 0x00a0307c }, + { .start = 0x00a03080, .end = 0x00a03084 }, + { .start = 0x00a0308c, .end = 0x00a03090 }, + { .start = 0x00a03098, .end = 0x00a03098 }, + { .start = 0x00a030a0, .end = 0x00a030a0 }, + { .start = 0x00a030a8, .end = 0x00a030b4 }, + { .start = 0x00a030bc, .end = 0x00a030bc }, + { .start = 0x00a030c0, .end = 0x00a0312c }, + { .start = 0x00a03c00, .end = 0x00a03c5c }, + { .start = 0x00a04400, .end = 0x00a04454 }, + { .start = 0x00a04460, .end = 0x00a04474 }, + { .start = 0x00a044c0, .end = 0x00a044ec }, + { .start = 0x00a04500, .end = 0x00a04504 }, + { .start = 0x00a04510, .end = 0x00a04538 }, + { .start = 0x00a04540, .end = 0x00a04548 }, + { .start = 0x00a04560, .end = 0x00a0457c }, + { .start = 0x00a04590, .end = 0x00a04598 }, + { .start = 0x00a045c0, .end = 0x00a045f4 }, +}; + +static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { + { .start = 0x00a05c00, .end = 0x00a05c18 }, + { .start = 0x00a05400, .end = 0x00a056e8 }, + { .start = 0x00a08000, .end = 0x00a098bc }, + { .start = 0x00a02400, .end = 0x00a02758 }, +}; + +static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start, + u32 len_bytes, __le32 *data) +{ + u32 i; + + for (i = 0; i < len_bytes; i += 4) + *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i)); +} + +static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start, + u32 len_bytes, __le32 *data) +{ + unsigned long flags; + bool success = false; + + if (iwl_trans_grab_nic_access(trans, &flags)) { + success = true; + _iwl_read_prph_block(trans, start, len_bytes, data); + iwl_trans_release_nic_access(trans, &flags); + } + + return success; +} + +static void iwl_dump_prph(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + const struct iwl_prph_range *iwl_prph_dump_addr, + u32 range_len) +{ + struct iwl_fw_error_dump_prph *prph; + unsigned long flags; + u32 i; + + if (!iwl_trans_grab_nic_access(trans, &flags)) + return; + + for (i = 0; i < range_len; i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); + (*data)->len = cpu_to_le32(sizeof(*prph) + + num_bytes_in_chunk); + prph = (void *)(*data)->data; + prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); + + _iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, + /* our range is inclusive, hence + 4 */ + iwl_prph_dump_addr[i].end - + iwl_prph_dump_addr[i].start + 4, + (void *)prph->data); + + *data = iwl_fw_error_next_data(*data); + } + + iwl_trans_release_nic_access(trans, &flags); +} + +/* + * alloc_sgtable - allocates scallerlist table in the given size, + * fills it with pages and returns it + * @size: the size (in bytes) of the table +*/ +static struct scatterlist *alloc_sgtable(int size) +{ + int alloc_size, nents, i; + struct page *new_page; + struct scatterlist *iter; + 
struct scatterlist *table; + + nents = DIV_ROUND_UP(size, PAGE_SIZE); + table = kcalloc(nents, sizeof(*table), GFP_KERNEL); + if (!table) + return NULL; + sg_init_table(table, nents); + iter = table; + for_each_sg(table, iter, sg_nents(table), i) { + new_page = alloc_page(GFP_KERNEL); + if (!new_page) { + /* release all previous allocated pages in the table */ + iter = table; + for_each_sg(table, iter, sg_nents(table), i) { + new_page = sg_page(iter); + if (new_page) + __free_page(new_page); + } + return NULL; + } + alloc_size = min_t(int, size, PAGE_SIZE); + size -= PAGE_SIZE; + sg_set_page(iter, new_page, alloc_size, 0); + } + return table; +} + +void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) +{ + struct iwl_fw_error_dump_file *dump_file; + struct iwl_fw_error_dump_data *dump_data; + struct iwl_fw_error_dump_info *dump_info; + struct iwl_fw_error_dump_mem *dump_mem; + struct iwl_fw_error_dump_trigger_desc *dump_trig; + struct iwl_fw_dump_ptrs *fw_error_dump; + struct scatterlist *sg_dump_data; + u32 sram_len, sram_ofs; + const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv; + u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; + u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; + u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ? + 0 : fwrt->trans->cfg->dccm2_len; + bool monitor_dump_only = false; + int i; + + /* there's no point in fw dump if the bus is dead */ + if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) { + IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n"); + goto out; + } + + if (fwrt->dump.trig && + fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) + monitor_dump_only = true; + + fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); + if (!fw_error_dump) + goto out; + + /* SRAM - include stack CCM if driver knows the values for it */ + if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { + const struct fw_img *img; + + img = &fwrt->fw->img[fwrt->cur_fw_img]; + sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; + sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; + } else { + sram_ofs = fwrt->trans->cfg->dccm_offset; + sram_len = fwrt->trans->cfg->dccm_len; + } + + /* reading RXF/TXF sizes */ + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { + struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; + + fifo_data_len = 0; + + /* Count RXF2 size */ + if (mem_cfg->rxfifo2_size) { + /* Add header info */ + fifo_data_len += mem_cfg->rxfifo2_size + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + + /* Count RXF1 sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + if (!mem_cfg->lmac[i].rxfifo1_size) + continue; + + /* Add header info */ + fifo_data_len += mem_cfg->lmac[i].rxfifo1_size + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + + /* Count TXF sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + int j; + + for (j = 0; j < mem_cfg->num_txfifo_entries; j++) { + if (!mem_cfg->lmac[i].txfifo_size[j]) + continue; + + /* Add header info */ + fifo_data_len += + mem_cfg->lmac[i].txfifo_size[j] + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + } + + if (fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { + for (i = 0; + i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); + i++) { + if (!mem_cfg->internal_txfifo_size[i]) + continue; + + /* Add header info */ + fifo_data_len += + mem_cfg->internal_txfifo_size[i] + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } + } + + /* Make 
room for PRPH registers */ + if (!fwrt->trans->cfg->gen2) { + for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); + i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = + iwl_prph_dump_addr_comm[i].end - + iwl_prph_dump_addr_comm[i].start + 4; + + prph_len += sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + } + + if (!fwrt->trans->cfg->gen2 && + fwrt->trans->cfg->mq_rx_supported) { + for (i = 0; i < + ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { + /* The range includes both boundaries */ + int num_bytes_in_chunk = + iwl_prph_dump_addr_9000[i].end - + iwl_prph_dump_addr_9000[i].start + 4; + + prph_len += sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_prph) + + num_bytes_in_chunk; + } + } + + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) + radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; + } + + file_len = sizeof(*dump_file) + + sizeof(*dump_data) * 2 + + fifo_data_len + + prph_len + + radio_len + + sizeof(*dump_info); + + /* Make room for the SMEM, if it exists */ + if (smem_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; + + /* Make room for the secondary SRAM, if it exists */ + if (sram2_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; + + /* Make room for MEM segments */ + for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + le32_to_cpu(fw_dbg_mem[i].len); + } + + /* Make room for fw's virtual image pages, if it exists */ + if (!fwrt->trans->cfg->gen2 && + fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && + fwrt->fw_paging_db[0].fw_paging_block) + file_len += fwrt->num_of_paging_blk * + (sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_paging) + + PAGING_BLOCK_SIZE); + + /* If we only want a monitor dump, reset the file length */ + if (monitor_dump_only) { + file_len = sizeof(*dump_file) + sizeof(*dump_data) + + sizeof(*dump_info); + } + + if (fwrt->dump.desc) + file_len += sizeof(*dump_data) + sizeof(*dump_trig) + + fwrt->dump.desc->len; + + if (!fwrt->fw->n_dbg_mem_tlv) + file_len += sram_len + sizeof(*dump_mem); + + dump_file = vzalloc(file_len); + if (!dump_file) { + kfree(fw_error_dump); + goto out; + } + + fw_error_dump->fwrt_ptr = dump_file; + + dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); + dump_data = (void *)dump_file->data; + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); + dump_data->len = cpu_to_le32(sizeof(*dump_info)); + dump_info = (void *)dump_data->data; + dump_info->device_family = + fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? 
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : + cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); + dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); + memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, + sizeof(dump_info->fw_human_readable)); + strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name, + sizeof(dump_info->dev_human_readable)); + strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, + sizeof(dump_info->bus_human_readable)); + + dump_data = iwl_fw_error_next_data(dump_data); + /* We only dump the FIFOs if the FW is in error state */ + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { + iwl_fw_dump_fifos(fwrt, &dump_data); + if (radio_len) + iwl_read_radio_regs(fwrt, &dump_data); + } + + if (fwrt->dump.desc) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); + dump_data->len = cpu_to_le32(sizeof(*dump_trig) + + fwrt->dump.desc->len); + dump_trig = (void *)dump_data->data; + memcpy(dump_trig, &fwrt->dump.desc->trig_desc, + sizeof(*dump_trig) + fwrt->dump.desc->len); + + dump_data = iwl_fw_error_next_data(dump_data); + } + + /* In case we only want monitor dump, skip to dump trasport data */ + if (monitor_dump_only) + goto dump_trans_data; + + if (!fwrt->fw->n_dbg_mem_tlv) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); + dump_mem->offset = cpu_to_le32(sram_ofs); + iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data, + sram_len); + dump_data = iwl_fw_error_next_data(dump_data); + } + + for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { + u32 len = le32_to_cpu(fw_dbg_mem[i].len); + u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); + bool success; + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = fw_dbg_mem[i].data_type; + dump_mem->offset = cpu_to_le32(ofs); + + switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) { + case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR): + iwl_trans_read_mem_bytes(fwrt->trans, ofs, + dump_mem->data, + len); + success = true; + break; + case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH): + success = iwl_read_prph_block(fwrt->trans, ofs, len, + (void *)dump_mem->data); + break; + default: + /* + * shouldn't get here, we ignored this kind + * of TLV earlier during the TLV parsing?! 
+ */ + WARN_ON(1); + success = false; + } + + if (success) + dump_data = iwl_fw_error_next_data(dump_data); + } + + if (smem_len) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM); + dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset); + iwl_trans_read_mem_bytes(fwrt->trans, + fwrt->trans->cfg->smem_offset, + dump_mem->data, smem_len); + dump_data = iwl_fw_error_next_data(dump_data); + } + + if (sram2_len) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); + dump_mem = (void *)dump_data->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); + dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset); + iwl_trans_read_mem_bytes(fwrt->trans, + fwrt->trans->cfg->dccm2_offset, + dump_mem->data, sram2_len); + dump_data = iwl_fw_error_next_data(dump_data); + } + + /* Dump fw's virtual image */ + if (!fwrt->trans->cfg->gen2 && + fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && + fwrt->fw_paging_db[0].fw_paging_block) { + for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) { + struct iwl_fw_error_dump_paging *paging; + struct page *pages = + fwrt->fw_paging_db[i].fw_paging_block; + dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys; + + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); + dump_data->len = cpu_to_le32(sizeof(*paging) + + PAGING_BLOCK_SIZE); + paging = (void *)dump_data->data; + paging->index = cpu_to_le32(i); + dma_sync_single_for_cpu(fwrt->trans->dev, addr, + PAGING_BLOCK_SIZE, + DMA_BIDIRECTIONAL); + memcpy(paging->data, page_address(pages), + PAGING_BLOCK_SIZE); + dump_data = iwl_fw_error_next_data(dump_data); + } + } + + if (prph_len) { + iwl_dump_prph(fwrt->trans, &dump_data, + iwl_prph_dump_addr_comm, + ARRAY_SIZE(iwl_prph_dump_addr_comm)); + + if (fwrt->trans->cfg->mq_rx_supported) + iwl_dump_prph(fwrt->trans, &dump_data, + iwl_prph_dump_addr_9000, + ARRAY_SIZE(iwl_prph_dump_addr_9000)); + } + +dump_trans_data: + fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, + fwrt->dump.trig); + fw_error_dump->fwrt_len = file_len; + if (fw_error_dump->trans_ptr) + file_len += fw_error_dump->trans_ptr->len; + dump_file->file_len = cpu_to_le32(file_len); + + sg_dump_data = alloc_sgtable(file_len); + if (sg_dump_data) { + sg_pcopy_from_buffer(sg_dump_data, + sg_nents(sg_dump_data), + fw_error_dump->fwrt_ptr, + fw_error_dump->fwrt_len, 0); + if (fw_error_dump->trans_ptr) + sg_pcopy_from_buffer(sg_dump_data, + sg_nents(sg_dump_data), + fw_error_dump->trans_ptr->data, + fw_error_dump->trans_ptr->len, + fw_error_dump->fwrt_len); + dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len, + GFP_KERNEL); + } + vfree(fw_error_dump->fwrt_ptr); + vfree(fw_error_dump->trans_ptr); + kfree(fw_error_dump); + +out: + iwl_fw_free_dump_desc(fwrt); + fwrt->dump.trig = NULL; + clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); +} +IWL_EXPORT_SYMBOL(iwl_fw_error_dump); + +const struct iwl_fw_dump_desc iwl_dump_desc_assert = { + .trig_desc = { + .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), + }, +}; +IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); + +int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, + const struct iwl_fw_dump_desc *desc, + const struct iwl_fw_dbg_trigger_tlv *trigger) +{ + unsigned int delay = 0; + + if (trigger) + delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); + + if (WARN(fwrt->trans->state == 
IWL_TRANS_NO_FW, + "Can't collect dbg data when FW isn't alive\n")) + return -EIO; + + if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status)) + return -EBUSY; + + if (WARN_ON(fwrt->dump.desc)) + iwl_fw_free_dump_desc(fwrt); + + IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n", + le32_to_cpu(desc->trig_desc.type)); + + fwrt->dump.desc = desc; + fwrt->dump.trig = trigger; + + schedule_delayed_work(&fwrt->dump.wk, delay); + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); + +int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + const struct iwl_fw_dbg_trigger_tlv *trigger) +{ + struct iwl_fw_dump_desc *desc; + + desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); + if (!desc) + return -ENOMEM; + + desc->len = len; + desc->trig_desc.type = cpu_to_le32(trig); + memcpy(desc->trig_desc.data, str, len); + + return iwl_fw_dbg_collect_desc(fwrt, desc, trigger); +} +IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); + +int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *fmt, ...) +{ + u16 occurrences = le16_to_cpu(trigger->occurrences); + int ret, len = 0; + char buf[64]; + + if (!occurrences) + return 0; + + if (fmt) { + va_list ap; + + buf[sizeof(buf) - 1] = '\0'; + + va_start(ap, fmt); + vsnprintf(buf, sizeof(buf), fmt, ap); + va_end(ap); + + /* check for truncation */ + if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) + buf[sizeof(buf) - 1] = '\0'; + + len = strlen(buf) + 1; + } + + ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, + trigger); + + if (ret) + return ret; + + trigger->occurrences = cpu_to_le16(occurrences - 1); + return 0; +} +IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig); + +int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id) +{ + u8 *ptr; + int ret; + int i; + + if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv), + "Invalid configuration %d\n", conf_id)) + return -EINVAL; + + /* EARLY START - firmware's configuration is hard coded */ + if ((!fwrt->fw->dbg_conf_tlv[conf_id] || + !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && + conf_id == FW_DBG_START_FROM_ALIVE) + return 0; + + if (!fwrt->fw->dbg_conf_tlv[conf_id]) + return -EINVAL; + + if (fwrt->dump.conf != FW_DBG_INVALID) + IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n", + fwrt->dump.conf); + + /* Send all HCMDs for configuring the FW debug */ + ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd; + for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { + struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; + struct iwl_host_cmd hcmd = { + .id = cmd->id, + .len = { le16_to_cpu(cmd->len), }, + .data = { cmd->data, }, + }; + + ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); + if (ret) + return ret; + + ptr += sizeof(*cmd); + ptr += le16_to_cpu(cmd->len); + } + + fwrt->dump.conf = conf_id; + + return 0; +} +IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf); + +void iwl_fw_error_dump_wk(struct work_struct *work) +{ + struct iwl_fw_runtime *fwrt = + container_of(work, struct iwl_fw_runtime, dump.wk.work); + + if (fwrt->ops && fwrt->ops->dump_start && + fwrt->ops->dump_start(fwrt->ops_ctx)) + return; + + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { + /* stop recording */ + iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100); + + iwl_fw_error_dump(fwrt); + + /* start recording again if the firmware is not crashed */ + if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && + fwrt->fw->dbg_dest_tlv) { + 
iwl_clear_bits_prph(fwrt->trans, + MON_BUFF_SAMPLE_CTL, 0x100); + iwl_clear_bits_prph(fwrt->trans, + MON_BUFF_SAMPLE_CTL, 0x1); + iwl_set_bits_prph(fwrt->trans, + MON_BUFF_SAMPLE_CTL, 0x1); + } + } else { + u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE); + u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL); + + /* stop recording */ + iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0); + udelay(100); + iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0); + /* wait before we collect the data till the DBGC stop */ + udelay(500); + + iwl_fw_error_dump(fwrt); + + /* start recording again if the firmware is not crashed */ + if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) && + fwrt->fw->dbg_dest_tlv) { + iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample); + iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); + } + } + + if (fwrt->ops && fwrt->ops->dump_end) + fwrt->ops->dump_end(fwrt->ops_ctx); +} + diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h new file mode 100644 index 000000000000..0f810ea89d31 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -0,0 +1,214 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_dbg_h__ +#define __iwl_fw_dbg_h__ +#include +#include +#include "runtime.h" +#include "file.h" +#include "error-dump.h" + +/** + * struct iwl_fw_dump_desc - describes the dump + * @len: length of trig_desc->data + * @trig_desc: the description of the dump + */ +struct iwl_fw_dump_desc { + size_t len; + /* must be last */ + struct iwl_fw_error_dump_trigger_desc trig_desc; +}; + +extern const struct iwl_fw_dump_desc iwl_dump_desc_assert; + +static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) +{ + if (fwrt->dump.desc != &iwl_dump_desc_assert) + kfree(fwrt->dump.desc); + fwrt->dump.desc = NULL; +} + +void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); +int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, + const struct iwl_fw_dump_desc *desc, + const struct iwl_fw_dbg_trigger_tlv *trigger); +int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + const struct iwl_fw_dbg_trigger_tlv *trigger); +int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_trigger_tlv *trigger, + const char *fmt, ...) 
__printf(3, 4); +int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id); + +#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ + void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ + unlikely(__dbg_trigger); \ +}) + +static inline struct iwl_fw_dbg_trigger_tlv* +_iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id) +{ + return fw->dbg_trigger_tlv[id]; +} + +#define iwl_fw_dbg_get_trigger(fw, id) ({ \ + BUILD_BUG_ON(!__builtin_constant_p(id)); \ + BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ + _iwl_fw_dbg_get_trigger((fw), (id)); \ +}) + +static inline bool +iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, + struct wireless_dev *wdev) +{ + u32 trig_vif = le32_to_cpu(trig->vif_type); + + return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || + wdev->iftype == trig_vif; +} + +static inline bool +iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_trigger_tlv *trig) +{ + return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && + (fwrt->dump.conf == FW_DBG_INVALID || + (BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids)))); +} + +static inline bool +iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dbg_trigger_tlv *trig) +{ + unsigned long wind_jiff = + msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms)); + u32 id = le32_to_cpu(trig->id); + + /* If this is the first event checked, jump to update start ts */ + if (fwrt->dump.non_collect_ts_start[id] && + (time_after(fwrt->dump.non_collect_ts_start[id] + wind_jiff, + jiffies))) + return true; + + fwrt->dump.non_collect_ts_start[id] = jiffies; + return false; +} + +static inline bool +iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt, + struct wireless_dev *wdev, + struct iwl_fw_dbg_trigger_tlv *trig) +{ + if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev)) + return false; + + if (iwl_fw_dbg_no_trig_window(fwrt, trig)) { + IWL_WARN(fwrt, "Trigger %d occurred while no-collect window.\n", + trig->id); + return false; + } + + return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig); +} + +static inline void +_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, + struct wireless_dev *wdev, + struct iwl_fw_dbg_trigger_tlv *trigger) +{ + if (!trigger) + return; + + if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trigger)) + return; + + iwl_fw_dbg_collect_trig(fwrt, trigger, NULL); +} + +#define iwl_fw_dbg_trigger_simple_stop(fwrt, wdev, trig) \ + _iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev), \ + iwl_fw_dbg_get_trigger((fwrt)->fw,\ + (trig))) + +static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) +{ + fwrt->dump.conf = FW_DBG_INVALID; +} + +void iwl_fw_error_dump_wk(struct work_struct *work); + +static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt) +{ + flush_delayed_work(&fwrt->dump.wk); +} + +static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt) +{ + cancel_delayed_work_sync(&fwrt->dump.wk); +} + +#endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c new file mode 100644 index 000000000000..bfe5316bbb6a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -0,0 +1,75 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "dbg.h" + +void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, + const struct iwl_fw *fw, + const struct iwl_fw_runtime_ops *ops, void *ops_ctx) +{ + memset(fwrt, 0, sizeof(*fwrt)); + fwrt->trans = trans; + fwrt->fw = fw; + fwrt->dev = trans->dev; + fwrt->dump.conf = FW_DBG_INVALID; + fwrt->ops = ops; + fwrt->ops_ctx = ops_ctx; + INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); +} +IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index c483a76cb0e5..02f1bc985383 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -63,6 +63,11 @@ #include "img.h" #include "api.h" +struct iwl_fw_runtime_ops { + int (*dump_start)(void *ctx); + void (*dump_end)(void *ctx); +}; + #define MAX_NUM_LMAC 2 struct iwl_fwrt_shared_mem_cfg { int num_lmacs; @@ -76,23 +81,36 @@ struct iwl_fwrt_shared_mem_cfg { u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; }; +enum iwl_fw_runtime_status { + IWL_FWRT_STATUS_DUMPING = 0, +}; + /** * struct iwl_fw_runtime - runtime data for firmware * @fw: firmware image * @cfg: NIC configuration * @dev: device pointer + * @ops: user ops + * @ops_ctx: user ops context + * @status: status flags * @fw_paging_db: paging database * @num_of_paging_blk: number of paging blocks * @num_of_pages_in_last_blk: number of pages in the last block * @smem_cfg: saved firmware SMEM configuration * @cur_fw_img: current firmware image, must be maintained by * the driver by calling &iwl_fw_set_current_image() + * @dump: debug dump data */ struct iwl_fw_runtime { struct iwl_trans *trans; const struct iwl_fw *fw; struct device *dev; + const struct iwl_fw_runtime_ops *ops; + void *ops_ctx; + + unsigned long status; + /* Paging */ struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS]; u16 num_of_paging_blk; @@ -102,17 +120,23 @@ struct iwl_fw_runtime { /* memory configuration */ struct iwl_fwrt_shared_mem_cfg smem_cfg; + + /* debug */ + struct { + const struct iwl_fw_dump_desc *desc; + const struct iwl_fw_dbg_trigger_tlv *trig; + struct delayed_work wk; + + u8 conf; + + /* ts of the beginning of a non-collect fw dbg data period */ + unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; + } dump; }; -static inline void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, - struct iwl_trans *trans, - const struct iwl_fw *fw) -{ - memset(fwrt, 0, sizeof(*fwrt)); - fwrt->trans = trans; - fwrt->fw = fw; - fwrt->dev = trans->dev; -} +void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, + const struct iwl_fw *fw, + const struct iwl_fw_runtime_ops *ops, void *ops_ctx); static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type cur_fw_img) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 83ac807e547d..00e6737dda72 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -6,7 +6,7 @@ iwlmvm-y += power.o coex.o iwlmvm-y += tt.o offloading.o tdls.o iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o -iwlmvm-y += tof.o fw-dbg.o +iwlmvm-y += tof.o iwlmvm-$(CONFIG_PM) += d3.o ccflags-y += -I$(src)/../ diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 6fda8627b726..21845034d80d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -111,7 +111,6 @@ #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 #define IWL_MVM_HW_CSUM_DISABLE 0 #define IWL_MVM_PARSE_NVM 0 -#define IWL_MVM_COLLECT_FW_ERR_DUMP 1 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 9991494314e2..29f1d1807415 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -69,7 +69,6 @@ #include #include "mvm.h" -#include "fw-dbg.h" #include "sta.h" #include "iwl-io.h" #include "debugfs.h" @@ -1123,7 +1122,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, int pos = 0; mutex_lock(&mvm->mutex); - conf = mvm->fw_dbg_conf; + conf = mvm->fwrt.dump.conf; mutex_unlock(&mvm->mutex); pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf); @@ -1190,7 +1189,7 @@ static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm, return -EINVAL; mutex_lock(&mvm->mutex); - ret = iwl_mvm_start_fw_dbg_conf(mvm, conf_id); + ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id); mutex_unlock(&mvm->mutex); return ret ?: count; @@ -1211,8 +1210,8 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, if (count == 0) return 0; - iwl_mvm_fw_dbg_collect(mvm, FW_DBG_TRIGGER_USER, buf, - (count - 1), NULL); + iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, + (count - 1), NULL); iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c deleted file mode 100644 index 0c12e604f22b..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ /dev/null @@ -1,1031 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include - -#include "fw-dbg.h" -#include "iwl-io.h" -#include "mvm.h" -#include "iwl-prph.h" -#include "iwl-csr.h" - -#define RADIO_REG_MAX_READ 0x2ad -static void iwl_mvm_read_radio_reg(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data) -{ - u8 *pos = (void *)(*dump_data)->data; - unsigned long flags; - int i; - - if (!iwl_trans_grab_nic_access(mvm->trans, &flags)) - return; - - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG); - (*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ); - - for (i = 0; i < RADIO_REG_MAX_READ; i++) { - u32 rd_cmd = RADIO_RSP_RD_CMD; - - rd_cmd |= i << RADIO_RSP_ADDR_POS; - iwl_write_prph_no_grab(mvm->trans, RSP_RADIO_CMD, rd_cmd); - *pos = (u8)iwl_read_prph_no_grab(mvm->trans, RSP_RADIO_RDDAT); - - pos++; - } - - *dump_data = iwl_fw_error_next_data(*dump_data); - - iwl_trans_release_nic_access(mvm->trans, &flags); -} - -static void iwl_mvm_dump_rxf(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data, - int size, u32 offset, int fifo_num) -{ - struct iwl_fw_error_dump_fifo *fifo_hdr; - u32 *fifo_data; - u32 fifo_len; - int i; - - fifo_hdr = (void *)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = size; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - return; - - /* Add a TLV for the RXF */ - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF); - (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(fifo_num); - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_D_SPACE + offset)); - fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_WR_PTR + offset)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_RD_PTR + offset)); - fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_RD_FENCE_PTR + offset)); - fifo_hdr->fence_mode = - 
cpu_to_le32(iwl_trans_read_prph(mvm->trans, - RXF_SET_FENCE_MODE + offset)); - - /* Lock fence */ - iwl_trans_write_prph(mvm->trans, RXF_SET_FENCE_MODE + offset, 0x1); - /* Set fence pointer to the same place like WR pointer */ - iwl_trans_write_prph(mvm->trans, RXF_LD_WR2FENCE + offset, 0x1); - /* Set fence offset */ - iwl_trans_write_prph(mvm->trans, - RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (i = 0; i < fifo_len; i++) - fifo_data[i] = iwl_trans_read_prph(mvm->trans, - RXF_FIFO_RD_FENCE_INC + - offset); - *dump_data = iwl_fw_error_next_data(*dump_data); -} - -static void iwl_mvm_dump_txf(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data, - int size, u32 offset, int fifo_num) -{ - struct iwl_fw_error_dump_fifo *fifo_hdr; - u32 *fifo_data; - u32 fifo_len; - int i; - - fifo_hdr = (void *)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = size; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - return; - - /* Add a TLV for the FIFO */ - (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF); - (*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(fifo_num); - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_FIFO_ITEM_CNT + offset)); - fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_WR_PTR + offset)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_RD_PTR + offset)); - fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_FENCE_PTR + offset)); - fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_LOCK_FENCE + offset)); - - /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */ - iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR + offset, - TXF_WR_PTR + offset); - - /* Dummy-read to advance the read pointer to the head */ - iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA + offset); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (i = 0; i < fifo_len; i++) - fifo_data[i] = iwl_trans_read_prph(mvm->trans, - TXF_READ_MODIFY_DATA + - offset); - *dump_data = iwl_fw_error_next_data(*dump_data); -} - -static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm, - struct iwl_fw_error_dump_data **dump_data) -{ - struct iwl_fw_error_dump_fifo *fifo_hdr; - struct iwl_fwrt_shared_mem_cfg *cfg = &mvm->fwrt.smem_cfg; - u32 *fifo_data; - u32 fifo_len; - unsigned long flags; - int i, j; - - if (!iwl_trans_grab_nic_access(mvm->trans, &flags)) - return; - - /* Pull RXF1 */ - iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); - /* Pull RXF2 */ - iwl_mvm_dump_rxf(mvm, dump_data, cfg->rxfifo2_size, - RXF_DIFF_FROM_PREV, 1); - /* Pull LMAC2 RXF1 */ - if (mvm->fwrt.smem_cfg.num_lmacs > 1) - iwl_mvm_dump_rxf(mvm, dump_data, cfg->lmac[1].rxfifo1_size, - LMAC2_PRPH_OFFSET, 2); - - /* Pull TXF data from LMAC1 */ - for (i = 0; i < mvm->fwrt.smem_cfg.num_txfifo_entries; i++) { - /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i); - iwl_mvm_dump_txf(mvm, dump_data, cfg->lmac[0].txfifo_size[i], - 0, i); - } - - /* Pull TXF data from LMAC2 */ - if (mvm->fwrt.smem_cfg.num_lmacs > 1) { - for (i = 0; i < mvm->fwrt.smem_cfg.num_txfifo_entries; i++) { - /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, - TXF_LARC_NUM + LMAC2_PRPH_OFFSET, - i); - iwl_mvm_dump_txf(mvm, dump_data, - cfg->lmac[1].txfifo_size[i], - 
LMAC2_PRPH_OFFSET, - i + cfg->num_txfifo_entries); - } - } - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { - /* Pull UMAC internal TXF data from all TXFs */ - for (i = 0; - i < ARRAY_SIZE(mvm->fwrt.smem_cfg.internal_txfifo_size); - i++) { - fifo_hdr = (void *)(*dump_data)->data; - fifo_data = (void *)fifo_hdr->data; - fifo_len = mvm->fwrt.smem_cfg.internal_txfifo_size[i]; - - /* No need to try to read the data if the length is 0 */ - if (fifo_len == 0) - continue; - - /* Add a TLV for the internal FIFOs */ - (*dump_data)->type = - cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF); - (*dump_data)->len = - cpu_to_le32(fifo_len + sizeof(*fifo_hdr)); - - fifo_hdr->fifo_num = cpu_to_le32(i); - - /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i + - mvm->fwrt.smem_cfg.num_txfifo_entries); - - fifo_hdr->available_bytes = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_CPU2_FIFO_ITEM_CNT)); - fifo_hdr->wr_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_CPU2_WR_PTR)); - fifo_hdr->rd_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_CPU2_RD_PTR)); - fifo_hdr->fence_ptr = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_CPU2_FENCE_PTR)); - fifo_hdr->fence_mode = - cpu_to_le32(iwl_trans_read_prph(mvm->trans, - TXF_CPU2_LOCK_FENCE)); - - /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */ - iwl_trans_write_prph(mvm->trans, - TXF_CPU2_READ_MODIFY_ADDR, - TXF_CPU2_WR_PTR); - - /* Dummy-read to advance the read pointer to head */ - iwl_trans_read_prph(mvm->trans, - TXF_CPU2_READ_MODIFY_DATA); - - /* Read FIFO */ - fifo_len /= sizeof(u32); /* Size in DWORDS */ - for (j = 0; j < fifo_len; j++) - fifo_data[j] = - iwl_trans_read_prph(mvm->trans, - TXF_CPU2_READ_MODIFY_DATA); - *dump_data = iwl_fw_error_next_data(*dump_data); - } - } - - iwl_trans_release_nic_access(mvm->trans, &flags); -} - -void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm) -{ - if (mvm->fw_dump_desc != &iwl_mvm_dump_desc_assert) - kfree(mvm->fw_dump_desc); - mvm->fw_dump_desc = NULL; -} - -#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */ -#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */ - -struct iwl_prph_range { - u32 start, end; -}; - -static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = { - { .start = 0x00a00000, .end = 0x00a00000 }, - { .start = 0x00a0000c, .end = 0x00a00024 }, - { .start = 0x00a0002c, .end = 0x00a0003c }, - { .start = 0x00a00410, .end = 0x00a00418 }, - { .start = 0x00a00420, .end = 0x00a00420 }, - { .start = 0x00a00428, .end = 0x00a00428 }, - { .start = 0x00a00430, .end = 0x00a0043c }, - { .start = 0x00a00444, .end = 0x00a00444 }, - { .start = 0x00a004c0, .end = 0x00a004cc }, - { .start = 0x00a004d8, .end = 0x00a004d8 }, - { .start = 0x00a004e0, .end = 0x00a004f0 }, - { .start = 0x00a00840, .end = 0x00a00840 }, - { .start = 0x00a00850, .end = 0x00a00858 }, - { .start = 0x00a01004, .end = 0x00a01008 }, - { .start = 0x00a01010, .end = 0x00a01010 }, - { .start = 0x00a01018, .end = 0x00a01018 }, - { .start = 0x00a01024, .end = 0x00a01024 }, - { .start = 0x00a0102c, .end = 0x00a01034 }, - { .start = 0x00a0103c, .end = 0x00a01040 }, - { .start = 0x00a01048, .end = 0x00a01094 }, - { .start = 0x00a01c00, .end = 0x00a01c20 }, - { .start = 0x00a01c58, .end = 0x00a01c58 }, - { .start = 0x00a01c7c, .end = 0x00a01c7c }, - { .start = 0x00a01c28, .end = 0x00a01c54 }, - { .start = 0x00a01c5c, .end = 0x00a01c5c }, - { .start = 0x00a01c60, .end = 0x00a01cdc }, - { .start = 0x00a01ce0, .end = 0x00a01d0c 
}, - { .start = 0x00a01d18, .end = 0x00a01d20 }, - { .start = 0x00a01d2c, .end = 0x00a01d30 }, - { .start = 0x00a01d40, .end = 0x00a01d5c }, - { .start = 0x00a01d80, .end = 0x00a01d80 }, - { .start = 0x00a01d98, .end = 0x00a01d9c }, - { .start = 0x00a01da8, .end = 0x00a01da8 }, - { .start = 0x00a01db8, .end = 0x00a01df4 }, - { .start = 0x00a01dc0, .end = 0x00a01dfc }, - { .start = 0x00a01e00, .end = 0x00a01e2c }, - { .start = 0x00a01e40, .end = 0x00a01e60 }, - { .start = 0x00a01e68, .end = 0x00a01e6c }, - { .start = 0x00a01e74, .end = 0x00a01e74 }, - { .start = 0x00a01e84, .end = 0x00a01e90 }, - { .start = 0x00a01e9c, .end = 0x00a01ec4 }, - { .start = 0x00a01ed0, .end = 0x00a01ee0 }, - { .start = 0x00a01f00, .end = 0x00a01f1c }, - { .start = 0x00a01f44, .end = 0x00a01ffc }, - { .start = 0x00a02000, .end = 0x00a02048 }, - { .start = 0x00a02068, .end = 0x00a020f0 }, - { .start = 0x00a02100, .end = 0x00a02118 }, - { .start = 0x00a02140, .end = 0x00a0214c }, - { .start = 0x00a02168, .end = 0x00a0218c }, - { .start = 0x00a021c0, .end = 0x00a021c0 }, - { .start = 0x00a02400, .end = 0x00a02410 }, - { .start = 0x00a02418, .end = 0x00a02420 }, - { .start = 0x00a02428, .end = 0x00a0242c }, - { .start = 0x00a02434, .end = 0x00a02434 }, - { .start = 0x00a02440, .end = 0x00a02460 }, - { .start = 0x00a02468, .end = 0x00a024b0 }, - { .start = 0x00a024c8, .end = 0x00a024cc }, - { .start = 0x00a02500, .end = 0x00a02504 }, - { .start = 0x00a0250c, .end = 0x00a02510 }, - { .start = 0x00a02540, .end = 0x00a02554 }, - { .start = 0x00a02580, .end = 0x00a025f4 }, - { .start = 0x00a02600, .end = 0x00a0260c }, - { .start = 0x00a02648, .end = 0x00a02650 }, - { .start = 0x00a02680, .end = 0x00a02680 }, - { .start = 0x00a026c0, .end = 0x00a026d0 }, - { .start = 0x00a02700, .end = 0x00a0270c }, - { .start = 0x00a02804, .end = 0x00a02804 }, - { .start = 0x00a02818, .end = 0x00a0281c }, - { .start = 0x00a02c00, .end = 0x00a02db4 }, - { .start = 0x00a02df4, .end = 0x00a02fb0 }, - { .start = 0x00a03000, .end = 0x00a03014 }, - { .start = 0x00a0301c, .end = 0x00a0302c }, - { .start = 0x00a03034, .end = 0x00a03038 }, - { .start = 0x00a03040, .end = 0x00a03048 }, - { .start = 0x00a03060, .end = 0x00a03068 }, - { .start = 0x00a03070, .end = 0x00a03074 }, - { .start = 0x00a0307c, .end = 0x00a0307c }, - { .start = 0x00a03080, .end = 0x00a03084 }, - { .start = 0x00a0308c, .end = 0x00a03090 }, - { .start = 0x00a03098, .end = 0x00a03098 }, - { .start = 0x00a030a0, .end = 0x00a030a0 }, - { .start = 0x00a030a8, .end = 0x00a030b4 }, - { .start = 0x00a030bc, .end = 0x00a030bc }, - { .start = 0x00a030c0, .end = 0x00a0312c }, - { .start = 0x00a03c00, .end = 0x00a03c5c }, - { .start = 0x00a04400, .end = 0x00a04454 }, - { .start = 0x00a04460, .end = 0x00a04474 }, - { .start = 0x00a044c0, .end = 0x00a044ec }, - { .start = 0x00a04500, .end = 0x00a04504 }, - { .start = 0x00a04510, .end = 0x00a04538 }, - { .start = 0x00a04540, .end = 0x00a04548 }, - { .start = 0x00a04560, .end = 0x00a0457c }, - { .start = 0x00a04590, .end = 0x00a04598 }, - { .start = 0x00a045c0, .end = 0x00a045f4 }, -}; - -static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = { - { .start = 0x00a05c00, .end = 0x00a05c18 }, - { .start = 0x00a05400, .end = 0x00a056e8 }, - { .start = 0x00a08000, .end = 0x00a098bc }, - { .start = 0x00a02400, .end = 0x00a02758 }, -}; - -static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start, - u32 len_bytes, __le32 *data) -{ - u32 i; - - for (i = 0; i < len_bytes; i += 4) - *data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, 
start + i)); -} - -static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start, - u32 len_bytes, __le32 *data) -{ - unsigned long flags; - bool success = false; - - if (iwl_trans_grab_nic_access(trans, &flags)) { - success = true; - _iwl_read_prph_block(trans, start, len_bytes, data); - iwl_trans_release_nic_access(trans, &flags); - } - - return success; -} - -static void iwl_dump_prph(struct iwl_trans *trans, - struct iwl_fw_error_dump_data **data, - const struct iwl_prph_range *iwl_prph_dump_addr, - u32 range_len) -{ - struct iwl_fw_error_dump_prph *prph; - unsigned long flags; - u32 i; - - if (!iwl_trans_grab_nic_access(trans, &flags)) - return; - - for (i = 0; i < range_len; i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4; - - (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); - (*data)->len = cpu_to_le32(sizeof(*prph) + - num_bytes_in_chunk); - prph = (void *)(*data)->data; - prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start); - - _iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start, - /* our range is inclusive, hence + 4 */ - iwl_prph_dump_addr[i].end - - iwl_prph_dump_addr[i].start + 4, - (void *)prph->data); - - *data = iwl_fw_error_next_data(*data); - } - - iwl_trans_release_nic_access(trans, &flags); -} - -/* - * alloc_sgtable - allocates a scatterlist table of the given size, - * fills it with pages and returns it - * @size: the size (in bytes) of the table -*/ -static struct scatterlist *alloc_sgtable(int size) -{ - int alloc_size, nents, i; - struct page *new_page; - struct scatterlist *iter; - struct scatterlist *table; - - nents = DIV_ROUND_UP(size, PAGE_SIZE); - table = kcalloc(nents, sizeof(*table), GFP_KERNEL); - if (!table) - return NULL; - sg_init_table(table, nents); - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = alloc_page(GFP_KERNEL); - if (!new_page) { - /* release all previous allocated pages in the table */ - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = sg_page(iter); - if (new_page) - __free_page(new_page); - } - return NULL; - } - alloc_size = min_t(int, size, PAGE_SIZE); - size -= PAGE_SIZE; - sg_set_page(iter, new_page, alloc_size, 0); - } - return table; -} - -void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) -{ - struct iwl_fw_error_dump_file *dump_file; - struct iwl_fw_error_dump_data *dump_data; - struct iwl_fw_error_dump_info *dump_info; - struct iwl_fw_error_dump_mem *dump_mem; - struct iwl_fw_error_dump_trigger_desc *dump_trig; - struct iwl_mvm_dump_ptrs *fw_error_dump; - struct scatterlist *sg_dump_data; - u32 sram_len, sram_ofs; - const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = mvm->fw->dbg_mem_tlv; - u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; - u32 smem_len = mvm->fw->n_dbg_mem_tlv ? 0 : mvm->cfg->smem_len; - u32 sram2_len = mvm->fw->n_dbg_mem_tlv ?
0 : mvm->cfg->dccm2_len; - bool monitor_dump_only = false; - int i; - - if (!IWL_MVM_COLLECT_FW_ERR_DUMP && - !mvm->trans->dbg_dest_tlv) - return; - - lockdep_assert_held(&mvm->mutex); - - /* there's no point in fw dump if the bus is dead */ - if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) { - IWL_ERR(mvm, "Skip fw error dump since bus is dead\n"); - goto out; - } - - if (mvm->fw_dump_trig && - mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) - monitor_dump_only = true; - - fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL); - if (!fw_error_dump) - goto out; - - /* SRAM - include stack CCM if driver knows the values for it */ - if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) { - const struct fw_img *img; - - img = &mvm->fw->img[mvm->fwrt.cur_fw_img]; - sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset; - sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; - } else { - sram_ofs = mvm->cfg->dccm_offset; - sram_len = mvm->cfg->dccm_len; - } - - /* reading RXF/TXF sizes */ - if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - struct iwl_fwrt_shared_mem_cfg *mem_cfg = &mvm->fwrt.smem_cfg; - - fifo_data_len = 0; - - /* Count RXF2 size */ - if (mem_cfg->rxfifo2_size) { - /* Add header info */ - fifo_data_len += mem_cfg->rxfifo2_size + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - - /* Count RXF1 sizes */ - for (i = 0; i < mem_cfg->num_lmacs; i++) { - if (!mem_cfg->lmac[i].rxfifo1_size) - continue; - - /* Add header info */ - fifo_data_len += mem_cfg->lmac[i].rxfifo1_size + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - - /* Count TXF sizes */ - for (i = 0; i < mem_cfg->num_lmacs; i++) { - int j; - - for (j = 0; j < mem_cfg->num_txfifo_entries; j++) { - if (!mem_cfg->lmac[i].txfifo_size[j]) - continue; - - /* Add header info */ - fifo_data_len += - mem_cfg->lmac[i].txfifo_size[j] + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - } - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { - for (i = 0; - i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); - i++) { - if (!mem_cfg->internal_txfifo_size[i]) - continue; - - /* Add header info */ - fifo_data_len += - mem_cfg->internal_txfifo_size[i] + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - } - - /* Make room for PRPH registers */ - if (!mvm->trans->cfg->gen2) { - for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); - i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = - iwl_prph_dump_addr_comm[i].end - - iwl_prph_dump_addr_comm[i].start + 4; - - prph_len += sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_prph) + - num_bytes_in_chunk; - } - } - - if (!mvm->trans->cfg->gen2 && mvm->cfg->mq_rx_supported) { - for (i = 0; i < - ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { - /* The range includes both boundaries */ - int num_bytes_in_chunk = - iwl_prph_dump_addr_9000[i].end - - iwl_prph_dump_addr_9000[i].start + 4; - - prph_len += sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_prph) + - num_bytes_in_chunk; - } - } - - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) - radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; - } - - file_len = sizeof(*dump_file) + - sizeof(*dump_data) * 2 + - fifo_data_len + - prph_len + - radio_len + - sizeof(*dump_info); - - /* Make room for the SMEM, if it exists */ - if (smem_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; - - /* Make room for the secondary SRAM, if it exists */ - if (sram2_len) - file_len 
+= sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; - - /* Make room for MEM segments */ - for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - le32_to_cpu(fw_dbg_mem[i].len); - } - - /* Make room for fw's virtual image pages, if it exists */ - if (!mvm->trans->cfg->gen2 && - mvm->fw->img[mvm->fwrt.cur_fw_img].paging_mem_size && - mvm->fwrt.fw_paging_db[0].fw_paging_block) - file_len += mvm->fwrt.num_of_paging_blk * - (sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_paging) + - PAGING_BLOCK_SIZE); - - /* If we only want a monitor dump, reset the file length */ - if (monitor_dump_only) { - file_len = sizeof(*dump_file) + sizeof(*dump_data) + - sizeof(*dump_info); - } - - if (mvm->fw_dump_desc) - file_len += sizeof(*dump_data) + sizeof(*dump_trig) + - mvm->fw_dump_desc->len; - - if (!mvm->fw->n_dbg_mem_tlv) - file_len += sram_len + sizeof(*dump_mem); - - dump_file = vzalloc(file_len); - if (!dump_file) { - kfree(fw_error_dump); - goto out; - } - - fw_error_dump->op_mode_ptr = dump_file; - - dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); - dump_data = (void *)dump_file->data; - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); - dump_data->len = cpu_to_le32(sizeof(*dump_info)); - dump_info = (void *)dump_data->data; - dump_info->device_family = - mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? - cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : - cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); - dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev)); - memcpy(dump_info->fw_human_readable, mvm->fw->human_readable, - sizeof(dump_info->fw_human_readable)); - strncpy(dump_info->dev_human_readable, mvm->cfg->name, - sizeof(dump_info->dev_human_readable)); - strncpy(dump_info->bus_human_readable, mvm->dev->bus->name, - sizeof(dump_info->bus_human_readable)); - - dump_data = iwl_fw_error_next_data(dump_data); - /* We only dump the FIFOs if the FW is in error state */ - if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) { - iwl_mvm_dump_fifos(mvm, &dump_data); - if (radio_len) - iwl_mvm_read_radio_reg(mvm, &dump_data); - } - - if (mvm->fw_dump_desc) { - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); - dump_data->len = cpu_to_le32(sizeof(*dump_trig) + - mvm->fw_dump_desc->len); - dump_trig = (void *)dump_data->data; - memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc, - sizeof(*dump_trig) + mvm->fw_dump_desc->len); - - dump_data = iwl_fw_error_next_data(dump_data); - } - - /* In case we only want monitor dump, skip to dump transport data */ - if (monitor_dump_only) - goto dump_trans_data; - - if (!mvm->fw->n_dbg_mem_tlv) { - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(sram_ofs); - iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data, - sram_len); - dump_data = iwl_fw_error_next_data(dump_data); - } - - for (i = 0; i < mvm->fw->n_dbg_mem_tlv; i++) { - u32 len = le32_to_cpu(fw_dbg_mem[i].len); - u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); - bool success; - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = fw_dbg_mem[i].data_type; - dump_mem->offset = cpu_to_le32(ofs); - - switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) { - case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR): -
iwl_trans_read_mem_bytes(mvm->trans, ofs, - dump_mem->data, - len); - success = true; - break; - case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH): - success = iwl_read_prph_block(mvm->trans, ofs, len, - (void *)dump_mem->data); - break; - default: - /* - * shouldn't get here, we ignored this kind - * of TLV earlier during the TLV parsing?! - */ - WARN_ON(1); - success = false; - } - - if (success) - dump_data = iwl_fw_error_next_data(dump_data); - } - - if (smem_len) { - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM); - dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset); - iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset, - dump_mem->data, smem_len); - dump_data = iwl_fw_error_next_data(dump_data); - } - - if (sram2_len) { - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); - dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); - dump_mem = (void *)dump_data->data; - dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM); - dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset); - iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset, - dump_mem->data, sram2_len); - dump_data = iwl_fw_error_next_data(dump_data); - } - - /* Dump fw's virtual image */ - if (!mvm->trans->cfg->gen2 && - mvm->fw->img[mvm->fwrt.cur_fw_img].paging_mem_size && - mvm->fwrt.fw_paging_db[0].fw_paging_block) { - for (i = 1; i < mvm->fwrt.num_of_paging_blk + 1; i++) { - struct iwl_fw_error_dump_paging *paging; - struct page *pages = - mvm->fwrt.fw_paging_db[i].fw_paging_block; - dma_addr_t addr = mvm->fwrt.fw_paging_db[i].fw_paging_phys; - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); - dump_data->len = cpu_to_le32(sizeof(*paging) + - PAGING_BLOCK_SIZE); - paging = (void *)dump_data->data; - paging->index = cpu_to_le32(i); - dma_sync_single_for_cpu(mvm->trans->dev, addr, - PAGING_BLOCK_SIZE, - DMA_BIDIRECTIONAL); - memcpy(paging->data, page_address(pages), - PAGING_BLOCK_SIZE); - dump_data = iwl_fw_error_next_data(dump_data); - } - } - - if (prph_len) { - iwl_dump_prph(mvm->trans, &dump_data, - iwl_prph_dump_addr_comm, - ARRAY_SIZE(iwl_prph_dump_addr_comm)); - - if (mvm->cfg->mq_rx_supported) - iwl_dump_prph(mvm->trans, &dump_data, - iwl_prph_dump_addr_9000, - ARRAY_SIZE(iwl_prph_dump_addr_9000)); - } - -dump_trans_data: - fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans, - mvm->fw_dump_trig); - fw_error_dump->op_mode_len = file_len; - if (fw_error_dump->trans_ptr) - file_len += fw_error_dump->trans_ptr->len; - dump_file->file_len = cpu_to_le32(file_len); - - sg_dump_data = alloc_sgtable(file_len); - if (sg_dump_data) { - sg_pcopy_from_buffer(sg_dump_data, - sg_nents(sg_dump_data), - fw_error_dump->op_mode_ptr, - fw_error_dump->op_mode_len, 0); - if (fw_error_dump->trans_ptr) - sg_pcopy_from_buffer(sg_dump_data, - sg_nents(sg_dump_data), - fw_error_dump->trans_ptr->data, - fw_error_dump->trans_ptr->len, - fw_error_dump->op_mode_len); - dev_coredumpsg(mvm->trans->dev, sg_dump_data, file_len, - GFP_KERNEL); - } - vfree(fw_error_dump->op_mode_ptr); - vfree(fw_error_dump->trans_ptr); - kfree(fw_error_dump); - -out: - iwl_mvm_free_fw_dump_desc(mvm); - mvm->fw_dump_trig = NULL; - clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status); -} - -const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = { - .trig_desc = { - .type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT), - }, -}; - -int iwl_mvm_fw_dbg_collect_desc(struct 
iwl_mvm *mvm, - const struct iwl_mvm_dump_desc *desc, - const struct iwl_fw_dbg_trigger_tlv *trigger) -{ - unsigned int delay = 0; - - if (trigger) - delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay)); - - if (WARN(mvm->trans->state == IWL_TRANS_NO_FW, - "Can't collect dbg data when FW isn't alive\n")) - return -EIO; - - if (test_and_set_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status)) - return -EBUSY; - - if (WARN_ON(mvm->fw_dump_desc)) - iwl_mvm_free_fw_dump_desc(mvm); - - IWL_WARN(mvm, "Collecting data: trigger %d fired.\n", - le32_to_cpu(desc->trig_desc.type)); - - mvm->fw_dump_desc = desc; - mvm->fw_dump_trig = trigger; - - schedule_delayed_work(&mvm->fw_dump_wk, delay); - - return 0; -} - -int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - const struct iwl_fw_dbg_trigger_tlv *trigger) -{ - struct iwl_mvm_dump_desc *desc; - - desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); - if (!desc) - return -ENOMEM; - - desc->len = len; - desc->trig_desc.type = cpu_to_le32(trig); - memcpy(desc->trig_desc.data, str, len); - - return iwl_mvm_fw_dbg_collect_desc(mvm, desc, trigger); -} - -int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trigger, - const char *fmt, ...) -{ - u16 occurrences = le16_to_cpu(trigger->occurrences); - int ret, len = 0; - char buf[64]; - - if (!occurrences) - return 0; - - if (fmt) { - va_list ap; - - buf[sizeof(buf) - 1] = '\0'; - - va_start(ap, fmt); - vsnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); - - /* check for truncation */ - if (WARN_ON_ONCE(buf[sizeof(buf) - 1])) - buf[sizeof(buf) - 1] = '\0'; - - len = strlen(buf) + 1; - } - - ret = iwl_mvm_fw_dbg_collect(mvm, le32_to_cpu(trigger->id), buf, len, - trigger); - - if (ret) - return ret; - - trigger->occurrences = cpu_to_le16(occurrences - 1); - return 0; -} - -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) -{ - u8 *ptr; - int ret; - int i; - - if (WARN_ONCE(conf_id >= ARRAY_SIZE(mvm->fw->dbg_conf_tlv), - "Invalid configuration %d\n", conf_id)) - return -EINVAL; - - /* EARLY START - firmware's configuration is hard coded */ - if ((!mvm->fw->dbg_conf_tlv[conf_id] || - !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && - conf_id == FW_DBG_START_FROM_ALIVE) - return 0; - - if (!mvm->fw->dbg_conf_tlv[conf_id]) - return -EINVAL; - - if (mvm->fw_dbg_conf != FW_DBG_INVALID) - IWL_WARN(mvm, "FW already configured (%d) - re-configuring\n", - mvm->fw_dbg_conf); - - /* Send all HCMDs for configuring the FW debug */ - ptr = (void *)&mvm->fw->dbg_conf_tlv[conf_id]->hcmd; - for (i = 0; i < mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) { - struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr; - - ret = iwl_mvm_send_cmd_pdu(mvm, cmd->id, 0, - le16_to_cpu(cmd->len), cmd->data); - if (ret) - return ret; - - ptr += sizeof(*cmd); - ptr += le16_to_cpu(cmd->len); - } - - mvm->fw_dbg_conf = conf_id; - - return 0; -} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h deleted file mode 100644 index 4a5287a0c617..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h +++ /dev/null @@ -1,175 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __mvm_fw_dbg_h__ -#define __mvm_fw_dbg_h__ -#include "fw/file.h" -#include "fw/error-dump.h" -#include "mvm.h" - -void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm); -void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm); -int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm, - const struct iwl_mvm_dump_desc *desc, - const struct iwl_fw_dbg_trigger_tlv *trigger); -int iwl_mvm_fw_dbg_collect(struct iwl_mvm *mvm, enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - const struct iwl_fw_dbg_trigger_tlv *trigger); -int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trigger, - const char *fmt, ...) 
__printf(3, 4); -int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 id); - -#define iwl_fw_dbg_trigger_enabled(fw, id) ({ \ - void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)]; \ - unlikely(__dbg_trigger); \ -}) - -static inline struct iwl_fw_dbg_trigger_tlv* -_iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id) -{ - return fw->dbg_trigger_tlv[id]; -} - -#define iwl_fw_dbg_get_trigger(fw, id) ({ \ - BUILD_BUG_ON(!__builtin_constant_p(id)); \ - BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX); \ - _iwl_fw_dbg_get_trigger((fw), (id)); \ -}) - -static inline bool -iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig, - struct ieee80211_vif *vif) -{ - u32 trig_vif = le32_to_cpu(trig->vif_type); - - return trig_vif == IWL_FW_DBG_CONF_VIF_ANY || - ieee80211_vif_type_p2p(vif) == trig_vif; -} - -static inline bool -iwl_fw_dbg_trigger_stop_conf_match(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trig) -{ - return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) && - (mvm->fw_dbg_conf == FW_DBG_INVALID || - (BIT(mvm->fw_dbg_conf) & le32_to_cpu(trig->stop_conf_ids)))); -} - -static inline bool -iwl_fw_dbg_no_trig_window(struct iwl_mvm *mvm, - struct iwl_fw_dbg_trigger_tlv *trig) -{ - unsigned long wind_jiff = - msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms)); - u32 id = le32_to_cpu(trig->id); - - /* If this is the first event checked, jump to update start ts */ - if (mvm->fw_dbg_non_collect_ts_start[id] && - (time_after(mvm->fw_dbg_non_collect_ts_start[id] + wind_jiff, - jiffies))) - return true; - - mvm->fw_dbg_non_collect_ts_start[id] = jiffies; - return false; -} - -static inline bool -iwl_fw_dbg_trigger_check_stop(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_fw_dbg_trigger_tlv *trig) -{ - if (vif && !iwl_fw_dbg_trigger_vif_match(trig, vif)) - return false; - - if (iwl_fw_dbg_no_trig_window(mvm, trig)) { - IWL_WARN(mvm, "Trigger %d occurred while no-collect window.\n", - trig->id); - return false; - } - - return iwl_fw_dbg_trigger_stop_conf_match(mvm, trig); -} - -static inline void -_iwl_fw_dbg_trigger_simple_stop(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct iwl_fw_dbg_trigger_tlv *trigger) -{ - if (!trigger) - return; - - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) - return; - - iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); -} - -#define iwl_fw_dbg_trigger_simple_stop(mvm, vif, trig) \ - _iwl_fw_dbg_trigger_simple_stop((mvm), (vif), \ - iwl_fw_dbg_get_trigger((mvm)->fw,\ - (trig))) - -#endif /* __mvm_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 08108620b977..e6f6de2500db 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -78,7 +78,7 @@ #include "iwl-eeprom-parse.h" #include "mvm.h" -#include "fw-dbg.h" +#include "fw/dbg.h" #include "iwl-phy-db.h" #define MVM_UCODE_ALIVE_TIMEOUT HZ @@ -1091,11 +1091,11 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); - mvm->fw_dbg_conf = FW_DBG_INVALID; + mvm->fwrt.dump.conf = FW_DBG_INVALID; /* if we have a destination, assume EARLY START */ if (mvm->fw->dbg_dest_tlv) - mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE; - iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE); + mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; + iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index dc631b23e189..d130bdd76368 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -72,7 +72,6 @@ #include "fw-api.h" #include "mvm.h" #include "time-event.h" -#include "fw-dbg.h" const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_VO, @@ -1559,12 +1558,14 @@ static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac, /* TODO: implement start trigger */ - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trigger)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), + trigger)) return; if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx || rx_missed_bcon >= stop_trig_missed_bcon) - iwl_mvm_fw_dbg_collect_trig(mvm, trigger, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL); } void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index bcde1ba0f1c8..ab6c6bf79565 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -87,7 +87,6 @@ #include "fw/error-dump.h" #include "iwl-prph.h" #include "iwl-nvm-parse.h" -#include "fw-dbg.h" static const struct ieee80211_iface_limit iwl_mvm_limits[] = { { @@ -845,11 +844,11 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) return true; } -#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \ - do { \ - if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ - break; \ - iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \ +#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \ + do { \ + if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ + break; \ + iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ } while (0) static void @@ -866,7 +865,8 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; switch (action) { @@ -1029,8 +1029,8 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) * on D3->D0 transition */ if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) { - mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert; - iwl_mvm_fw_error_dump(mvm); + mvm->fwrt.dump.desc = &iwl_dump_desc_assert; + iwl_fw_error_dump(&mvm->fwrt); } /* cleanup all stale references (scan, roc), but keep the @@ -1072,7 +1072,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) mvm->vif_count = 0; mvm->rx_ba_sessions = 0; - mvm->fw_dbg_conf = FW_DBG_INVALID; + mvm->fwrt.dump.conf = FW_DBG_INVALID; /* keep statistics ticking */ iwl_mvm_accu_radio_stats(mvm); @@ -1249,16 +1249,16 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) * Lock and clear the firmware running bit here already, so that * new commands coming in elsewhere, e.g. from debugfs, will not * be able to proceed. This is important here because one of those - * debugfs files causes the fw_dump_wk to be triggered, and if we + * debugfs files causes the firmware dump to be triggered, and if we * don't stop debugfs accesses before canceling that it could be * retriggered after we flush it but before we've cleared the bit. 
*/ clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); - cancel_delayed_work_sync(&mvm->fw_dump_wk); + iwl_fw_cancel_dump(&mvm->fwrt); cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); cancel_delayed_work_sync(&mvm->scan_timeout_dwork); - iwl_mvm_free_fw_dump_desc(mvm); + iwl_fw_free_dump_desc(&mvm->fwrt); mutex_lock(&mvm->mutex); __iwl_mvm_mac_stop(mvm); @@ -2566,7 +2566,8 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS); tdls_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (!(tdls_trig->action_bitmap & BIT(action))) @@ -2576,9 +2577,9 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "TDLS event occurred, peer %pM, action %d", - peer_addr, action); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "TDLS event occurred, peer %pM, action %d", + peer_addr, action); } static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, @@ -3876,7 +3877,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", chsw->chandef.center_freq1); - iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH); + iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), + FW_DBG_TRIGGER_CHANNEL_SWITCH); switch (vif->type) { case NL80211_IFTYPE_AP: @@ -4151,11 +4154,11 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, struct ieee80211_vif *vif, const struct ieee80211_event *event) { -#define CHECK_MLME_TRIGGER(_cnt, _fmt...) \ - do { \ - if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ - break; \ - iwl_mvm_fw_dbg_collect_trig(mvm, trig, _fmt); \ +#define CHECK_MLME_TRIGGER(_cnt, _fmt...) 
\ + do { \ + if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ + break; \ + iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ } while (0) struct iwl_fw_dbg_trigger_tlv *trig; @@ -4166,7 +4169,8 @@ static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (event->u.mlme.data == ASSOC_EVENT) { @@ -4207,16 +4211,17 @@ static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "BAR received from %pM, tid %d, ssn %d", - event->u.ba.sta->addr, event->u.ba.tid, - event->u.ba.ssn); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "BAR received from %pM, tid %d, ssn %d", + event->u.ba.sta->addr, event->u.ba.tid, + event->u.ba.ssn); } static void @@ -4232,15 +4237,16 @@ iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) return; if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid))) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Frame from %pM timed out, tid %d", - event->u.ba.sta->addr, event->u.ba.tid); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "Frame from %pM timed out, tid %d", + event->u.ba.sta->addr, event->u.ba.tid); } static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 988f4c331d15..6fa2c44e6edd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -88,6 +88,7 @@ #include "constants.h" #include "tof.h" #include "fw/runtime.h" +#include "fw/dbg.h" #define IWL_MVM_MAX_ADDRESSES 5 /* RSSI offset for WkP */ @@ -138,34 +139,6 @@ struct iwl_mvm_mod_params { }; extern struct iwl_mvm_mod_params iwlmvm_mod_params; -/** - * struct iwl_mvm_dump_ptrs - set of pointers needed for the fw-error-dump - * - * @op_mode_ptr: pointer to the buffer coming from the mvm op_mode - * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the - * transport's data. 
- * @trans_len: length of the valid data in trans_ptr - * @op_mode_len: length of the valid data in op_mode_ptr - */ -struct iwl_mvm_dump_ptrs { - struct iwl_trans_dump_data *trans_ptr; - void *op_mode_ptr; - u32 op_mode_len; -}; - -/** - * struct iwl_mvm_dump_desc - describes the dump - * @len: length of trig_desc->data - * @trig_desc: the description of the dump - */ -struct iwl_mvm_dump_desc { - size_t len; - /* must be last */ - struct iwl_fw_error_dump_trigger_desc trig_desc; -}; - -extern const struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert; - struct iwl_mvm_phy_ctxt { u16 id; u16 color; @@ -831,9 +804,6 @@ struct iwl_mvm { /* max number of simultaneous scans the FW supports */ unsigned int max_scans; - /* ts of the beginning of a non-collect fw dbg data period */ - unsigned long fw_dbg_non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; - /* UMAC scan tracking */ u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; @@ -909,10 +879,6 @@ struct iwl_mvm { /* -1 for always, 0 for never, >0 for that many times */ s8 fw_restart; - u8 fw_dbg_conf; - struct delayed_work fw_dump_wk; - const struct iwl_mvm_dump_desc *fw_dump_desc; - const struct iwl_fw_dbg_trigger_tlv *fw_dump_trig; #ifdef CONFIG_IWLWIFI_LEDS struct led_classdev led; @@ -1077,7 +1043,6 @@ struct iwl_mvm { * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3 * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done - * @IWL_MVM_STATUS_DUMPING_FW_LOG: FW log is being dumped * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running */ enum iwl_mvm_status { @@ -1088,7 +1053,6 @@ enum iwl_mvm_status { IWL_MVM_STATUS_IN_D0I3, IWL_MVM_STATUS_ROC_AUX_RUNNING, IWL_MVM_STATUS_D3_RECONFIG, - IWL_MVM_STATUS_DUMPING_FW_LOG, IWL_MVM_STATUS_FIRMWARE_RUNNING, }; @@ -1780,7 +1744,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) { iwl_free_fw_paging(&mvm->fwrt); clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); - mvm->fw_dbg_conf = FW_DBG_INVALID; + iwl_fw_dump_conf_clear(&mvm->fwrt); iwl_trans_stop_device(mvm->trans); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 60f0c9975538..3c4d82045c1c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -84,7 +84,6 @@ #include "rs.h" #include "fw-api-scan.h" #include "time-event.h" -#include "fw-dbg.h" #include "fw-api.h" #include "fw-api-scan.h" @@ -510,8 +509,6 @@ static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg) return 0; } -static void iwl_mvm_fw_error_dump_wk(struct work_struct *work); - static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) { struct iwl_mvm *mvm = @@ -535,6 +532,34 @@ unlock: mutex_unlock(&mvm->mutex); } +static int iwl_mvm_fwrt_dump_start(void *ctx) +{ + struct iwl_mvm *mvm = ctx; + int ret; + + ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT); + if (ret) + return ret; + + mutex_lock(&mvm->mutex); + + return 0; +} + +static void iwl_mvm_fwrt_dump_end(void *ctx) +{ + struct iwl_mvm *mvm = ctx; + + mutex_unlock(&mvm->mutex); + + iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); +} + +static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { + .dump_start = iwl_mvm_fwrt_dump_start, + .dump_end = iwl_mvm_fwrt_dump_end, +}; + static struct iwl_op_mode * iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, struct dentry *dbgfs_dir) @@ -580,7 +605,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, 
const struct iwl_cfg *cfg, mvm->fw = fw; mvm->hw = hw; - iwl_fw_runtime_init(&mvm->fwrt, trans, fw); + iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm); mvm->init_status = 0; @@ -639,7 +664,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); - INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk); @@ -802,7 +826,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); out_free: - flush_delayed_work(&mvm->fw_dump_wk); + iwl_fw_flush_dump(&mvm->fwrt); if (iwlmvm_mod_params.init_dbg) return op_mode; @@ -922,7 +946,7 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF); cmds_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) { @@ -934,9 +958,9 @@ static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, cmds_trig->cmds[i].group_id != pkt->hdr.group_id) continue; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "CMD 0x%02x.%02x received", - pkt->hdr.group_id, pkt->hdr.cmd); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "CMD 0x%02x.%02x received", + pkt->hdr.group_id, pkt->hdr.cmd); break; } } @@ -1162,57 +1186,6 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) module_put(THIS_MODULE); } -static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) -{ - struct iwl_mvm *mvm = - container_of(work, struct iwl_mvm, fw_dump_wk.work); - - if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT)) - return; - - mutex_lock(&mvm->mutex); - - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { - /* stop recording */ - iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); - - iwl_mvm_fw_error_dump(mvm); - - /* start recording again if the firmware is not crashed */ - if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && - mvm->fw->dbg_dest_tlv) { - iwl_clear_bits_prph(mvm->trans, - MON_BUFF_SAMPLE_CTL, 0x100); - iwl_clear_bits_prph(mvm->trans, - MON_BUFF_SAMPLE_CTL, 0x1); - iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x1); - } - } else { - u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE); - u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL); - - /* stop recording */ - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); - udelay(100); - iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0); - /* wait before we collect the data till the DBGC stop */ - udelay(500); - - iwl_mvm_fw_error_dump(mvm); - - /* start recording again if the firmware is not crashed */ - if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && - mvm->fw->dbg_dest_tlv) { - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample); - iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl); - } - } - - mutex_unlock(&mvm->mutex); - - iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); -} - void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) { iwl_abort_notification_waits(&mvm->notif_wait); @@ -1236,8 +1209,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) * can't recover this since we're already 
half suspended. */ if (!mvm->fw_restart && fw_error) { - iwl_mvm_fw_dbg_collect_desc(mvm, &iwl_mvm_dump_desc_assert, - NULL); + iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, + NULL); } else if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 622d543abb70..184c749766f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -67,7 +67,6 @@ #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" -#include "fw-dbg.h" /* * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler @@ -397,10 +396,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rssi = le32_to_cpu(rssi_trig->rssi); trig_check = - iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif, + iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(mvmsta->vif), trig); if (trig_check && rx_status->signal < rssi) - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + NULL); } if (ieee80211_is_data(hdr->frame_control)) @@ -624,7 +625,7 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS); trig_stats = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; trig_offset = le32_to_cpu(trig_stats->stop_offset); @@ -636,7 +637,7 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL); } void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index f3e608196369..13733252c1fb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -63,7 +63,6 @@ #include "iwl-trans.h" #include "mvm.h" #include "fw-api.h" -#include "fw-dbg.h" static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) @@ -906,10 +905,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rssi = le32_to_cpu(rssi_trig->rssi); trig_check = - iwl_fw_dbg_trigger_check_stop(mvm, mvmsta->vif, + iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(mvmsta->vif), trig); if (trig_check && rx_status->signal < rssi) - iwl_mvm_fw_dbg_collect_trig(mvm, trig, NULL); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + NULL); } if (ieee80211_is_data(hdr->frame_control)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 5a682722adce..fcf2d1e4ff4e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -73,7 +73,6 @@ #include "mvm.h" #include "iwl-io.h" #include "iwl-prph.h" -#include "fw-dbg.h" /* * For the high priority TE use a time event type that has similar priority to @@ -248,7 +247,9 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT); te_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, te_data->vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + 
ieee80211_vif_to_wdev(te_data->vif), + trig)) return; for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) { @@ -263,11 +264,11 @@ static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm, !(trig_status_bitmap & BIT(le32_to_cpu(notif->status)))) continue; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Time event %d Action 0x%x received status: %d", - te_data->id, - le32_to_cpu(notif->action), - le32_to_cpu(notif->status)); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "Time event %d Action 0x%x received status: %d", + te_data->id, + le32_to_cpu(notif->action), + le32_to_cpu(notif->status)); break; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index c7ca6bd3129c..f263a1902e27 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -74,7 +74,6 @@ #include "iwl-eeprom-parse.h" #include "mvm.h" #include "sta.h" -#include "fw-dbg.h" static void iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, @@ -89,15 +88,15 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA); ba_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid))) return; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "BAR sent to %pM, tid %d, ssn %d", - addr, tid, ssn); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "BAR sent to %pM, tid %d, ssn %d", + addr, tid, ssn); } #define OPT_HDR(type, skb, off) \ @@ -1296,7 +1295,7 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); status_trig = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig)) return; for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { @@ -1307,9 +1306,9 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) continue; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, - "Tx status %d was received", - status & TX_STATUS_MSK); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, + "Tx status %d was received", + status & TX_STATUS_MSK); break; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 4e80c8fa4741..3ccd16f26b91 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -70,7 +70,6 @@ #include "iwl-io.h" #include "iwl-prph.h" #include "iwl-csr.h" -#include "fw-dbg.h" #include "mvm.h" #include "fw-api-rs.h" @@ -1190,14 +1189,15 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif, trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME); trig_mlme = (void *)trig->data; - if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig)) + if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, + ieee80211_vif_to_wdev(vif), trig)) goto out; if (trig_mlme->stop_connection_loss && --trig_mlme->stop_connection_loss) goto out; - iwl_mvm_fw_dbg_collect_trig(mvm, trig, "%s", errmsg); + iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg); out: ieee80211_connection_loss(vif); -- cgit v1.2.3-55-g7522 From d172a5eff629127c048272e5d44e8b9f9cd30eac Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Fri, 2 Jun 2017 15:15:53 +0200 Subject: iwlwifi: reorganize firmware API Apart from DVM, all 
firmware uses the same base API, and there's code outside iwlmvm that needs to interact with it. Reflect this in the source better and reorganize the firmware API to a new fw/api/ directory. While at it, split the already pretty large fw-api.h file into a number of smaller files, going from almost 3k lines in there to a maximum number of lines less than 1k. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api.h | 359 --- drivers/net/wireless/intel/iwlwifi/fw/api/alive.h | 190 ++ .../net/wireless/intel/iwlwifi/fw/api/binding.h | 144 ++ drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h | 195 ++ drivers/net/wireless/intel/iwlwifi/fw/api/coex.h | 252 ++ .../net/wireless/intel/iwlwifi/fw/api/commands.h | 657 +++++ drivers/net/wireless/intel/iwlwifi/fw/api/config.h | 192 ++ .../net/wireless/intel/iwlwifi/fw/api/context.h | 94 + drivers/net/wireless/intel/iwlwifi/fw/api/d3.h | 466 ++++ .../net/wireless/intel/iwlwifi/fw/api/datapath.h | 127 + drivers/net/wireless/intel/iwlwifi/fw/api/debug.h | 345 +++ drivers/net/wireless/intel/iwlwifi/fw/api/filter.h | 183 ++ .../net/wireless/intel/iwlwifi/fw/api/mac-cfg.h | 152 ++ drivers/net/wireless/intel/iwlwifi/fw/api/mac.h | 409 +++ .../net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | 378 +++ .../net/wireless/intel/iwlwifi/fw/api/offload.h | 101 + drivers/net/wireless/intel/iwlwifi/fw/api/paging.h | 108 + .../net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h | 164 ++ drivers/net/wireless/intel/iwlwifi/fw/api/phy.h | 258 ++ drivers/net/wireless/intel/iwlwifi/fw/api/power.h | 526 ++++ drivers/net/wireless/intel/iwlwifi/fw/api/rs.h | 408 +++ drivers/net/wireless/intel/iwlwifi/fw/api/rx.h | 589 +++++ drivers/net/wireless/intel/iwlwifi/fw/api/scan.h | 787 ++++++ drivers/net/wireless/intel/iwlwifi/fw/api/sf.h | 138 + drivers/net/wireless/intel/iwlwifi/fw/api/sta.h | 573 +++++ drivers/net/wireless/intel/iwlwifi/fw/api/stats.h | 474 ++++ drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h | 208 ++ .../net/wireless/intel/iwlwifi/fw/api/time-event.h | 386 +++ drivers/net/wireless/intel/iwlwifi/fw/api/tof.h | 393 +++ drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 912 +++++++ drivers/net/wireless/intel/iwlwifi/fw/api/txq.h | 156 ++ drivers/net/wireless/intel/iwlwifi/fw/paging.c | 5 +- drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 3 +- drivers/net/wireless/intel/iwlwifi/fw/smem.c | 6 +- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 3 +- drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 2 +- .../net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 2 +- .../net/wireless/intel/iwlwifi/mvm/fw-api-coex.h | 257 -- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h | 471 ---- .../net/wireless/intel/iwlwifi/mvm/fw-api-mac.h | 396 --- .../net/wireless/intel/iwlwifi/mvm/fw-api-power.h | 531 ---- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h | 413 --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h | 574 ----- .../net/wireless/intel/iwlwifi/mvm/fw-api-scan.h | 792 ------ .../net/wireless/intel/iwlwifi/mvm/fw-api-sta.h | 578 ----- .../net/wireless/intel/iwlwifi/mvm/fw-api-stats.h | 479 ---- .../net/wireless/intel/iwlwifi/mvm/fw-api-tof.h | 398 --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | 917 ------- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 2714 +------------------- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 4 +- drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2 +- 
drivers/net/wireless/intel/iwlwifi/mvm/tof.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/tof.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 5 +- 58 files changed, 10011 insertions(+), 8877 deletions(-) delete mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/alive.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/binding.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/coex.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/commands.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/config.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/context.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/d3.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/debug.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/filter.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/mac.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/offload.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/paging.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/phy.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/power.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/rs.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/rx.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/scan.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/sf.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/sta.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/stats.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/tof.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/tx.h create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/txq.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h delete mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api.h b/drivers/net/wireless/intel/iwlwifi/fw/api.h deleted file mode 100644 index afd333e57790..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/fw/api.h +++ /dev/null @@ -1,359 +0,0 @@ 
-/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __iwl_fw_api_h__ -#define __iwl_fw_api_h__ - -/** - * DOC: Host command section - * - * A host command is a command issued by the upper layer to the fw. There are - * several versions of fw that have several APIs. The transport layer is - * completely agnostic to these differences. - * The transport does provide helper functionality (i.e. 
SYNC / ASYNC mode), - */ -#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) -#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) -#define SEQ_TO_INDEX(s) ((s) & 0xff) -#define INDEX_TO_SEQ(i) ((i) & 0xff) -#define SEQ_RX_FRAME cpu_to_le16(0x8000) - -/* - * those functions retrieve specific information from - * the id field in the iwl_host_cmd struct which contains - * the command id, the group id and the version of the command - * and vice versa -*/ -static inline u8 iwl_cmd_opcode(u32 cmdid) -{ - return cmdid & 0xFF; -} - -static inline u8 iwl_cmd_groupid(u32 cmdid) -{ - return ((cmdid & 0xFF00) >> 8); -} - -static inline u8 iwl_cmd_version(u32 cmdid) -{ - return ((cmdid & 0xFF0000) >> 16); -} - -static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) -{ - return opcode + (groupid << 8) + (version << 16); -} - -/* make u16 wide id out of u8 group and opcode */ -#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode)) -#define DEF_ID(opcode) ((1 << 8) | (opcode)) - -/* due to the conversion, this group is special; new groups - * should be defined in the appropriate fw-api header files - */ -#define IWL_ALWAYS_LONG_GROUP 1 - -/** - * struct iwl_cmd_header - * - * This header format appears in the beginning of each command sent from the - * driver, and each response/notification received from uCode. - */ -struct iwl_cmd_header { - u8 cmd; /* Command ID: REPLY_RXON, etc. */ - u8 group_id; - /* - * The driver sets up the sequence number to values of its choosing. - * uCode does not use this value, but passes it back to the driver - * when sending the response to each driver-originated command, so - * the driver can match the response to the command. Since the values - * don't get used by uCode, the driver may set up an arbitrary format. - * - * There is one exception: uCode sets bit 15 when it originates - * the response/notification, i.e. when the response/notification - * is not a direct response to a command sent by the driver. For - * example, uCode issues REPLY_RX when it sends a received frame - * to the driver; it is not a direct response to any driver command. - * - * The Linux driver uses the following format: - * - * 0:7 tfd index - position within TX queue - * 8:12 TX queue id - * 13:14 reserved - * 15 unsolicited RX or uCode-originated notification - */ - __le16 sequence; -} __packed; - -/** - * struct iwl_cmd_header_wide - * - * This header format appears in the beginning of each command sent from the - * driver, and each response/notification received from uCode. - * this is the wide version that contains more information about the command - * like length, version and command type - */ -struct iwl_cmd_header_wide { - u8 cmd; - u8 group_id; - __le16 sequence; - __le16 length; - u8 reserved; - u8 version; -} __packed; - -/** - * iwl_tx_queue_cfg_actions - TXQ config options - * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue - * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format - */ -enum iwl_tx_queue_cfg_actions { - TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0), - TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), -}; - -/** - * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command - * @sta_id: station id - * @tid: tid of the queue - * @flags: see &enum iwl_tx_queue_cfg_actions - * @cb_size: size of TFD cyclic buffer. Value is exponent - 3. 
- * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs) - * @byte_cnt_addr: address of byte count table - * @tfdq_addr: address of TFD circular buffer - */ -struct iwl_tx_queue_cfg_cmd { - u8 sta_id; - u8 tid; - __le16 flags; - __le32 cb_size; - __le64 byte_cnt_addr; - __le64 tfdq_addr; -} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */ - -/** - * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config - * @queue_number: queue number assigned to this RA -TID - * @flags: set on failure - * @write_pointer: initial value for write pointer - */ -struct iwl_tx_queue_cfg_rsp { - __le16 queue_number; - __le16 flags; - __le16 write_pointer; - __le16 reserved; -} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */ - -/** - * struct iwl_calib_res_notif_phy_db - Receive phy db chunk after calibrations - * @type: type of the result - mostly ignored - * @length: length of the data - * @data: data, length in @length - */ -struct iwl_calib_res_notif_phy_db { - __le16 type; - __le16 length; - u8 data[]; -} __packed; - -/** - * struct iwl_phy_db_cmd - configure operational ucode - * @type: type of the data - * @length: length of the data - * @data: data, length in @length - */ -struct iwl_phy_db_cmd { - __le16 type; - __le16 length; - u8 data[]; -} __packed; - -#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ - -/** - * struct iwl_fw_paging_cmd - paging layout - * - * Send to FW the paging layout in the driver. - * - * @flags: various flags for the command - * @block_size: the block size in powers of 2 - * @block_num: number of blocks specified in the command. - * @device_phy_addr: virtual addresses from device side - */ -struct iwl_fw_paging_cmd { - __le32 flags; - __le32 block_size; - __le32 block_num; - __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; -} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ - -/** - * enum iwl_fw_item_id - FW item IDs - * - * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload - * download - */ -enum iwl_fw_item_id { - IWL_FW_ITEM_ID_PAGING = 3, -}; - -/** - * struct iwl_fw_get_item_cmd - get an item from the fw - * @item_id: ID of item to obtain, see &enum iwl_fw_item_id - */ -struct iwl_fw_get_item_cmd { - __le32 item_id; -} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ - -struct iwl_fw_get_item_resp { - __le32 item_id; - __le32 item_byte_cnt; - __le32 item_val; -} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ - -#define TX_FIFO_MAX_NUM_9000 8 -#define TX_FIFO_MAX_NUM 15 -#define RX_FIFO_MAX_NUM 2 -#define TX_FIFO_INTERNAL_MAX_NUM 6 - -/** - * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information - * - * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not - * accessible) - * @shared_mem_size: shared memory size - * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to - * 0x0 as accessible only via DBGM RDAT) - * @sample_buff_size: internal sample buff size - * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre - * 8000 HW set to 0x0 as not accessible) - * @txfifo_size: size of TXF0 ... TXF7 - * @rxfifo_size: RXF1, RXF2 sizes. 
If there is no RXF2, it'll have a value of 0 - * @page_buff_addr: used by UMAC and performance debug (page miss analysis), - * when paging is not supported this should be 0 - * @page_buff_size: size of %page_buff_addr - * @rxfifo_addr: Start address of rxFifo - * @internal_txfifo_addr: start address of internalFifo - * @internal_txfifo_size: internal fifos' size - * - * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG - * set, the last 3 members don't exist. - */ -struct iwl_shared_mem_cfg_v2 { - __le32 shared_mem_addr; - __le32 shared_mem_size; - __le32 sample_buff_addr; - __le32 sample_buff_size; - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM_9000]; - __le32 rxfifo_size[RX_FIFO_MAX_NUM]; - __le32 page_buff_addr; - __le32 page_buff_size; - __le32 rxfifo_addr; - __le32 internal_txfifo_addr; - __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ - -/** - * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration - * - * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) - * @txfifo_size: size of TX FIFOs - * @rxfifo1_addr: RXF1 addr - * @rxfifo1_size: RXF1 size - */ -struct iwl_shared_mem_lmac_cfg { - __le32 txfifo_addr; - __le32 txfifo_size[TX_FIFO_MAX_NUM]; - __le32 rxfifo1_addr; - __le32 rxfifo1_size; - -} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ - -/** - * struct iwl_shared_mem_cfg - Shared memory configuration information - * - * @shared_mem_addr: shared memory address - * @shared_mem_size: shared memory size - * @sample_buff_addr: internal sample (mon/adc) buff addr - * @sample_buff_size: internal sample buff size - * @rxfifo2_addr: start addr of RXF2 - * @rxfifo2_size: size of RXF2 - * @page_buff_addr: used by UMAC and performance debug (page miss analysis), - * when paging is not supported this should be 0 - * @page_buff_size: size of %page_buff_addr - * @lmac_num: number of LMACs (1 or 2) - * @lmac_smem: per - LMAC smem data - */ -struct iwl_shared_mem_cfg { - __le32 shared_mem_addr; - __le32 shared_mem_size; - __le32 sample_buff_addr; - __le32 sample_buff_size; - __le32 rxfifo2_addr; - __le32 rxfifo2_size; - __le32 page_buff_addr; - __le32 page_buff_size; - __le32 lmac_num; - struct iwl_shared_mem_lmac_cfg lmac_smem[2]; -} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ - -#endif /* __iwl_fw_api_h__*/ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h new file mode 100644 index 000000000000..6af6a9b32b69 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -0,0 +1,190 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
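[Editor's illustration, not part of the patch] The iwl_tx_queue_cfg_cmd kernel-doc in the removed api.h above encodes the TFD ring size as an exponent minus 3 (value 0 = 8 TFDs, value 5 = 256 TFDs). A minimal sketch of that encoding in plain C99 follows; the helper name tfd_ring_cb_size() is invented here, and the real command stores the result in a __le32 via cpu_to_le32().

/* Encode a TFD ring size the way the iwl_tx_queue_cfg_cmd kernel-doc above
 * describes cb_size: log2(number of TFDs) minus 3, valid range 0..5.
 */
#include <stdint.h>
#include <stdio.h>

static int tfd_ring_cb_size(unsigned int n_tfds)
{
        unsigned int order = 0;

        /* only power-of-two ring sizes between 8 and 256 are representable */
        if (n_tfds < 8 || n_tfds > 256 || (n_tfds & (n_tfds - 1)))
                return -1;

        while ((1u << order) < n_tfds)
                order++;

        return (int)(order - 3);        /* 8 TFDs -> 0, 256 TFDs -> 5 */
}

int main(void)
{
        printf("cb_size for 8 TFDs:   %d\n", tfd_ring_cb_size(8));      /* 0 */
        printf("cb_size for 256 TFDs: %d\n", tfd_ring_cb_size(256));    /* 5 */
        printf("cb_size for 100 TFDs: %d\n", tfd_ring_cb_size(100));    /* -1 */
        return 0;
}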
+ * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_alive_h__ +#define __iwl_fw_api_alive_h__ + +/* alive response is_valid values */ +#define ALIVE_RESP_UCODE_OK BIT(0) +#define ALIVE_RESP_RFKILL BIT(1) + +/* alive response ver_type values */ +enum { + FW_TYPE_HW = 0, + FW_TYPE_PROT = 1, + FW_TYPE_AP = 2, + FW_TYPE_WOWLAN = 3, + FW_TYPE_TIMING = 4, + FW_TYPE_WIPAN = 5 +}; + +/* alive response ver_subtype values */ +enum { + FW_SUBTYPE_FULL_FEATURE = 0, + FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */ + FW_SUBTYPE_REDUCED = 2, + FW_SUBTYPE_ALIVE_ONLY = 3, + FW_SUBTYPE_WOWLAN = 4, + FW_SUBTYPE_AP_SUBTYPE = 5, + FW_SUBTYPE_WIPAN = 6, + FW_SUBTYPE_INITIALIZE = 9 +}; + +#define IWL_ALIVE_STATUS_ERR 0xDEAD +#define IWL_ALIVE_STATUS_OK 0xCAFE + +#define IWL_ALIVE_FLG_RFKILL BIT(0) + +struct iwl_lmac_alive { + __le32 ucode_minor; + __le32 ucode_major; + u8 ver_subtype; + u8 ver_type; + u8 mac; + u8 opt; + __le32 timestamp; + __le32 error_event_table_ptr; /* SRAM address for error log */ + __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ + __le32 cpu_register_ptr; + __le32 dbgm_config_ptr; + __le32 alive_counter_ptr; + __le32 scd_base_ptr; /* SRAM address for SCD */ + __le32 st_fwrd_addr; /* pointer to Store and forward */ + __le32 st_fwrd_size; +} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ + +struct iwl_umac_alive { + __le32 umac_minor; /* UMAC version: minor */ + __le32 umac_major; /* UMAC version: major */ + __le32 error_info_addr; /* SRAM address for UMAC error log */ + __le32 dbg_print_buff_addr; +} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ + +struct mvm_alive_resp_v3 { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data; + struct iwl_umac_alive umac_data; +} __packed; /* ALIVE_RES_API_S_VER_3 */ + +struct mvm_alive_resp { + __le16 status; + __le16 flags; + struct iwl_lmac_alive lmac_data[2]; + struct iwl_umac_alive umac_data; +} __packed; /* ALIVE_RES_API_S_VER_4 */ + +/** + * enum iwl_extended_cfg_flag - commands driver may send before + * finishing init flow + * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command + * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands + * @IWL_INIT_PHY: driver is going to send PHY_DB commands + */ +enum iwl_extended_cfg_flags { + IWL_INIT_DEBUG_CFG, + IWL_INIT_NVM, + IWL_INIT_PHY, +}; + +/** + * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for + * before finishing init flows + * @init_flags: values from iwl_extended_cfg_flags + */ +struct iwl_init_extended_cfg_cmd { + __le32 init_flags; +} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */ + +/** + * struct iwl_radio_version_notif - information on the radio version + * ( RADIO_VERSION_NOTIFICATION = 0x68 ) + * @radio_flavor: radio flavor + * @radio_step: radio version step + * @radio_dash: radio version dash + */ +struct iwl_radio_version_notif { + __le32 radio_flavor; + __le32 radio_step; + __le32 radio_dash; +} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */ + +enum iwl_card_state_flags { + CARD_ENABLED = 0x00, + HW_CARD_DISABLED = 0x01, + SW_CARD_DISABLED = 0x02, + CT_KILL_CARD_DISABLED = 0x04, + HALT_CARD_DISABLED = 0x08, + CARD_DISABLED_MSK = 0x0f, + CARD_IS_RX_ON = 0x10, +}; + +/** + * struct iwl_radio_version_notif - information on the card state + * ( CARD_STATE_NOTIFICATION = 0xa1 ) + * @flags: &enum iwl_card_state_flags + */ +struct iwl_card_state_notif { + __le32 flags; +} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_alive_h__ */ diff --git 
a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h new file mode 100644 index 000000000000..d2717fafdf5b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h @@ -0,0 +1,144 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
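[Editor's illustration, not part of the patch] The fw/api/alive.h hunk above defines IWL_ALIVE_STATUS_OK (0xCAFE) and IWL_ALIVE_STATUS_ERR (0xDEAD) for the MVM_ALIVE response status word. The standalone sketch below shows the kind of sanity check a driver could apply; check_alive_status() is an invented name, and uint16_t stands in for __le16 with no byte-swapping.

/* Interpret the status word of a (mocked) MVM_ALIVE response. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IWL_ALIVE_STATUS_ERR 0xDEAD
#define IWL_ALIVE_STATUS_OK  0xCAFE

static bool check_alive_status(uint16_t status)
{
        if (status == IWL_ALIVE_STATUS_OK)
                return true;
        if (status == IWL_ALIVE_STATUS_ERR)
                fprintf(stderr, "firmware reported alive error (0xDEAD)\n");
        else
                fprintf(stderr, "unexpected alive status 0x%04x\n", status);
        return false;
}

int main(void)
{
        printf("0xCAFE accepted: %d\n", check_alive_status(IWL_ALIVE_STATUS_OK));
        printf("0xDEAD accepted: %d\n", check_alive_status(IWL_ALIVE_STATUS_ERR));
        return 0;
}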
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_binding_h__ +#define __iwl_fw_api_binding_h__ + +#define MAX_MACS_IN_BINDING (3) +#define MAX_BINDINGS (4) + +/** + * struct iwl_binding_cmd_v1 - configuring bindings + * ( BINDING_CONTEXT_CMD = 0x2b ) + * @id_and_color: ID and color of the relevant Binding, + * &enum iwl_ctxt_id_and_color + * @action: action to perform, one of FW_CTXT_ACTION_* + * @macs: array of MAC id and colors which belong to the binding, + * &enum iwl_ctxt_id_and_color + * @phy: PHY id and color which belongs to the binding, + * &enum iwl_ctxt_id_and_color + */ +struct iwl_binding_cmd_v1 { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* BINDING_DATA_API_S_VER_1 */ + __le32 macs[MAX_MACS_IN_BINDING]; + __le32 phy; +} __packed; /* BINDING_CMD_API_S_VER_1 */ + +/** + * struct iwl_binding_cmd - configuring bindings + * ( BINDING_CONTEXT_CMD = 0x2b ) + * @id_and_color: ID and color of the relevant Binding, + * &enum iwl_ctxt_id_and_color + * @action: action to perform, one of FW_CTXT_ACTION_* + * @macs: array of MAC id and colors which belong to the binding + * &enum iwl_ctxt_id_and_color + * @phy: PHY id and color which belongs to the binding + * &enum iwl_ctxt_id_and_color + * @lmac_id: the lmac id the binding belongs to + */ +struct iwl_binding_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* BINDING_DATA_API_S_VER_1 */ + __le32 macs[MAX_MACS_IN_BINDING]; + __le32 phy; + __le32 lmac_id; +} __packed; /* BINDING_CMD_API_S_VER_2 */ + +#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1) +#define IWL_LMAC_24G_INDEX 0 +#define IWL_LMAC_5G_INDEX 1 + +/* The maximal number of fragments in the FW's schedule session */ +#define IWL_MVM_MAX_QUOTA 128 + +/** + * struct iwl_time_quota_data - configuration of time quota per binding + * @id_and_color: ID and color of the relevant Binding, + * &enum iwl_ctxt_id_and_color + * @quota: absolute time quota in TU. The scheduler will try to divide the + * remainig quota (after Time Events) according to this quota. + * @max_duration: max uninterrupted context duration in TU + */ +struct iwl_time_quota_data { + __le32 id_and_color; + __le32 quota; + __le32 max_duration; +} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ + +/** + * struct iwl_time_quota_cmd - configuration of time quota between bindings + * ( TIME_QUOTA_CMD = 0x2c ) + * @quotas: allocations per binding + * Note: on non-CDB the fourth one is the auxilary mac and is + * essentially zero. + * On CDB the fourth one is a regular binding. + */ +struct iwl_time_quota_cmd { + struct iwl_time_quota_data quotas[MAX_BINDINGS]; +} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */ + +#endif /* __iwl_fw_api_binding_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h new file mode 100644 index 000000000000..fd97cccaedb8 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h @@ -0,0 +1,195 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
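[Editor's illustration, not part of the patch] fw/api/binding.h above caps the firmware schedule session at IWL_MVM_MAX_QUOTA fragments, spread over up to MAX_BINDINGS entries of struct iwl_time_quota_cmd. The sketch below fills a mirrored structure by splitting that budget evenly across the active bindings; the even split and the placeholder id_and_color values are assumptions for illustration only (not the mvm scheduler's actual policy), and host-endian uint32_t replaces __le32.

/* Fill a mirrored time-quota command, dividing the budget evenly. */
#include <stdint.h>
#include <stdio.h>

#define MAX_BINDINGS      4
#define IWL_MVM_MAX_QUOTA 128

struct quota_data {             /* mirrors struct iwl_time_quota_data */
        uint32_t id_and_color;
        uint32_t quota;
        uint32_t max_duration;
};

struct quota_cmd {              /* mirrors struct iwl_time_quota_cmd */
        struct quota_data quotas[MAX_BINDINGS];
};

static void fill_quota_cmd(struct quota_cmd *cmd, unsigned int n_active)
{
        unsigned int i;

        for (i = 0; i < MAX_BINDINGS; i++) {
                cmd->quotas[i].id_and_color = i;        /* placeholder id */
                cmd->quotas[i].max_duration = 0;
                cmd->quotas[i].quota =
                        i < n_active ? IWL_MVM_MAX_QUOTA / n_active : 0;
        }
}

int main(void)
{
        struct quota_cmd cmd;
        unsigned int i;

        fill_quota_cmd(&cmd, 2);        /* two active bindings share the budget */
        for (i = 0; i < MAX_BINDINGS; i++)
                printf("binding %u: quota %u\n", i,
                       (unsigned int)cmd.quotas[i].quota);
        return 0;
}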
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_api_cmdhdr_h__ +#define __iwl_fw_api_cmdhdr_h__ + +/** + * DOC: Host command section + * + * A host command is a command issued by the upper layer to the fw. There are + * several versions of fw that have several APIs. The transport layer is + * completely agnostic to these differences. + * The transport does provide helper functionality (i.e. 
SYNC / ASYNC mode), + */ +#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) +#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) +#define SEQ_TO_INDEX(s) ((s) & 0xff) +#define INDEX_TO_SEQ(i) ((i) & 0xff) +#define SEQ_RX_FRAME cpu_to_le16(0x8000) + +/* + * those functions retrieve specific information from + * the id field in the iwl_host_cmd struct which contains + * the command id, the group id and the version of the command + * and vice versa +*/ +static inline u8 iwl_cmd_opcode(u32 cmdid) +{ + return cmdid & 0xFF; +} + +static inline u8 iwl_cmd_groupid(u32 cmdid) +{ + return ((cmdid & 0xFF00) >> 8); +} + +static inline u8 iwl_cmd_version(u32 cmdid) +{ + return ((cmdid & 0xFF0000) >> 16); +} + +static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) +{ + return opcode + (groupid << 8) + (version << 16); +} + +/* make u16 wide id out of u8 group and opcode */ +#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode)) +#define DEF_ID(opcode) ((1 << 8) | (opcode)) + +/* due to the conversion, this group is special; new groups + * should be defined in the appropriate fw-api header files + */ +#define IWL_ALWAYS_LONG_GROUP 1 + +/** + * struct iwl_cmd_header + * + * This header format appears in the beginning of each command sent from the + * driver, and each response/notification received from uCode. + */ +struct iwl_cmd_header { + u8 cmd; /* Command ID: REPLY_RXON, etc. */ + u8 group_id; + /* + * The driver sets up the sequence number to values of its choosing. + * uCode does not use this value, but passes it back to the driver + * when sending the response to each driver-originated command, so + * the driver can match the response to the command. Since the values + * don't get used by uCode, the driver may set up an arbitrary format. + * + * There is one exception: uCode sets bit 15 when it originates + * the response/notification, i.e. when the response/notification + * is not a direct response to a command sent by the driver. For + * example, uCode issues REPLY_RX when it sends a received frame + * to the driver; it is not a direct response to any driver command. + * + * The Linux driver uses the following format: + * + * 0:7 tfd index - position within TX queue + * 8:12 TX queue id + * 13:14 reserved + * 15 unsolicited RX or uCode-originated notification + */ + __le16 sequence; +} __packed; + +/** + * struct iwl_cmd_header_wide + * + * This header format appears in the beginning of each command sent from the + * driver, and each response/notification received from uCode. 
+ * this is the wide version that contains more information about the command + * like length, version and command type + */ +struct iwl_cmd_header_wide { + u8 cmd; + u8 group_id; + __le16 sequence; + __le16 length; + u8 reserved; + u8 version; +} __packed; + +/** + * struct iwl_calib_res_notif_phy_db - Receive phy db chunk after calibrations + * @type: type of the result - mostly ignored + * @length: length of the data + * @data: data, length in @length + */ +struct iwl_calib_res_notif_phy_db { + __le16 type; + __le16 length; + u8 data[]; +} __packed; + +/** + * struct iwl_phy_db_cmd - configure operational ucode + * @type: type of the data + * @length: length of the data + * @data: data, length in @length + */ +struct iwl_phy_db_cmd { + __le16 type; + __le16 length; + u8 data[]; +} __packed; + +/** + * struct iwl_cmd_response - generic response struct for most commands + * @status: status of the command asked, changes for each one + */ +struct iwl_cmd_response { + __le32 status; +}; + +#endif /* __iwl_fw_api_cmdhdr_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h new file mode 100644 index 000000000000..583f4189f55e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -0,0 +1,252 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
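[Editor's illustration, not part of the patch] fw/api/cmdhdr.h above packs a command id as opcode in bits 0-7, group in bits 8-15 and version in bits 16-23, with WIDE_ID() forming the 16-bit group/opcode pair. The self-contained C99 sketch below round-trips that packing; the lowercase helper names mirror the kernel's iwl_cmd_*() inlines, and the sample values (opcode 0x0 in group 0x2, i.e. SHARED_MEM_CFG_CMD in SYSTEM_GROUP per the commands.h header added later in this patch) are just an example.

/* Round-trip the opcode/group/version packing documented in cmdhdr.h. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WIDE_ID(grp, opcode)    (((grp) << 8) | (opcode))

static inline uint8_t cmd_opcode(uint32_t cmdid)  { return cmdid & 0xFF; }
static inline uint8_t cmd_groupid(uint32_t cmdid) { return (cmdid & 0xFF00) >> 8; }
static inline uint8_t cmd_version(uint32_t cmdid) { return (cmdid & 0xFF0000) >> 16; }

static inline uint32_t cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
{
        return opcode + (groupid << 8) + (version << 16);
}

int main(void)
{
        uint32_t id = cmd_id(0x00, 0x02, 0);

        assert(cmd_opcode(id) == 0x00);
        assert(cmd_groupid(id) == 0x02);
        assert(cmd_version(id) == 0);
        assert(WIDE_ID(0x02, 0x00) == (id & 0xFFFF));

        printf("cmdid 0x%06x -> group 0x%x opcode 0x%x version %u\n",
               (unsigned int)id, (unsigned int)cmd_groupid(id),
               (unsigned int)cmd_opcode(id), (unsigned int)cmd_version(id));
        return 0;
}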
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_fw_api_coex_h__ +#define __iwl_fw_api_coex_h__ + +#include +#include + +#define BITS(nb) (BIT(nb) - 1) + +enum iwl_bt_coex_lut_type { + BT_COEX_TIGHT_LUT = 0, + BT_COEX_LOOSE_LUT, + BT_COEX_TX_DIS_LUT, + + BT_COEX_MAX_LUT, + BT_COEX_INVALID_LUT = 0xff, +}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */ + +#define BT_COEX_CORUN_LUT_SIZE (32) +#define BT_REDUCED_TX_POWER_BIT BIT(7) + +enum iwl_bt_coex_mode { + BT_COEX_DISABLE = 0x0, + BT_COEX_NW = 0x1, + BT_COEX_BT = 0x2, + BT_COEX_WIFI = 0x3, +}; /* BT_COEX_MODES_E */ + +enum iwl_bt_coex_enabled_modules { + BT_COEX_MPLUT_ENABLED = BIT(0), + BT_COEX_MPLUT_BOOST_ENABLED = BIT(1), + BT_COEX_SYNC2SCO_ENABLED = BIT(2), + BT_COEX_CORUN_ENABLED = BIT(3), + BT_COEX_HIGH_BAND_RET = BIT(4), +}; /* BT_COEX_MODULES_ENABLE_E_VER_1 */ + +/** + * struct iwl_bt_coex_cmd - bt coex configuration command + * @mode: &enum iwl_bt_coex_mode + * @enabled_modules: &enum iwl_bt_coex_enabled_modules + * + * The structure is used for the BT_COEX command. + */ +struct iwl_bt_coex_cmd { + __le32 mode; + __le32 enabled_modules; +} __packed; /* BT_COEX_CMD_API_S_VER_6 */ + +/** + * struct iwl_bt_coex_corun_lut_update - bt coex update the corun lut + * @corun_lut20: co-running 20 MHz LUT configuration + * @corun_lut40: co-running 40 MHz LUT configuration + * + * The structure is used for the BT_COEX_UPDATE_CORUN_LUT command. 
+ */ +struct iwl_bt_coex_corun_lut_update_cmd { + __le32 corun_lut20[BT_COEX_CORUN_LUT_SIZE]; + __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE]; +} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */ + +/** + * struct iwl_bt_coex_reduced_txp_update_cmd + * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the + * bits are the sta_id (value) + */ +struct iwl_bt_coex_reduced_txp_update_cmd { + __le32 reduced_txp; +} __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */ + +/** + * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command + * @bt_primary_ci: primary channel inhibition bitmap + * @primary_ch_phy_id: primary channel PHY ID + * @bt_secondary_ci: secondary channel inhibition bitmap + * @secondary_ch_phy_id: secondary channel PHY ID + * + * Used for BT_COEX_CI command + */ +struct iwl_bt_coex_ci_cmd { + __le64 bt_primary_ci; + __le32 primary_ch_phy_id; + + __le64 bt_secondary_ci; + __le32 secondary_ch_phy_id; +} __packed; /* BT_CI_MSG_API_S_VER_2 */ + +#define BT_MBOX(n_dw, _msg, _pos, _nbits) \ + BT_MBOX##n_dw##_##_msg##_POS = (_pos), \ + BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS + +enum iwl_bt_mxbox_dw0 { + BT_MBOX(0, LE_SLAVE_LAT, 0, 3), + BT_MBOX(0, LE_PROF1, 3, 1), + BT_MBOX(0, LE_PROF2, 4, 1), + BT_MBOX(0, LE_PROF_OTHER, 5, 1), + BT_MBOX(0, CHL_SEQ_N, 8, 4), + BT_MBOX(0, INBAND_S, 13, 1), + BT_MBOX(0, LE_MIN_RSSI, 16, 4), + BT_MBOX(0, LE_SCAN, 20, 1), + BT_MBOX(0, LE_ADV, 21, 1), + BT_MBOX(0, LE_MAX_TX_POWER, 24, 4), + BT_MBOX(0, OPEN_CON_1, 28, 2), +}; + +enum iwl_bt_mxbox_dw1 { + BT_MBOX(1, BR_MAX_TX_POWER, 0, 4), + BT_MBOX(1, IP_SR, 4, 1), + BT_MBOX(1, LE_MSTR, 5, 1), + BT_MBOX(1, AGGR_TRFC_LD, 8, 6), + BT_MBOX(1, MSG_TYPE, 16, 3), + BT_MBOX(1, SSN, 19, 2), +}; + +enum iwl_bt_mxbox_dw2 { + BT_MBOX(2, SNIFF_ACT, 0, 3), + BT_MBOX(2, PAG, 3, 1), + BT_MBOX(2, INQUIRY, 4, 1), + BT_MBOX(2, CONN, 5, 1), + BT_MBOX(2, SNIFF_INTERVAL, 8, 5), + BT_MBOX(2, DISC, 13, 1), + BT_MBOX(2, SCO_TX_ACT, 16, 2), + BT_MBOX(2, SCO_RX_ACT, 18, 2), + BT_MBOX(2, ESCO_RE_TX, 20, 2), + BT_MBOX(2, SCO_DURATION, 24, 6), +}; + +enum iwl_bt_mxbox_dw3 { + BT_MBOX(3, SCO_STATE, 0, 1), + BT_MBOX(3, SNIFF_STATE, 1, 1), + BT_MBOX(3, A2DP_STATE, 2, 1), + BT_MBOX(3, ACL_STATE, 3, 1), + BT_MBOX(3, MSTR_STATE, 4, 1), + BT_MBOX(3, OBX_STATE, 5, 1), + BT_MBOX(3, OPEN_CON_2, 8, 2), + BT_MBOX(3, TRAFFIC_LOAD, 10, 2), + BT_MBOX(3, CHL_SEQN_LSB, 12, 1), + BT_MBOX(3, INBAND_P, 13, 1), + BT_MBOX(3, MSG_TYPE_2, 16, 3), + BT_MBOX(3, SSN_2, 19, 2), + BT_MBOX(3, UPDATE_REQUEST, 21, 1), +}; + +#define BT_MBOX_MSG(_notif, _num, _field) \ + ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ + >> BT_MBOX##_num##_##_field##_POS) + +enum iwl_bt_activity_grading { + BT_OFF = 0, + BT_ON_NO_CONNECTION = 1, + BT_LOW_TRAFFIC = 2, + BT_HIGH_TRAFFIC = 3, + + BT_MAX_AG, +}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */ + +enum iwl_bt_ci_compliance { + BT_CI_COMPLIANCE_NONE = 0, + BT_CI_COMPLIANCE_PRIMARY = 1, + BT_CI_COMPLIANCE_SECONDARY = 2, + BT_CI_COMPLIANCE_BOTH = 3, +}; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */ + +#define IWL_COEX_IS_TTC_ON(_ttc_rrc_status, _phy_id) \ + (_ttc_rrc_status & BIT(_phy_id)) + +#define IWL_COEX_IS_RRC_ON(_ttc_rrc_status, _phy_id) \ + ((_ttc_rrc_status >> 4) & BIT(_phy_id)) + +/** + * struct iwl_bt_coex_profile_notif - notification about BT coex + * @mbox_msg: message from BT to WiFi + * @msg_idx: the index of the message + * @bt_ci_compliance: enum %iwl_bt_ci_compliance + * @primary_ch_lut: LUT used for primary channel &enum 
iwl_bt_coex_lut_type + * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type + * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading + * @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY + * @reserved: reserved + */ +struct iwl_bt_coex_profile_notif { + __le32 mbox_msg[4]; + __le32 msg_idx; + __le32 bt_ci_compliance; + + __le32 primary_ch_lut; + __le32 secondary_ch_lut; + __le32 bt_activity_grading; + u8 ttc_rrc_status; + u8 reserved[3]; +} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */ + +#endif /* __iwl_fw_api_coex_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h new file mode 100644 index 000000000000..34fceb26447d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -0,0 +1,657 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
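[Editor's illustration, not part of the patch] The BT_MBOX()/BT_MBOX_MSG() macros in fw/api/coex.h above reduce to a mask-and-shift on one mailbox word: field = (word & (BITS(nbits) << pos)) >> pos. The sketch below applies that to two fields of mailbox dword 2, using the positions and widths from the header; the word contents are invented, and host-endian uint32_t replaces __le32/le32_to_cpu().

/* Extract BT mailbox fields the way BT_MBOX_MSG() does. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)   (1u << (n))
#define BITS(nb) (BIT(nb) - 1)

static uint32_t mbox_field(uint32_t word, unsigned int pos, unsigned int nbits)
{
        return (word & (BITS(nbits) << pos)) >> pos;
}

int main(void)
{
        /* dword 2 with SNIFF_INTERVAL (pos 8, 5 bits) = 18 and
         * SCO_DURATION (pos 24, 6 bits) = 12 -- invented values.
         */
        uint32_t dw2 = (18u << 8) | (12u << 24);

        printf("SNIFF_INTERVAL = %u\n", (unsigned int)mbox_field(dw2, 8, 5));
        printf("SCO_DURATION   = %u\n", (unsigned int)mbox_field(dw2, 24, 6));
        return 0;
}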
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_commands_h__ +#define __iwl_fw_api_commands_h__ + +/** + * enum iwl_mvm_command_groups - command groups for the firmware + * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds + * @LONG_GROUP: legacy group with long header, also uses command IDs + * from &enum iwl_legacy_cmds + * @SYSTEM_GROUP: system group, uses command IDs from + * &enum iwl_system_subcmd_ids + * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from + * &enum iwl_mac_conf_subcmd_ids + * @PHY_OPS_GROUP: PHY operations group, uses command IDs from + * &enum iwl_phy_ops_subcmd_ids + * @DATA_PATH_GROUP: data path group, uses command IDs from + * &enum iwl_data_path_subcmd_ids + * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids + * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids + * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from + * &enum iwl_prot_offload_subcmd_ids + * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from + * &enum iwl_regulatory_and_nvm_subcmd_ids + * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds + */ +enum iwl_mvm_command_groups { + LEGACY_GROUP = 0x0, + LONG_GROUP = 0x1, + SYSTEM_GROUP = 0x2, + MAC_CONF_GROUP = 0x3, + PHY_OPS_GROUP = 0x4, + DATA_PATH_GROUP = 0x5, + NAN_GROUP = 0x7, + TOF_GROUP = 0x8, + PROT_OFFLOAD_GROUP = 0xb, + REGULATORY_AND_NVM_GROUP = 0xc, + DEBUG_GROUP = 0xf, +}; + +/** + * enum iwl_legacy_cmds - legacy group command IDs + */ +enum iwl_legacy_cmds { + /** + * @MVM_ALIVE: + * Alive data from the firmware, as described in + * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp. + */ + MVM_ALIVE = 0x1, + + /** + * @REPLY_ERROR: Cause an error in the firmware, for testing purposes. + */ + REPLY_ERROR = 0x2, + + /** + * @ECHO_CMD: Send data to the device to have it returned immediately. + */ + ECHO_CMD = 0x3, + + /** + * @INIT_COMPLETE_NOTIF: Notification that initialization is complete. + */ + INIT_COMPLETE_NOTIF = 0x4, + + /** + * @PHY_CONTEXT_CMD: + * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd. + */ + PHY_CONTEXT_CMD = 0x8, + + /** + * @DBG_CFG: Debug configuration command. + */ + DBG_CFG = 0x9, + + /** + * @ANTENNA_COUPLING_NOTIFICATION: + * Antenna coupling data, &struct iwl_mvm_antenna_coupling_notif + */ + ANTENNA_COUPLING_NOTIFICATION = 0xa, + + /** + * @SCAN_ITERATION_COMPLETE_UMAC: + * Firmware indicates a scan iteration completed, using + * &struct iwl_umac_scan_iter_complete_notif. 
+ */ + SCAN_ITERATION_COMPLETE_UMAC = 0xb5, + + /** + * @SCAN_CFG_CMD: + * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config + */ + SCAN_CFG_CMD = 0xc, + + /** + * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac + */ + SCAN_REQ_UMAC = 0xd, + + /** + * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort + */ + SCAN_ABORT_UMAC = 0xe, + + /** + * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete + */ + SCAN_COMPLETE_UMAC = 0xf, + + /** + * @BA_WINDOW_STATUS_NOTIFICATION_ID: + * uses &struct iwl_ba_window_status_notif + */ + BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, + + /** + * @ADD_STA_KEY: + * &struct iwl_mvm_add_sta_key_cmd_v1 or + * &struct iwl_mvm_add_sta_key_cmd. + */ + ADD_STA_KEY = 0x17, + + /** + * @ADD_STA: + * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7. + */ + ADD_STA = 0x18, + + /** + * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd + */ + REMOVE_STA = 0x19, + + /** + * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd + */ + FW_GET_ITEM_CMD = 0x1a, + + /** + * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2, + * response in &struct iwl_mvm_tx_resp or + * &struct iwl_mvm_tx_resp_v3 + */ + TX_CMD = 0x1c, + + /** + * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd + */ + TXPATH_FLUSH = 0x1e, + + /** + * @MGMT_MCAST_KEY: + * &struct iwl_mvm_mgmt_mcast_key_cmd or + * &struct iwl_mvm_mgmt_mcast_key_cmd_v1 + */ + MGMT_MCAST_KEY = 0x1f, + + /* scheduler config */ + /** + * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware, + * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp + * for newer (A000) hardware. + */ + SCD_QUEUE_CFG = 0x1d, + + /** + * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd + */ + WEP_KEY = 0x20, + + /** + * @SHARED_MEM_CFG: + * retrieve shared memory configuration - response in + * &struct iwl_shared_mem_cfg + */ + SHARED_MEM_CFG = 0x25, + + /** + * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd + */ + TDLS_CHANNEL_SWITCH_CMD = 0x27, + + /** + * @TDLS_CHANNEL_SWITCH_NOTIFICATION: + * uses &struct iwl_tdls_channel_switch_notif + */ + TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, + + /** + * @TDLS_CONFIG_CMD: + * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res + */ + TDLS_CONFIG_CMD = 0xa7, + + /** + * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd + */ + MAC_CONTEXT_CMD = 0x28, + + /** + * @TIME_EVENT_CMD: + * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp + */ + TIME_EVENT_CMD = 0x29, /* both CMD and response */ + + /** + * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif + */ + TIME_EVENT_NOTIFICATION = 0x2a, + + /** + * @BINDING_CONTEXT_CMD: + * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1 + */ + BINDING_CONTEXT_CMD = 0x2b, + + /** + * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd + */ + TIME_QUOTA_CMD = 0x2c, + + /** + * @NON_QOS_TX_COUNTER_CMD: + * command is &struct iwl_nonqos_seq_query_cmd + */ + NON_QOS_TX_COUNTER_CMD = 0x2d, + + /** + * @LQ_CMD: using &struct iwl_lq_cmd + */ + LQ_CMD = 0x4e, + + /** + * @FW_PAGING_BLOCK_CMD: + * &struct iwl_fw_paging_cmd + */ + FW_PAGING_BLOCK_CMD = 0x4f, + + /** + * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac + */ + SCAN_OFFLOAD_REQUEST_CMD = 0x51, + + /** + * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents + */ + SCAN_OFFLOAD_ABORT_CMD = 0x52, + + /** + * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req + */ + HOT_SPOT_CMD = 0x53, + + /** + * @SCAN_OFFLOAD_COMPLETE: + * notification, &struct iwl_periodic_scan_complete + */ + SCAN_OFFLOAD_COMPLETE = 0x6D, + + /** + * 
@SCAN_OFFLOAD_UPDATE_PROFILES_CMD: + * update scan offload (scheduled scan) profiles/blacklist/etc. + */ + SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, + + /** + * @MATCH_FOUND_NOTIFICATION: scan match found + */ + MATCH_FOUND_NOTIFICATION = 0xd9, + + /** + * @SCAN_ITERATION_COMPLETE: + * uses &struct iwl_lmac_scan_complete_notif + */ + SCAN_ITERATION_COMPLETE = 0xe7, + + /* Phy */ + /** + * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd + */ + PHY_CONFIGURATION_CMD = 0x6a, + + /** + * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db + */ + CALIB_RES_NOTIF_PHY_DB = 0x6b, + + /** + * @PHY_DB_CMD: &struct iwl_phy_db_cmd + */ + PHY_DB_CMD = 0x6c, + + /** + * @TOF_CMD: &struct iwl_tof_config_cmd + */ + TOF_CMD = 0x10, + + /** + * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd + */ + TOF_NOTIFICATION = 0x11, + + /** + * @POWER_TABLE_CMD: &struct iwl_device_power_cmd + */ + POWER_TABLE_CMD = 0x77, + + /** + * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION: + * &struct iwl_uapsd_misbehaving_ap_notif + */ + PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, + + /** + * @LTR_CONFIG: &struct iwl_ltr_config_cmd + */ + LTR_CONFIG = 0xee, + + /** + * @REPLY_THERMAL_MNG_BACKOFF: + * Thermal throttling command + */ + REPLY_THERMAL_MNG_BACKOFF = 0x7e, + + /** + * @DC2DC_CONFIG_CMD: + * Set/Get DC2DC frequency tune + * Command is &struct iwl_dc2dc_config_cmd, + * response is &struct iwl_dc2dc_config_resp + */ + DC2DC_CONFIG_CMD = 0x83, + + /** + * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd + */ + NVM_ACCESS_CMD = 0x88, + + /** + * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif + */ + BEACON_NOTIFICATION = 0x90, + + /** + * @BEACON_TEMPLATE_CMD: + * Uses one of &struct iwl_mac_beacon_cmd_v6, + * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd + * depending on the device version. 
+ */ + BEACON_TEMPLATE_CMD = 0x91, + /** + * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd + */ + TX_ANT_CONFIGURATION_CMD = 0x98, + + /** + * @STATISTICS_CMD: &struct iwl_statistics_cmd + */ + STATISTICS_CMD = 0x9c, + + /** + * @STATISTICS_NOTIFICATION: + * one of &struct iwl_notif_statistics_v10, + * &struct iwl_notif_statistics_v11, + * &struct iwl_notif_statistics_cdb + */ + STATISTICS_NOTIFICATION = 0x9d, + + /** + * @EOSP_NOTIFICATION: + * Notify that a service period ended, + * &struct iwl_mvm_eosp_notification + */ + EOSP_NOTIFICATION = 0x9e, + + /** + * @REDUCE_TX_POWER_CMD: + * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd + */ + REDUCE_TX_POWER_CMD = 0x9f, + + /** + * @CARD_STATE_NOTIFICATION: + * Card state (RF/CT kill) notification, + * uses &struct iwl_card_state_notif + */ + CARD_STATE_NOTIFICATION = 0xa1, + + /** + * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif + */ + MISSED_BEACONS_NOTIFICATION = 0xa2, + + /** + * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd + */ + MAC_PM_POWER_TABLE = 0xa9, + + /** + * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif + */ + MFUART_LOAD_NOTIFICATION = 0xb1, + + /** + * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd + */ + RSS_CONFIG_CMD = 0xb3, + + /** + * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info + */ + REPLY_RX_PHY_CMD = 0xc0, + + /** + * @REPLY_RX_MPDU_CMD: + * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc + */ + REPLY_RX_MPDU_CMD = 0xc1, + + /** + * @FRAME_RELEASE: + * Frame release (reorder helper) notification, uses + * &struct iwl_frame_release + */ + FRAME_RELEASE = 0xc3, + + /** + * @BA_NOTIF: + * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif + * or &struct iwl_mvm_ba_notif depending on the HW + */ + BA_NOTIF = 0xc5, + + /* Location Aware Regulatory */ + /** + * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd + */ + MCC_UPDATE_CMD = 0xc8, + + /** + * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif + */ + MCC_CHUB_UPDATE_CMD = 0xc9, + + /** + * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker + */ + MARKER_CMD = 0xcb, + + /** + * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif + */ + BT_PROFILE_NOTIFICATION = 0xce, + + /** + * @BT_CONFIG: &struct iwl_bt_coex_cmd + */ + BT_CONFIG = 0x9b, + + /** + * @BT_COEX_UPDATE_CORUN_LUT: + * &struct iwl_bt_coex_corun_lut_update_cmd + */ + BT_COEX_UPDATE_CORUN_LUT = 0x5b, + + /** + * @BT_COEX_UPDATE_REDUCED_TXP: + * &struct iwl_bt_coex_reduced_txp_update_cmd + */ + BT_COEX_UPDATE_REDUCED_TXP = 0x5c, + + /** + * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd + */ + BT_COEX_CI = 0x5d, + + /** + * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd + */ + REPLY_SF_CFG_CMD = 0xd1, + /** + * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd + */ + REPLY_BEACON_FILTERING_CMD = 0xd2, + + /** + * @DTS_MEASUREMENT_NOTIFICATION: + * &struct iwl_dts_measurement_notif_v1 or + * &struct iwl_dts_measurement_notif_v2 + */ + DTS_MEASUREMENT_NOTIFICATION = 0xdd, + + /** + * @LDBG_CONFIG_CMD: configure continuous trace recording + */ + LDBG_CONFIG_CMD = 0xf6, + + /** + * @DEBUG_LOG_MSG: Debugging log data from firmware + */ + DEBUG_LOG_MSG = 0xf7, + + /** + * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd + */ + BCAST_FILTER_CMD = 0xcf, + + /** + * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd + */ + MCAST_FILTER_CMD = 0xd0, + + /** + * @D3_CONFIG_CMD: &struct iwl_d3_manager_config + */ + D3_CONFIG_CMD = 0xd3, + + /** + * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of + * &struct 
iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2, + * &struct iwl_proto_offload_cmd_v3_small, + * &struct iwl_proto_offload_cmd_v3_large + */ + PROT_OFFLOAD_CONFIG_CMD = 0xd4, + + /** + * @OFFLOADS_QUERY_CMD: + * No data in command, response in &struct iwl_wowlan_status + */ + OFFLOADS_QUERY_CMD = 0xd5, + + /** + * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config + */ + REMOTE_WAKE_CONFIG_CMD = 0xd6, + + /** + * @D0I3_END_CMD: End D0i3/D3 state, no command data + */ + D0I3_END_CMD = 0xed, + + /** + * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd + */ + WOWLAN_PATTERNS = 0xe0, + + /** + * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd + */ + WOWLAN_CONFIGURATION = 0xe1, + + /** + * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd + */ + WOWLAN_TSC_RSC_PARAM = 0xe2, + + /** + * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd + */ + WOWLAN_TKIP_PARAM = 0xe3, + + /** + * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd + */ + WOWLAN_KEK_KCK_MATERIAL = 0xe4, + + /** + * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status + */ + WOWLAN_GET_STATUSES = 0xe5, + + /** + * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: + * No command data, response is &struct iwl_scan_offload_profiles_query + */ + SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, +}; + +/** + * enum iwl_system_subcmd_ids - system group command IDs + */ +enum iwl_system_subcmd_ids { + /** + * @SHARED_MEM_CFG_CMD: + * response in &struct iwl_shared_mem_cfg or + * &struct iwl_shared_mem_cfg_v2 + */ + SHARED_MEM_CFG_CMD = 0x0, + + /** + * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd + */ + INIT_EXTENDED_CFG_CMD = 0x03, +}; + +#endif /* __iwl_fw_api_commands_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h new file mode 100644 index 000000000000..ee1bd45b7021 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h @@ -0,0 +1,192 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. 
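The legacy command IDs above are raw opcode bytes, so a host-side table mapping a few of them back to their names is handy when reading traces. The sketch below is illustrative only: the values are copied from the enum, but the table, function and program around them are not part of the driver.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Subset of the legacy command IDs from the enum above (illustrative). */
struct iwl_cmd_name {
        uint8_t id;
        const char *name;
};

static const struct iwl_cmd_name legacy_cmd_names[] = {
        { 0x6a, "PHY_CONFIGURATION_CMD" },
        { 0x77, "POWER_TABLE_CMD" },
        { 0x88, "NVM_ACCESS_CMD" },
        { 0x9c, "STATISTICS_CMD" },
        { 0xc1, "REPLY_RX_MPDU_CMD" },
        { 0xd3, "D3_CONFIG_CMD" },
        { 0xe5, "WOWLAN_GET_STATUSES" },
        { 0xee, "LTR_CONFIG" },
};

static const char *legacy_cmd_to_name(uint8_t id)
{
        size_t i;

        for (i = 0; i < sizeof(legacy_cmd_names) / sizeof(legacy_cmd_names[0]); i++)
                if (legacy_cmd_names[i].id == id)
                        return legacy_cmd_names[i].name;
        return "UNKNOWN";
}

int main(void)
{
        printf("0x6a -> %s\n", legacy_cmd_to_name(0x6a)); /* PHY_CONFIGURATION_CMD */
        printf("0xee -> %s\n", legacy_cmd_to_name(0xee)); /* LTR_CONFIG */
        return 0;
}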
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_config_h__ +#define __iwl_fw_api_config_h__ + +/* + * struct iwl_dqa_enable_cmd + * @cmd_queue: the TXQ number of the command queue + */ +struct iwl_dqa_enable_cmd { + __le32 cmd_queue; +} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */ + +/* + * struct iwl_tx_ant_cfg_cmd + * @valid: valid antenna configuration + */ +struct iwl_tx_ant_cfg_cmd { + __le32 valid; +} __packed; + +/** + * struct iwl_calib_ctrl - Calibration control struct. + * Sent as part of the phy configuration command. + * @flow_trigger: bitmap for which calibrations to perform according to + * flow triggers, using &enum iwl_calib_cfg + * @event_trigger: bitmap for which calibrations to perform according to + * event triggers, using &enum iwl_calib_cfg + */ +struct iwl_calib_ctrl { + __le32 flow_trigger; + __le32 event_trigger; +} __packed; + +/* This enum defines the bitmap of various calibrations to enable in both + * init ucode and runtime ucode through CALIBRATION_CFG_CMD. 
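Both iwl_dqa_enable_cmd and iwl_tx_ant_cfg_cmd are single __le32 payloads, i.e. the dword goes on the wire little-endian regardless of host byte order. A minimal stand-alone sketch of that serialization convention; the helper and the example queue number are invented for illustration, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/*
 * Host-side stand-in for a one-dword command such as iwl_dqa_enable_cmd
 * (a single __le32 field, __packed).  Shows the little-endian wire
 * convention only; this is not the kernel definition.
 */
static void put_le32(uint8_t *buf, uint32_t val)
{
        buf[0] = val & 0xff;
        buf[1] = (val >> 8) & 0xff;
        buf[2] = (val >> 16) & 0xff;
        buf[3] = (val >> 24) & 0xff;
}

int main(void)
{
        uint8_t payload[4];
        uint32_t cmd_queue = 9;      /* hypothetical command-queue number */

        put_le32(payload, cmd_queue);
        printf("%02x %02x %02x %02x\n",
               payload[0], payload[1], payload[2], payload[3]);
        /* prints: 09 00 00 00 -- least significant byte first */
        return 0;
}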
+ */ +enum iwl_calib_cfg { + IWL_CALIB_CFG_XTAL_IDX = BIT(0), + IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), + IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), + IWL_CALIB_CFG_PAPD_IDX = BIT(3), + IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), + IWL_CALIB_CFG_DC_IDX = BIT(5), + IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), + IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), + IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), + IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), + IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), + IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), + IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), + IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), + IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), + IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), + IWL_CALIB_CFG_DAC_IDX = BIT(16), + IWL_CALIB_CFG_ABS_IDX = BIT(17), + IWL_CALIB_CFG_AGC_IDX = BIT(18), +}; + +/** + * struct iwl_phy_cfg_cmd - Phy configuration command + * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg + * @calib_control: calibration control data + */ +struct iwl_phy_cfg_cmd { + __le32 phy_cfg; + struct iwl_calib_ctrl calib_control; +} __packed; + +#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1)) +#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3)) +#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5)) +#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7)) +#define PHY_CFG_TX_CHAIN_A BIT(8) +#define PHY_CFG_TX_CHAIN_B BIT(9) +#define PHY_CFG_TX_CHAIN_C BIT(10) +#define PHY_CFG_RX_CHAIN_A BIT(12) +#define PHY_CFG_RX_CHAIN_B BIT(13) +#define PHY_CFG_RX_CHAIN_C BIT(14) + +/* + * enum iwl_dc2dc_config_id - flag ids + * + * Ids of dc2dc configuration flags + */ +enum iwl_dc2dc_config_id { + DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ + DCDC_FREQ_TUNE_SET = 0x2, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_dc2dc_config_cmd - configure dc2dc values + * + * (DC2DC_CONFIG_CMD = 0x83) + * + * Set/Get & configure dc2dc values. + * The command always returns the current dc2dc values. + * + * @flags: set/get dc2dc + * @enable_low_power_mode: not used. + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_cmd { + __le32 flags; + __le32 enable_low_power_mode; /* not used */ + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ + +/** + * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd + * + * Current dc2dc values returned by the FW. + * + * @dc2dc_freq_tune0: frequency divider - digital domain + * @dc2dc_freq_tune1: frequency divider - analog domain + */ +struct iwl_dc2dc_config_resp { + __le32 dc2dc_freq_tune0; + __le32 dc2dc_freq_tune1; +} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ + +/** + * struct iwl_mvm_antenna_coupling_notif - antenna coupling notification + * @isolation: antenna isolation value + */ +struct iwl_mvm_antenna_coupling_notif { + __le32 isolation; +} __packed; + +#endif /* __iwl_fw_api_config_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h new file mode 100644 index 000000000000..2f0d7c498b3e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h @@ -0,0 +1,94 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
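The PHY_CFG_* masks above slice the phy_cfg word into radio type/step/dash bits and per-chain TX/RX flags. Below is a small sketch of decoding such a word, with the mask values copied from the defines; the sample value and the helper are made up for illustration.

#include <stdio.h>

/* Masks copied from the PHY_CFG_* defines above. */
#define PHY_CFG_RADIO_TYPE      0x3u            /* bits 0-1 */
#define PHY_CFG_RADIO_STEP      0xcu            /* bits 2-3 */
#define PHY_CFG_RADIO_DASH      0x30u           /* bits 4-5 */
#define PHY_CFG_TX_CHAIN_A      (1u << 8)
#define PHY_CFG_TX_CHAIN_B      (1u << 9)
#define PHY_CFG_RX_CHAIN_A      (1u << 12)
#define PHY_CFG_RX_CHAIN_B      (1u << 13)

static void dump_phy_cfg(unsigned int phy_cfg)
{
        printf("radio type %u, step %u, dash %u\n",
               phy_cfg & PHY_CFG_RADIO_TYPE,
               (phy_cfg & PHY_CFG_RADIO_STEP) >> 2,
               (phy_cfg & PHY_CFG_RADIO_DASH) >> 4);
        printf("TX chains: %c%c  RX chains: %c%c\n",
               (phy_cfg & PHY_CFG_TX_CHAIN_A) ? 'A' : '-',
               (phy_cfg & PHY_CFG_TX_CHAIN_B) ? 'B' : '-',
               (phy_cfg & PHY_CFG_RX_CHAIN_A) ? 'A' : '-',
               (phy_cfg & PHY_CFG_RX_CHAIN_B) ? 'B' : '-');
}

int main(void)
{
        dump_phy_cfg(0x3302);   /* made-up value for illustration */
        return 0;
}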
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_context_h__ +#define __iwl_fw_api_context_h__ + +/** + * enum iwl_ctxt_id_and_color - ID and color fields in context dword + * @FW_CTXT_ID_POS: position of the ID + * @FW_CTXT_ID_MSK: mask of the ID + * @FW_CTXT_COLOR_POS: position of the color + * @FW_CTXT_COLOR_MSK: mask of the color + * @FW_CTXT_INVALID: value used to indicate unused/invalid + */ +enum iwl_ctxt_id_and_color { + FW_CTXT_ID_POS = 0, + FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS, + FW_CTXT_COLOR_POS = 8, + FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS, + FW_CTXT_INVALID = 0xffffffff, +}; + +#define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\ + ((_color) << FW_CTXT_COLOR_POS)) + +/* Possible actions on PHYs, MACs and Bindings */ +enum iwl_ctxt_action { + FW_CTXT_ACTION_STUB = 0, + FW_CTXT_ACTION_ADD, + FW_CTXT_ACTION_MODIFY, + FW_CTXT_ACTION_REMOVE, + FW_CTXT_ACTION_NUM +}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ + +#endif /* __iwl_fw_api_context_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h new file mode 100644 index 000000000000..57f4bc242023 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -0,0 +1,466 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
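FW_CMD_ID_AND_COLOR() packs an 8-bit context ID and an 8-bit color into one dword, per the positions and masks in enum iwl_ctxt_id_and_color. A self-contained sketch of composing and decomposing that word; the constants are copied from above, while the example ID/color values are arbitrary.

#include <stdio.h>

/* Values copied from enum iwl_ctxt_id_and_color above. */
#define FW_CTXT_ID_POS          0
#define FW_CTXT_ID_MSK          (0xffu << FW_CTXT_ID_POS)
#define FW_CTXT_COLOR_POS       8
#define FW_CTXT_COLOR_MSK       (0xffu << FW_CTXT_COLOR_POS)

#define FW_CMD_ID_AND_COLOR(_id, _color)        \
        (((_id) << FW_CTXT_ID_POS) | ((_color) << FW_CTXT_COLOR_POS))

int main(void)
{
        unsigned int word = FW_CMD_ID_AND_COLOR(3, 7);  /* MAC id 3, color 7 */

        printf("id_and_color = 0x%08x\n", word);        /* 0x00000703 */
        printf("id = %u, color = %u\n",
               (word & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS,
               (word & FW_CTXT_COLOR_MSK) >> FW_CTXT_COLOR_POS);
        return 0;
}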
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_fw_api_d3_h__ +#define __iwl_fw_api_d3_h__ + +/** + * enum iwl_d3_wakeup_flags - D3 manager wakeup flags + * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert + */ +enum iwl_d3_wakeup_flags { + IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0), +}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */ + +/** + * struct iwl_d3_manager_config - D3 manager configuration command + * @min_sleep_time: minimum sleep time (in usec) + * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags + * @wakeup_host_timer: force wakeup after this many seconds + * + * The structure is used for the D3_CONFIG_CMD command. + */ +struct iwl_d3_manager_config { + __le32 min_sleep_time; + __le32 wakeup_flags; + __le32 wakeup_host_timer; +} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */ + + +/* TODO: OFFLOADS_QUERY_API_S_VER_1 */ + +/** + * enum iwl_d3_proto_offloads - enabled protocol offloads + * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled + * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled + * @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid + * @IWL_D3_PROTO_IPV6_VALID: IPv6 data is valid + */ +enum iwl_proto_offloads { + IWL_D3_PROTO_OFFLOAD_ARP = BIT(0), + IWL_D3_PROTO_OFFLOAD_NS = BIT(1), + IWL_D3_PROTO_IPV4_VALID = BIT(2), + IWL_D3_PROTO_IPV6_VALID = BIT(3), +}; + +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4 +#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12 + +#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4 +#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2 + +/** + * struct iwl_proto_offload_cmd_common - ARP/NS offload common part + * @enabled: enable flags + * @remote_ipv4_addr: remote address to answer to (or zero if all) + * @host_ipv4_addr: our IPv4 address to respond to queries for + * @arp_mac_addr: our MAC address for ARP responses + * @reserved: unused + */ +struct iwl_proto_offload_cmd_common { + __le32 enabled; + __be32 remote_ipv4_addr; + __be32 host_ipv4_addr; + u8 arp_mac_addr[ETH_ALEN]; + __le16 reserved; +} __packed; + +/** + * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration + * @common: common/IPv4 configuration + * @remote_ipv6_addr: remote address to answer to (or zero if all) + * @solicited_node_ipv6_addr: broken -- solicited node address exists + * for each target address + * @target_ipv6_addr: our target addresses + * @ndp_mac_addr: neighbor solicitation response MAC address + * @reserved2: reserved + */ +struct iwl_proto_offload_cmd_v1 { + struct iwl_proto_offload_cmd_common common; + u8 remote_ipv6_addr[16]; + u8 
solicited_node_ipv6_addr[16]; + u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16]; + u8 ndp_mac_addr[ETH_ALEN]; + __le16 reserved2; +} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */ + +/** + * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration + * @common: common/IPv4 configuration + * @remote_ipv6_addr: remote address to answer to (or zero if all) + * @solicited_node_ipv6_addr: broken -- solicited node address exists + * for each target address + * @target_ipv6_addr: our target addresses + * @ndp_mac_addr: neighbor solicitation response MAC address + * @num_valid_ipv6_addrs: number of valid IPv6 addresses + * @reserved2: reserved + */ +struct iwl_proto_offload_cmd_v2 { + struct iwl_proto_offload_cmd_common common; + u8 remote_ipv6_addr[16]; + u8 solicited_node_ipv6_addr[16]; + u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16]; + u8 ndp_mac_addr[ETH_ALEN]; + u8 num_valid_ipv6_addrs; + u8 reserved2[3]; +} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */ + +struct iwl_ns_config { + struct in6_addr source_ipv6_addr; + struct in6_addr dest_ipv6_addr; + u8 target_mac_addr[ETH_ALEN]; + __le16 reserved; +} __packed; /* NS_OFFLOAD_CONFIG */ + +struct iwl_targ_addr { + struct in6_addr addr; + __le32 config_num; +} __packed; /* TARGET_IPV6_ADDRESS */ + +/** + * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration + * @common: common/IPv4 configuration + * @num_valid_ipv6_addrs: number of valid IPv6 addresses + * @targ_addrs: target IPv6 addresses + * @ns_config: NS offload configurations + */ +struct iwl_proto_offload_cmd_v3_small { + struct iwl_proto_offload_cmd_common common; + __le32 num_valid_ipv6_addrs; + struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S]; + struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S]; +} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ + +/** + * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration + * @common: common/IPv4 configuration + * @num_valid_ipv6_addrs: number of valid IPv6 addresses + * @targ_addrs: target IPv6 addresses + * @ns_config: NS offload configurations + */ +struct iwl_proto_offload_cmd_v3_large { + struct iwl_proto_offload_cmd_common common; + __le32 num_valid_ipv6_addrs; + struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L]; + struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L]; +} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ + +/* + * WOWLAN_PATTERNS + */ +#define IWL_WOWLAN_MIN_PATTERN_LEN 16 +#define IWL_WOWLAN_MAX_PATTERN_LEN 128 + +struct iwl_wowlan_pattern { + u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; + u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN]; + u8 mask_size; + u8 pattern_size; + __le16 reserved; +} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */ + +#define IWL_WOWLAN_MAX_PATTERNS 20 + +struct iwl_wowlan_patterns_cmd { + __le32 n_patterns; + struct iwl_wowlan_pattern patterns[]; +} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */ + +enum iwl_wowlan_wakeup_filters { + IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0), + IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1), + IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2), + IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3), + IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4), + IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5), + IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6), + IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7), + IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8), + IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9), + IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10), + IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = 
BIT(11), + IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12), + IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13), + IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14), + IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15), + IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16), +}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */ + +enum iwl_wowlan_flags { + IS_11W_ASSOC = BIT(0), + ENABLE_L3_FILTERING = BIT(1), + ENABLE_NBNS_FILTERING = BIT(2), + ENABLE_DHCP_FILTERING = BIT(3), + ENABLE_STORE_BEACON = BIT(4), +}; + +/** + * struct iwl_wowlan_config_cmd - WoWLAN configuration + * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters + * @non_qos_seq: non-QoS sequence counter to use next + * @qos_seq: QoS sequence counters to use next + * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down + * @is_11n_connection: indicates HT connection + * @offloading_tid: TID reserved for firmware use + * @flags: extra flags, see &enum iwl_wowlan_flags + * @reserved: reserved + */ +struct iwl_wowlan_config_cmd { + __le32 wakeup_filter; + __le16 non_qos_seq; + __le16 qos_seq[8]; + u8 wowlan_ba_teardown_tids; + u8 is_11n_connection; + u8 offloading_tid; + u8 flags; + u8 reserved[2]; +} __packed; /* WOWLAN_CONFIG_API_S_VER_4 */ + +/* + * WOWLAN_TSC_RSC_PARAMS + */ +#define IWL_NUM_RSC 16 + +struct tkip_sc { + __le16 iv16; + __le16 pad; + __le32 iv32; +} __packed; /* TKIP_SC_API_U_VER_1 */ + +struct iwl_tkip_rsc_tsc { + struct tkip_sc unicast_rsc[IWL_NUM_RSC]; + struct tkip_sc multicast_rsc[IWL_NUM_RSC]; + struct tkip_sc tsc; +} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */ + +struct aes_sc { + __le64 pn; +} __packed; /* TKIP_AES_SC_API_U_VER_1 */ + +struct iwl_aes_rsc_tsc { + struct aes_sc unicast_rsc[IWL_NUM_RSC]; + struct aes_sc multicast_rsc[IWL_NUM_RSC]; + struct aes_sc tsc; +} __packed; /* AES_TSC_RSC_API_S_VER_1 */ + +union iwl_all_tsc_rsc { + struct iwl_tkip_rsc_tsc tkip; + struct iwl_aes_rsc_tsc aes; +}; /* ALL_TSC_RSC_API_S_VER_2 */ + +struct iwl_wowlan_rsc_tsc_params_cmd { + union iwl_all_tsc_rsc all_tsc_rsc; +} __packed; /* ALL_TSC_RSC_API_S_VER_2 */ + +#define IWL_MIC_KEY_SIZE 8 +struct iwl_mic_keys { + u8 tx[IWL_MIC_KEY_SIZE]; + u8 rx_unicast[IWL_MIC_KEY_SIZE]; + u8 rx_mcast[IWL_MIC_KEY_SIZE]; +} __packed; /* MIC_KEYS_API_S_VER_1 */ + +#define IWL_P1K_SIZE 5 +struct iwl_p1k_cache { + __le16 p1k[IWL_P1K_SIZE]; +} __packed; + +#define IWL_NUM_RX_P1K_CACHE 2 + +struct iwl_wowlan_tkip_params_cmd { + struct iwl_mic_keys mic_keys; + struct iwl_p1k_cache tx; + struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE]; + struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE]; +} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */ + +#define IWL_KCK_MAX_SIZE 32 +#define IWL_KEK_MAX_SIZE 32 + +struct iwl_wowlan_kek_kck_material_cmd { + u8 kck[IWL_KCK_MAX_SIZE]; + u8 kek[IWL_KEK_MAX_SIZE]; + __le16 kck_len; + __le16 kek_len; + __le64 replay_ctr; +} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */ + +#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87 + +enum iwl_wowlan_rekey_status { + IWL_WOWLAN_REKEY_POST_REKEY = 0, + IWL_WOWLAN_REKEY_WHILE_REKEY = 1, +}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */ + +enum iwl_wowlan_wakeup_reason { + IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0, + IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0), + IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1), + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2), + IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3), + IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4), + IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5), + IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6), + IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7), + 
IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11), + IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), + IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13), + IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14), + IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15), + IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16), + +}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ + +struct iwl_wowlan_gtk_status { + u8 key_index; + u8 reserved[3]; + u8 decrypt_key[16]; + u8 tkip_mic_key[8]; + struct iwl_wowlan_rsc_tsc_params_cmd rsc; +} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */ + +/** + * struct iwl_wowlan_status - WoWLAN status + * @gtk: GTK data + * @replay_ctr: GTK rekey replay counter + * @pattern_number: number of the matched pattern + * @non_qos_seq_ctr: non-QoS sequence counter to use next + * @qos_seq_ctr: QoS sequence counters to use next + * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason + * @num_of_gtk_rekeys: number of GTK rekeys + * @transmitted_ndps: number of transmitted neighbor discovery packets + * @received_beacons: number of received beacons + * @wake_packet_length: wakeup packet length + * @wake_packet_bufsize: wakeup packet buffer size + * @wake_packet: wakeup packet + */ +struct iwl_wowlan_status { + struct iwl_wowlan_gtk_status gtk; + __le64 replay_ctr; + __le16 pattern_number; + __le16 non_qos_seq_ctr; + __le16 qos_seq_ctr[8]; + __le32 wakeup_reasons; + __le32 num_of_gtk_rekeys; + __le32 transmitted_ndps; + __le32 received_beacons; + __le32 wake_packet_length; + __le32 wake_packet_bufsize; + u8 wake_packet[]; /* can be truncated from _length to _bufsize */ +} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */ + +#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64 +#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128 +#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048 + +struct iwl_tcp_packet_info { + __le16 tcp_pseudo_header_checksum; + __le16 tcp_payload_length; +} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */ + +struct iwl_tcp_packet { + struct iwl_tcp_packet_info info; + u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; + u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN]; +} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */ + +struct iwl_remote_wake_packet { + struct iwl_tcp_packet_info info; + u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; + u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN]; +} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */ + +struct iwl_wowlan_remote_wake_config { + __le32 connection_max_time; /* unused */ + /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */ + u8 max_syn_retries; + u8 max_data_retries; + u8 tcp_syn_ack_timeout; + u8 tcp_ack_timeout; + + struct iwl_tcp_packet syn_tx; + struct iwl_tcp_packet synack_rx; + struct iwl_tcp_packet keepalive_ack_rx; + struct iwl_tcp_packet fin_tx; + + struct iwl_remote_wake_packet keepalive_tx; + struct iwl_remote_wake_packet wake_rx; + + /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */ + u8 sequence_number_offset; + u8 sequence_number_length; + u8 token_offset; + u8 token_length; + /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */ + __le32 initial_sequence_number; + __le16 keepalive_interval; + __le16 num_tokens; + u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS]; +} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */ + +/* TODO: NetDetect API */ + +#endif /* __iwl_fw_api_d3_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h new file mode 
100644 index 000000000000..aa76dcc148bd --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -0,0 +1,127 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_datapath_h__ +#define __iwl_fw_api_datapath_h__ + +/** + * enum iwl_data_path_subcmd_ids - data path group commands + */ +enum iwl_data_path_subcmd_ids { + /** + * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd + */ + DQA_ENABLE_CMD = 0x0, + + /** + * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd + */ + UPDATE_MU_GROUPS_CMD = 0x1, + + /** + * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd + */ + TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, + + /** + * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification + */ + STA_PM_NOTIF = 0xFD, + + /** + * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif + */ + MU_GROUP_MGMT_NOTIF = 0xFE, + + /** + * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification + */ + RX_QUEUES_NOTIFICATION = 0xFF, +}; + +/** + * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration + * + * @reserved: reserved + * @membership_status: a bitmap of MU groups + * @user_position:the position of station in a group. If the station is in the + * group then bits (group * 2) is the position -1 + */ +struct iwl_mu_group_mgmt_cmd { + __le32 reserved; + __le32 membership_status[2]; + __le32 user_position[4]; +} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */ + +/** + * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification + * + * @membership_status: a bitmap of MU groups + * @user_position: the position of station in a group. If the station is in the + * group then bits (group * 2) is the position -1 + */ +struct iwl_mu_group_mgmt_notif { + __le32 membership_status[2]; + __le32 user_position[4]; +} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_datapath_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h new file mode 100644 index 000000000000..9f88b61536bc --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -0,0 +1,345 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. 
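The membership_status[2] and user_position[4] arrays in the MU-group structures appear to follow the usual VHT Group ID Management layout: 64 groups, one membership bit per group and a 2-bit user position per group (the kerneldoc's "bits (group * 2)" note). Under that assumption, a host-side decode could look like the sketch below; the struct and sample values are illustrative, not the kernel definitions.

#include <stdint.h>
#include <stdio.h>

/*
 * Host-side mirror of the bitmaps carried by iwl_mu_group_mgmt_notif.
 * Layout assumption: 64 MU groups, one membership bit per group in
 * membership_status[2] and a 2-bit user position per group in
 * user_position[4] (the usual VHT Group ID Management encoding).
 */
struct mu_groups {
        uint32_t membership_status[2];
        uint32_t user_position[4];
};

static int mu_group_member(const struct mu_groups *g, unsigned int group)
{
        return (g->membership_status[group / 32] >> (group % 32)) & 1;
}

static unsigned int mu_user_position(const struct mu_groups *g, unsigned int group)
{
        unsigned int bit = (group * 2) % 32;

        return (g->user_position[(group * 2) / 32] >> bit) & 0x3;
}

int main(void)
{
        /* Made-up example: member of group 5 at position 2. */
        struct mu_groups g = {
                .membership_status = { 1u << 5, 0 },
                .user_position = { 2u << 10, 0, 0, 0 },
        };
        unsigned int grp;

        for (grp = 0; grp < 64; grp++)
                if (mu_group_member(&g, grp))
                        printf("group %u, position %u\n",
                               grp, mu_user_position(&g, grp));
        return 0;
}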
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_api_debug_h__ +#define __iwl_fw_api_debug_h__ + +/** + * enum iwl_debug_cmds - debug commands + */ +enum iwl_debug_cmds { + /** + * @LMAC_RD_WR: + * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and + * &struct iwl_dbg_mem_access_rsp + */ + LMAC_RD_WR = 0x0, + /** + * @UMAC_RD_WR: + * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and + * &struct iwl_dbg_mem_access_rsp + */ + UMAC_RD_WR = 0x1, + /** + * @MFU_ASSERT_DUMP_NTF: + * &struct iwl_mfu_assert_dump_notif + */ + MFU_ASSERT_DUMP_NTF = 0xFE, +}; + +/* Error response/notification */ +enum { + FW_ERR_UNKNOWN_CMD = 0x0, + FW_ERR_INVALID_CMD_PARAM = 0x1, + FW_ERR_SERVICE = 0x2, + FW_ERR_ARC_MEMORY = 0x3, + FW_ERR_ARC_CODE = 0x4, + FW_ERR_WATCH_DOG = 0x5, + FW_ERR_WEP_GRP_KEY_INDX = 0x10, + FW_ERR_WEP_KEY_SIZE = 0x11, + FW_ERR_OBSOLETE_FUNC = 0x12, + FW_ERR_UNEXPECTED = 0xFE, + FW_ERR_FATAL = 0xFF +}; + +/** + * struct iwl_error_resp - FW error indication + * ( REPLY_ERROR = 0x2 ) + * @error_type: one of FW_ERR_* + * @cmd_id: the command ID for which the error occurred + * @reserved1: reserved + * @bad_cmd_seq_num: sequence number of the erroneous command + * @error_service: which service created the error, applicable only if + * error_type = 2, otherwise 0 + * @timestamp: TSF in usecs. 
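The FW_ERR_* values above arrive in the error_type field of the error response whose layout follows; mapping them to names makes log output readable. A hedged sketch of such a mapping (codes copied from the enum, the helper itself is not part of the driver):

#include <stdio.h>
#include <stdint.h>

/* Error codes copied from the FW_ERR_* enum above (subset). */
static const char *fw_err_name(uint32_t error_type)
{
        switch (error_type) {
        case 0x0:  return "FW_ERR_UNKNOWN_CMD";
        case 0x1:  return "FW_ERR_INVALID_CMD_PARAM";
        case 0x2:  return "FW_ERR_SERVICE";
        case 0x3:  return "FW_ERR_ARC_MEMORY";
        case 0x4:  return "FW_ERR_ARC_CODE";
        case 0x5:  return "FW_ERR_WATCH_DOG";
        case 0xFE: return "FW_ERR_UNEXPECTED";
        case 0xFF: return "FW_ERR_FATAL";
        default:   return "unknown FW_ERR code";
        }
}

int main(void)
{
        printf("error_type 0x5 -> %s\n", fw_err_name(0x5));
        return 0;
}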
+ */ +struct iwl_error_resp { + __le32 error_type; + u8 cmd_id; + u8 reserved1; + __le16 bad_cmd_seq_num; + __le32 error_service; + __le64 timestamp; +} __packed; + +#define TX_FIFO_MAX_NUM_9000 8 +#define TX_FIFO_MAX_NUM 15 +#define RX_FIFO_MAX_NUM 2 +#define TX_FIFO_INTERNAL_MAX_NUM 6 + +/** + * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information + * + * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not + * accessible) + * @shared_mem_size: shared memory size + * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to + * 0x0 as accessible only via DBGM RDAT) + * @sample_buff_size: internal sample buff size + * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre + * 8000 HW set to 0x0 as not accessible) + * @txfifo_size: size of TXF0 ... TXF7 + * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0 + * @page_buff_addr: used by UMAC and performance debug (page miss analysis), + * when paging is not supported this should be 0 + * @page_buff_size: size of %page_buff_addr + * @rxfifo_addr: Start address of rxFifo + * @internal_txfifo_addr: start address of internalFifo + * @internal_txfifo_size: internal fifos' size + * + * NOTE: on firmware that don't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG + * set, the last 3 members don't exist. + */ +struct iwl_shared_mem_cfg_v2 { + __le32 shared_mem_addr; + __le32 shared_mem_size; + __le32 sample_buff_addr; + __le32 sample_buff_size; + __le32 txfifo_addr; + __le32 txfifo_size[TX_FIFO_MAX_NUM_9000]; + __le32 rxfifo_size[RX_FIFO_MAX_NUM]; + __le32 page_buff_addr; + __le32 page_buff_size; + __le32 rxfifo_addr; + __le32 internal_txfifo_addr; + __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */ + +/** + * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration + * + * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB) + * @txfifo_size: size of TX FIFOs + * @rxfifo1_addr: RXF1 addr + * @rxfifo1_size: RXF1 size + */ +struct iwl_shared_mem_lmac_cfg { + __le32 txfifo_addr; + __le32 txfifo_size[TX_FIFO_MAX_NUM]; + __le32 rxfifo1_addr; + __le32 rxfifo1_size; + +} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */ + +/** + * struct iwl_shared_mem_cfg - Shared memory configuration information + * + * @shared_mem_addr: shared memory address + * @shared_mem_size: shared memory size + * @sample_buff_addr: internal sample (mon/adc) buff addr + * @sample_buff_size: internal sample buff size + * @rxfifo2_addr: start addr of RXF2 + * @rxfifo2_size: size of RXF2 + * @page_buff_addr: used by UMAC and performance debug (page miss analysis), + * when paging is not supported this should be 0 + * @page_buff_size: size of %page_buff_addr + * @lmac_num: number of LMACs (1 or 2) + * @lmac_smem: per - LMAC smem data + */ +struct iwl_shared_mem_cfg { + __le32 shared_mem_addr; + __le32 shared_mem_size; + __le32 sample_buff_addr; + __le32 sample_buff_size; + __le32 rxfifo2_addr; + __le32 rxfifo2_size; + __le32 page_buff_addr; + __le32 page_buff_size; + __le32 lmac_num; + struct iwl_shared_mem_lmac_cfg lmac_smem[2]; +} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */ + +/** + * struct iwl_mfuart_load_notif - mfuart image version & status + * ( MFUART_LOAD_NOTIFICATION = 0xb1 ) + * @installed_ver: installed image version + * @external_ver: external image version + * @status: MFUART loading status + * @duration: MFUART loading time + * @image_size: MFUART image size in bytes +*/ +struct 
iwl_mfuart_load_notif { + __le32 installed_ver; + __le32 external_ver; + __le32 status; + __le32 duration; + /* image size valid only in v2 of the command */ + __le32 image_size; +} __packed; /* MFU_LOADER_NTFY_API_S_VER_2 */ + +/** + * struct iwl_mfu_assert_dump_notif - mfuart dump logs + * ( MFU_ASSERT_DUMP_NTF = 0xfe ) + * @assert_id: mfuart assert id that cause the notif + * @curr_reset_num: number of asserts since uptime + * @index_num: current chunk id + * @parts_num: total number of chunks + * @data_size: number of data bytes sent + * @data: data buffer + */ +struct iwl_mfu_assert_dump_notif { + __le32 assert_id; + __le32 curr_reset_num; + __le16 index_num; + __le16 parts_num; + __le32 data_size; + __le32 data[0]; +} __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */ + +/** + * enum iwl_mvm_marker_id - marker ids + * + * The ids for different type of markers to insert into the usniffer logs + * + * @MARKER_ID_TX_FRAME_LATENCY: TX latency marker + */ +enum iwl_mvm_marker_id { + MARKER_ID_TX_FRAME_LATENCY = 1, +}; /* MARKER_ID_API_E_VER_1 */ + +/** + * struct iwl_mvm_marker - mark info into the usniffer logs + * + * (MARKER_CMD = 0xcb) + * + * Mark the UTC time stamp into the usniffer logs together with additional + * metadata, so the usniffer output can be parsed. + * In the command response the ucode will return the GP2 time. + * + * @dw_len: The amount of dwords following this byte including this byte. + * @marker_id: A unique marker id (iwl_mvm_marker_id). + * @reserved: reserved. + * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC + * @metadata: additional meta data that will be written to the unsiffer log + */ +struct iwl_mvm_marker { + u8 dw_len; + u8 marker_id; + __le16 reserved; + __le64 timestamp; + __le32 metadata[0]; +} __packed; /* MARKER_API_S_VER_1 */ + +/* Operation types for the debug mem access */ +enum { + DEBUG_MEM_OP_READ = 0, + DEBUG_MEM_OP_WRITE = 1, + DEBUG_MEM_OP_WRITE_BYTES = 2, +}; + +#define DEBUG_MEM_MAX_SIZE_DWORDS 32 + +/** + * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory + * @op: DEBUG_MEM_OP_* + * @addr: address to read/write from/to + * @len: in dwords, to read/write + * @data: for write opeations, contains the source buffer + */ +struct iwl_dbg_mem_access_cmd { + __le32 op; + __le32 addr; + __le32 len; + __le32 data[]; +} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */ + +/* Status responses for the debug mem access */ +enum { + DEBUG_MEM_STATUS_SUCCESS = 0x0, + DEBUG_MEM_STATUS_FAILED = 0x1, + DEBUG_MEM_STATUS_LOCKED = 0x2, + DEBUG_MEM_STATUS_HIDDEN = 0x3, + DEBUG_MEM_STATUS_LENGTH = 0x4, +}; + +/** + * struct iwl_dbg_mem_access_rsp - Response to debug mem commands + * @status: DEBUG_MEM_STATUS_* + * @len: read dwords (0 for write operations) + * @data: contains the read DWs + */ +struct iwl_dbg_mem_access_rsp { + __le32 status; + __le32 len; + __le32 data[]; +} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ + +#define CONT_REC_COMMAND_SIZE 80 +#define ENABLE_CONT_RECORDING 0x15 +#define DISABLE_CONT_RECORDING 0x16 + +/* + * struct iwl_continuous_record_mode - recording mode + */ +struct iwl_continuous_record_mode { + __le16 enable_recording; +} __packed; + +/* + * struct iwl_continuous_record_cmd - enable/disable continuous recording + */ +struct iwl_continuous_record_cmd { + struct iwl_continuous_record_mode record_mode; + u8 pad[CONT_REC_COMMAND_SIZE - + sizeof(struct iwl_continuous_record_mode)]; +} __packed; + +#endif /* __iwl_fw_api_debug_h__ */ diff --git 
a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h new file mode 100644 index 000000000000..befc3b126041 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h @@ -0,0 +1,183 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_filter_h__ +#define __iwl_fw_api_filter_h__ + +#include "fw/api/mac.h" + +#define MAX_PORT_ID_NUM 2 +#define MAX_MCAST_FILTERING_ADDRESSES 256 + +/** + * struct iwl_mcast_filter_cmd - configure multicast filter. 
+ * @filter_own: Set 1 to filter out multicast packets sent by station itself + * @port_id: Multicast MAC addresses array specifier. This is a strange way + * to identify network interface adopted in host-device IF. + * It is used by FW as index in array of addresses. This array has + * MAX_PORT_ID_NUM members. + * @count: Number of MAC addresses in the array + * @pass_all: Set 1 to pass all multicast packets. + * @bssid: current association BSSID. + * @reserved: reserved + * @addr_list: Place holder for array of MAC addresses. + * IMPORTANT: add padding if necessary to ensure DWORD alignment. + */ +struct iwl_mcast_filter_cmd { + u8 filter_own; + u8 port_id; + u8 count; + u8 pass_all; + u8 bssid[6]; + u8 reserved[2]; + u8 addr_list[0]; +} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ + +#define MAX_BCAST_FILTERS 8 +#define MAX_BCAST_FILTER_ATTRS 2 + +/** + * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet + * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. + * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. + * start of ip payload). + */ +enum iwl_mvm_bcast_filter_attr_offset { + BCAST_FILTER_OFFSET_PAYLOAD_START = 0, + BCAST_FILTER_OFFSET_IP_END = 1, +}; + +/** + * struct iwl_fw_bcast_filter_attr - broadcast filter attribute + * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. + * @offset: starting offset of this pattern. + * @reserved1: reserved + * @val: value to match - big endian (MSB is the first + * byte to match from offset pos). + * @mask: mask to match (big endian). + */ +struct iwl_fw_bcast_filter_attr { + u8 offset_type; + u8 offset; + __le16 reserved1; + __be32 val; + __be32 mask; +} __packed; /* BCAST_FILTER_ATT_S_VER_1 */ + +/** + * enum iwl_mvm_bcast_filter_frame_type - filter frame type + * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. + * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames + */ +enum iwl_mvm_bcast_filter_frame_type { + BCAST_FILTER_FRAME_TYPE_ALL = 0, + BCAST_FILTER_FRAME_TYPE_IPV4 = 1, +}; + +/** + * struct iwl_fw_bcast_filter - broadcast filter + * @discard: discard frame (1) or let it pass (0). + * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. + * @reserved1: reserved + * @num_attrs: number of valid attributes in this filter. + * @attrs: attributes of this filter. a filter is considered matched + * only when all its attributes are matched (i.e. AND relationship) + */ +struct iwl_fw_bcast_filter { + u8 discard; + u8 frame_type; + u8 num_attrs; + u8 reserved1; + struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; +} __packed; /* BCAST_FILTER_S_VER_1 */ + +/** + * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. + * @default_discard: default action for this mac (discard (1) / pass (0)). + * @reserved1: reserved + * @attached_filters: bitmap of relevant filters for this mac. 
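iwl_mcast_filter_cmd ends in a variable-length addr_list whose total size must be padded to DWORD alignment, as the kerneldoc notes. A small sketch of computing the resulting command length on the host side; the mirror struct below is illustrative, not the kernel definition.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define ETH_ALEN 6

/*
 * Host-side mirror of the fixed part of iwl_mcast_filter_cmd; the real
 * definition lives in the header above and uses u8/__packed.
 */
struct mcast_filter_cmd {
        uint8_t filter_own;
        uint8_t port_id;
        uint8_t count;
        uint8_t pass_all;
        uint8_t bssid[ETH_ALEN];
        uint8_t reserved[2];
        /* followed by count * ETH_ALEN address bytes, padded to a DWORD */
};

/* Round the variable-length address list up to DWORD alignment,
 * as the kerneldoc for @addr_list requires. */
static size_t mcast_cmd_size(unsigned int count)
{
        size_t len = sizeof(struct mcast_filter_cmd) + (size_t)count * ETH_ALEN;

        return (len + 3) & ~(size_t)3;
}

int main(void)
{
        printf("1 address   -> %zu bytes\n", mcast_cmd_size(1)); /* 12 + 6  -> 20 */
        printf("2 addresses -> %zu bytes\n", mcast_cmd_size(2)); /* 12 + 12 -> 24 */
        return 0;
}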
+ */ +struct iwl_fw_bcast_mac { + u8 default_discard; + u8 reserved1; + __le16 attached_filters; +} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ + +/** + * struct iwl_bcast_filter_cmd - broadcast filtering configuration + * @disable: enable (0) / disable (1) + * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) + * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) + * @reserved1: reserved + * @filters: broadcast filters + * @macs: broadcast filtering configuration per-mac + */ +struct iwl_bcast_filter_cmd { + u8 disable; + u8 max_bcast_filters; + u8 max_macs; + u8 reserved1; + struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; + struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; +} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ + +#endif /* __iwl_fw_api_filter_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h new file mode 100644 index 000000000000..39c89e85fd2f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -0,0 +1,152 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
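Each broadcast filter attribute matches a big-endian value/mask pair at an offset measured either from the payload start or from the end of the IP header. Below is a sketch of filling one attribute, e.g. to match a UDP destination port in the first dword after the IP header; the mirror struct, the helper and the port value are purely illustrative, not how the driver configures its filters.

#include <stdint.h>
#include <stdio.h>

/* From enum iwl_mvm_bcast_filter_attr_offset above. */
#define BCAST_FILTER_OFFSET_PAYLOAD_START       0
#define BCAST_FILTER_OFFSET_IP_END              1

/*
 * Host-side mirror of iwl_fw_bcast_filter_attr: @val and @mask are
 * big-endian on the wire, MSB first starting at @offset.
 */
struct bcast_filter_attr {
        uint8_t offset_type;
        uint8_t offset;
        uint16_t reserved1;
        uint32_t val;   /* converted to big-endian before sending */
        uint32_t mask;  /* converted to big-endian before sending */
};

int main(void)
{
        /*
         * Illustration only: match a UDP destination port of 67 in the
         * first dword after the IP header (bytes 2-3 are the dst port),
         * i.e. mask the low 16 bits of the big-endian dword.
         */
        struct bcast_filter_attr attr = {
                .offset_type = BCAST_FILTER_OFFSET_IP_END,
                .offset = 0,
                .val  = 67,             /* 0x00000043 */
                .mask = 0x0000ffff,
        };

        printf("offset_type=%u offset=%u val=0x%08x mask=0x%08x\n",
               attr.offset_type, attr.offset, attr.val, attr.mask);
        return 0;
}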
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_mac_cfg_h__ +#define __iwl_fw_api_mac_cfg_h__ + +/** + * enum iwl_mac_conf_subcmd_ids - mac configuration command IDs + */ +enum iwl_mac_conf_subcmd_ids { + /** + * @LINK_QUALITY_MEASUREMENT_CMD: &struct iwl_link_qual_msrmnt_cmd + */ + LINK_QUALITY_MEASUREMENT_CMD = 0x1, + + /** + * @LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF: + * &struct iwl_link_qual_msrmnt_notif + */ + LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE, + + /** + * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif + */ + CHANNEL_SWITCH_NOA_NOTIF = 0xFF, +}; + +#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16 + +enum iwl_lqm_cmd_operatrions { + LQM_CMD_OPERATION_START_MEASUREMENT = 0x01, + LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02, +}; + +enum iwl_lqm_status { + LQM_STATUS_SUCCESS = 0, + LQM_STATUS_TIMEOUT = 1, + LQM_STATUS_ABORT = 2, +}; + +/** + * struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command + * @cmd_operation: command operation to be performed (start or stop) + * as defined above. + * @mac_id: MAC ID the measurement applies to. + * @measurement_time: time of the total measurement to be performed, in uSec. + * @timeout: maximum time allowed until a response is sent, in uSec. + */ +struct iwl_link_qual_msrmnt_cmd { + __le32 cmd_operation; + __le32 mac_id; + __le32 measurement_time; + __le32 timeout; +} __packed /* LQM_CMD_API_S_VER_1 */; + +/** + * struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification + * + * @frequent_stations_air_time: an array containing the total air time + * (in uSec) used by the most frequently transmitting stations. + * @number_of_stations: the number of uniqe stations included in the array + * (a number between 0 to 16) + * @total_air_time_other_stations: the total air time (uSec) used by all the + * stations which are not included in the above report. + * @time_in_measurement_window: the total time in uSec in which a measurement + * took place. + * @tx_frame_dropped: the number of TX frames dropped due to retry limit during + * measurement + * @mac_id: MAC ID the measurement applies to. + * @status: return status. may be one of the LQM_STATUS_* defined above. + * @reserved: reserved. 
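Starting a link-quality measurement amounts to filling the four little-endian dwords of iwl_link_qual_msrmnt_cmd. A host-side sketch with invented timing values follows; the mirror struct is not the kernel definition.

#include <stdint.h>
#include <stdio.h>

/* From enum iwl_lqm_cmd_operatrions above. */
#define LQM_CMD_OPERATION_START_MEASUREMENT     0x01
#define LQM_CMD_OPERATION_STOP_MEASUREMENT      0x02

/*
 * Host-side mirror of iwl_link_qual_msrmnt_cmd; the kernel struct uses
 * __le32 fields, so all values are little-endian on the wire.
 */
struct link_qual_msrmnt_cmd {
        uint32_t cmd_operation;
        uint32_t mac_id;
        uint32_t measurement_time;      /* usec */
        uint32_t timeout;               /* usec */
};

int main(void)
{
        /* Start a 100 ms measurement on MAC 0, allow 500 ms for the reply. */
        struct link_qual_msrmnt_cmd cmd = {
                .cmd_operation = LQM_CMD_OPERATION_START_MEASUREMENT,
                .mac_id = 0,
                .measurement_time = 100 * 1000,
                .timeout = 500 * 1000,
        };

        printf("op=%u mac=%u time=%uus timeout=%uus\n",
               cmd.cmd_operation, cmd.mac_id,
               cmd.measurement_time, cmd.timeout);
        return 0;
}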
+ */ +struct iwl_link_qual_msrmnt_notif { + __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT]; + __le32 number_of_stations; + __le32 total_air_time_other_stations; + __le32 time_in_measurement_window; + __le32 tx_frame_dropped; + __le32 mac_id; + __le32 status; + u8 reserved[12]; +} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */ + +/** + * struct iwl_channel_switch_noa_notif - Channel switch NOA notification + * + * @id_and_color: ID and color of the MAC + */ +struct iwl_channel_switch_noa_notif { + __le32 id_and_color; +} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_mac_cfg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h new file mode 100644 index 000000000000..f2e31e040a7b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -0,0 +1,409 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_fw_api_mac_h__ +#define __iwl_fw_api_mac_h__ + +/* + * The first MAC indices (starting from 0) are available to the driver, + * AUX indices follows - 1 for non-CDB, 2 for CDB. + */ +#define MAC_INDEX_AUX 4 +#define MAC_INDEX_MIN_DRIVER 0 +#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX +#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1) +#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) + +#define IWL_MVM_STATION_COUNT 16 +#define IWL_MVM_INVALID_STA 0xFF + +enum iwl_ac { + AC_BK, + AC_BE, + AC_VI, + AC_VO, + AC_NUM, +}; + +/** + * enum iwl_mac_protection_flags - MAC context flags + * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames, + * this will require CCK RTS/CTS2self. + * RTS/CTS will protect full burst time. + * @MAC_PROT_FLG_HT_PROT: enable HT protection + * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions + * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self + */ +enum iwl_mac_protection_flags { + MAC_PROT_FLG_TGG_PROTECT = BIT(3), + MAC_PROT_FLG_HT_PROT = BIT(23), + MAC_PROT_FLG_FAT_PROT = BIT(24), + MAC_PROT_FLG_SELF_CTS_EN = BIT(30), +}; + +#define MAC_FLG_SHORT_SLOT BIT(4) +#define MAC_FLG_SHORT_PREAMBLE BIT(5) + +/** + * enum iwl_mac_types - Supported MAC types + * @FW_MAC_TYPE_FIRST: lowest supported MAC type + * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal) + * @FW_MAC_TYPE_LISTENER: monitor MAC type (?) + * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS + * @FW_MAC_TYPE_IBSS: IBSS + * @FW_MAC_TYPE_BSS_STA: BSS (managed) station + * @FW_MAC_TYPE_P2P_DEVICE: P2P Device + * @FW_MAC_TYPE_P2P_STA: P2P client + * @FW_MAC_TYPE_GO: P2P GO + * @FW_MAC_TYPE_TEST: ? + * @FW_MAC_TYPE_MAX: highest support MAC type + */ +enum iwl_mac_types { + FW_MAC_TYPE_FIRST = 1, + FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST, + FW_MAC_TYPE_LISTENER, + FW_MAC_TYPE_PIBSS, + FW_MAC_TYPE_IBSS, + FW_MAC_TYPE_BSS_STA, + FW_MAC_TYPE_P2P_DEVICE, + FW_MAC_TYPE_P2P_STA, + FW_MAC_TYPE_GO, + FW_MAC_TYPE_TEST, + FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST +}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */ + +/** + * enum iwl_tsf_id - TSF hw timer ID + * @TSF_ID_A: use TSF A + * @TSF_ID_B: use TSF B + * @TSF_ID_C: use TSF C + * @TSF_ID_D: use TSF D + * @NUM_TSF_IDS: number of TSF timers available + */ +enum iwl_tsf_id { + TSF_ID_A = 0, + TSF_ID_B = 1, + TSF_ID_C = 2, + TSF_ID_D = 3, + NUM_TSF_IDS = 4, +}; /* TSF_ID_API_E_VER_1 */ + +/** + * struct iwl_mac_data_ap - configuration data for AP MAC context + * @beacon_time: beacon transmit time in system time + * @beacon_tsf: beacon transmit time in TSF + * @bi: beacon interval in TU + * @bi_reciprocal: 2^32 / bi + * @dtim_interval: dtim transmit time in TU + * @dtim_reciprocal: 2^32 / dtim_interval + * @mcast_qid: queue ID for multicast traffic. 
+ * NOTE: obsolete from VER2 and on + * @beacon_template: beacon template ID + */ +struct iwl_mac_data_ap { + __le32 beacon_time; + __le64 beacon_tsf; + __le32 bi; + __le32 bi_reciprocal; + __le32 dtim_interval; + __le32 dtim_reciprocal; + __le32 mcast_qid; + __le32 beacon_template; +} __packed; /* AP_MAC_DATA_API_S_VER_2 */ + +/** + * struct iwl_mac_data_ibss - configuration data for IBSS MAC context + * @beacon_time: beacon transmit time in system time + * @beacon_tsf: beacon transmit time in TSF + * @bi: beacon interval in TU + * @bi_reciprocal: 2^32 / bi + * @beacon_template: beacon template ID + */ +struct iwl_mac_data_ibss { + __le32 beacon_time; + __le64 beacon_tsf; + __le32 bi; + __le32 bi_reciprocal; + __le32 beacon_template; +} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_sta - configuration data for station MAC context + * @is_assoc: 1 for associated state, 0 otherwise + * @dtim_time: DTIM arrival time in system time + * @dtim_tsf: DTIM arrival time in TSF + * @bi: beacon interval in TU, applicable only when associated + * @bi_reciprocal: 2^32 / bi , applicable only when associated + * @dtim_interval: DTIM interval in TU, applicable only when associated + * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated + * @listen_interval: in beacon intervals, applicable only when associated + * @assoc_id: unique ID assigned by the AP during association + * @assoc_beacon_arrive_time: TSF of first beacon after association + */ +struct iwl_mac_data_sta { + __le32 is_assoc; + __le32 dtim_time; + __le64 dtim_tsf; + __le32 bi; + __le32 bi_reciprocal; + __le32 dtim_interval; + __le32 dtim_reciprocal; + __le32 listen_interval; + __le32 assoc_id; + __le32 assoc_beacon_arrive_time; +} __packed; /* STA_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_go - configuration data for P2P GO MAC context + * @ap: iwl_mac_data_ap struct with most config data + * @ctwin: client traffic window in TU (period after TBTT when GO is present). + * 0 indicates that there is no CT window. + * @opp_ps_enabled: indicate that opportunistic PS allowed + */ +struct iwl_mac_data_go { + struct iwl_mac_data_ap ap; + __le32 ctwin; + __le32 opp_ps_enabled; +} __packed; /* GO_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context + * @sta: iwl_mac_data_sta struct with most config data + * @ctwin: client traffic window in TU (period after TBTT when GO is present). + * 0 indicates that there is no CT window. + */ +struct iwl_mac_data_p2p_sta { + struct iwl_mac_data_sta sta; + __le32 ctwin; +} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */ + +/** + * struct iwl_mac_data_pibss - Pseudo IBSS config data + * @stats_interval: interval in TU between statistics notifications to host. + */ +struct iwl_mac_data_pibss { + __le32 stats_interval; +} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */ + +/* + * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC + * context. + * @is_disc_extended: if set to true, P2P Device discoverability is enabled on + * other channels as well. This should be to true only in case that the + * device is discoverable and there is an active GO. Note that setting this + * field when not needed, will increase the number of interrupts and have + * effect on the platform power, as this setting opens the Rx filters on + * all macs. 
+ */ +struct iwl_mac_data_p2p_dev { + __le32 is_disc_extended; +} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */ + +/** + * enum iwl_mac_filter_flags - MAC context filter flags + * @MAC_FILTER_IN_PROMISC: accept all data frames + * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and + * control frames to the host + * @MAC_FILTER_ACCEPT_GRP: accept multicast frames + * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames + * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames + * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host + * (in station mode when associated) + * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames + * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames + * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host + */ +enum iwl_mac_filter_flags { + MAC_FILTER_IN_PROMISC = BIT(0), + MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1), + MAC_FILTER_ACCEPT_GRP = BIT(2), + MAC_FILTER_DIS_DECRYPT = BIT(3), + MAC_FILTER_DIS_GRP_DECRYPT = BIT(4), + MAC_FILTER_IN_BEACON = BIT(6), + MAC_FILTER_OUT_BCAST = BIT(8), + MAC_FILTER_IN_CRC32 = BIT(11), + MAC_FILTER_IN_PROBE_REQUEST = BIT(12), +}; + +/** + * enum iwl_mac_qos_flags - QoS flags + * @MAC_QOS_FLG_UPDATE_EDCA: ? + * @MAC_QOS_FLG_TGN: HT is enabled + * @MAC_QOS_FLG_TXOP_TYPE: ? + * + */ +enum iwl_mac_qos_flags { + MAC_QOS_FLG_UPDATE_EDCA = BIT(0), + MAC_QOS_FLG_TGN = BIT(1), + MAC_QOS_FLG_TXOP_TYPE = BIT(4), +}; + +/** + * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD + * @cw_min: Contention window, start value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x0f. + * @cw_max: Contention window, max value in numbers of slots. + * Should be a power-of-2, minus 1. Device's default is 0x3f. + * @aifsn: Number of slots in Arbitration Interframe Space (before + * performing random backoff timing prior to Tx). Device default 1. + * @fifos_mask: FIFOs used by this MAC for this AC + * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. + * + * One instance of this config struct for each of 4 EDCA access categories + * in struct iwl_qosparam_cmd. + * + * Device will automatically increase contention window by (2*CW) + 1 for each + * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW + * value, to cap the CW value. 
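+ * E.g. starting from the default cw_min = 0x0f with cw_max = 0x3f, the CW
+ * grows 0x0f -> 0x1f -> 0x3f across retries and then stays at 0x3f, since
+ * (2 * 0x3f + 1) & 0x3f = 0x3f.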
+ */ +struct iwl_ac_qos { + __le16 cw_min; + __le16 cw_max; + u8 aifsn; + u8 fifos_mask; + __le16 edca_txop; +} __packed; /* AC_QOS_API_S_VER_2 */ + +/** + * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts + * ( MAC_CONTEXT_CMD = 0x28 ) + * @id_and_color: ID and color of the MAC + * @action: action to perform, one of FW_CTXT_ACTION_* + * @mac_type: one of &enum iwl_mac_types + * @tsf_id: TSF HW timer, one of &enum iwl_tsf_id + * @node_addr: MAC address + * @reserved_for_node_addr: reserved + * @bssid_addr: BSSID + * @reserved_for_bssid_addr: reserved + * @cck_rates: basic rates available for CCK + * @ofdm_rates: basic rates available for OFDM + * @protection_flags: combination of &enum iwl_mac_protection_flags + * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise + * @short_slot: 0x10 for enabling short slots, 0 otherwise + * @filter_flags: combination of &enum iwl_mac_filter_flags + * @qos_flags: from &enum iwl_mac_qos_flags + * @ac: one iwl_mac_qos configuration for each AC + */ +struct iwl_mac_ctx_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */ + __le32 mac_type; + __le32 tsf_id; + u8 node_addr[6]; + __le16 reserved_for_node_addr; + u8 bssid_addr[6]; + __le16 reserved_for_bssid_addr; + __le32 cck_rates; + __le32 ofdm_rates; + __le32 protection_flags; + __le32 cck_short_preamble; + __le32 short_slot; + __le32 filter_flags; + /* MAC_QOS_PARAM_API_S_VER_1 */ + __le32 qos_flags; + struct iwl_ac_qos ac[AC_NUM+1]; + /* MAC_CONTEXT_COMMON_DATA_API_S */ + union { + struct iwl_mac_data_ap ap; + struct iwl_mac_data_go go; + struct iwl_mac_data_sta sta; + struct iwl_mac_data_p2p_sta p2p_sta; + struct iwl_mac_data_p2p_dev p2p_dev; + struct iwl_mac_data_pibss pibss; + struct iwl_mac_data_ibss ibss; + }; +} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */ + +static inline u32 iwl_mvm_reciprocal(u32 v) +{ + if (!v) + return 0; + return 0xFFFFFFFF / v; +} + +#define IWL_NONQOS_SEQ_GET 0x1 +#define IWL_NONQOS_SEQ_SET 0x2 +struct iwl_nonqos_seq_query_cmd { + __le32 get_set_flag; + __le32 mac_id_n_color; + __le16 value; + __le16 reserved; +} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */ + +/** + * struct iwl_missed_beacons_notif - information on missed beacons + * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) + * @mac_id: interface ID + * @consec_missed_beacons_since_last_rx: number of consecutive missed + * beacons since last RX. + * @consec_missed_beacons: number of consecutive missed beacons + * @num_expected_beacons: number of expected beacons + * @num_recvd_beacons: number of received beacons + */ +struct iwl_missed_beacons_notif { + __le32 mac_id; + __le32 consec_missed_beacons_since_last_rx; + __le32 consec_missed_beacons; + __le32 num_expected_beacons; + __le32 num_recvd_beacons; +} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ + +#endif /* __iwl_fw_api_mac_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h new file mode 100644 index 000000000000..d4c01f3dce32 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -0,0 +1,378 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_nvm_reg_h__ +#define __iwl_fw_api_nvm_reg_h__ + +/** + * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands + */ +enum iwl_regulatory_and_nvm_subcmd_ids { + /** + * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd + */ + NVM_ACCESS_COMPLETE = 0x0, + + /** + * @NVM_GET_INFO: + * Command is &struct iwl_nvm_get_info, + * response is &struct iwl_nvm_get_info_rsp + */ + NVM_GET_INFO = 0x2, +}; + +/** + * enum iwl_nvm_access_op - NVM access opcode + * @IWL_NVM_READ: read NVM + * @IWL_NVM_WRITE: write NVM + */ +enum iwl_nvm_access_op { + IWL_NVM_READ = 0, + IWL_NVM_WRITE = 1, +}; + +/** + * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD + * @NVM_ACCESS_TARGET_CACHE: access the cache + * @NVM_ACCESS_TARGET_OTP: access the OTP + * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM + */ +enum iwl_nvm_access_target { + NVM_ACCESS_TARGET_CACHE = 0, + NVM_ACCESS_TARGET_OTP = 1, + NVM_ACCESS_TARGET_EEPROM = 2, +}; + +/** + * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD + * @NVM_SECTION_TYPE_SW: software section + * @NVM_SECTION_TYPE_REGULATORY: regulatory section + * @NVM_SECTION_TYPE_CALIBRATION: calibration section + * @NVM_SECTION_TYPE_PRODUCTION: production section + * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section + * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section + * @NVM_MAX_NUM_SECTIONS: number of sections + */ +enum iwl_nvm_section_type { + NVM_SECTION_TYPE_SW = 1, + NVM_SECTION_TYPE_REGULATORY = 3, + NVM_SECTION_TYPE_CALIBRATION = 4, + NVM_SECTION_TYPE_PRODUCTION = 5, + NVM_SECTION_TYPE_MAC_OVERRIDE = 11, + NVM_SECTION_TYPE_PHY_SKU = 12, + NVM_MAX_NUM_SECTIONS = 13, +}; + +/** + * struct iwl_nvm_access_cmd - Request the device to send an NVM section + * @op_code: &enum iwl_nvm_access_op + * @target: &enum iwl_nvm_access_target + * @type: &enum iwl_nvm_section_type + * @offset: offset in bytes into the section + * @length: in bytes, to read/write + * @data: if write operation, the data to write. On read its empty + */ +struct iwl_nvm_access_cmd { + u8 op_code; + u8 target; + __le16 type; + __le16 offset; + __le16 length; + u8 data[]; +} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ + +/** + * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD + * @offset: offset in bytes into the section + * @length: in bytes, either how much was written or read + * @type: NVM_SECTION_TYPE_* + * @status: 0 for success, fail otherwise + * @data: if read operation, the data returned. Empty on write. 
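+ * For example, an NVM_ACCESS_CMD with op_code = IWL_NVM_READ,
+ * target = NVM_ACCESS_TARGET_CACHE, type = NVM_SECTION_TYPE_SW, offset = 0
+ * and length = 16 would typically be answered with the same offset and type,
+ * the number of bytes actually read in @length and the section contents
+ * in @data.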
+ */ +struct iwl_nvm_access_resp { + __le16 offset; + __le16 length; + __le16 type; + __le16 status; + u8 data[]; +} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */ + +/* + * struct iwl_nvm_get_info - request to get NVM data + */ +struct iwl_nvm_get_info { + __le32 reserved; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */ + +/** + * struct iwl_nvm_get_info_general - general NVM data + * @flags: 1 - empty, 0 - valid + * @nvm_version: nvm version + * @board_type: board type + * @reserved: reserved + */ +struct iwl_nvm_get_info_general { + __le32 flags; + __le16 nvm_version; + u8 board_type; + u8 reserved; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */ + +/** + * struct iwl_nvm_get_info_sku - mac information + * @enable_24g: band 2.4G enabled + * @enable_5g: band 5G enabled + * @enable_11n: 11n enabled + * @enable_11ac: 11ac enabled + * @mimo_disable: MIMO enabled + * @ext_crypto: Extended crypto enabled + */ +struct iwl_nvm_get_info_sku { + __le32 enable_24g; + __le32 enable_5g; + __le32 enable_11n; + __le32 enable_11ac; + __le32 mimo_disable; + __le32 ext_crypto; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */ + +/** + * struct iwl_nvm_get_info_phy - phy information + * @tx_chains: BIT 0 chain A, BIT 1 chain B + * @rx_chains: BIT 0 chain A, BIT 1 chain B + */ +struct iwl_nvm_get_info_phy { + __le32 tx_chains; + __le32 rx_chains; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ + +#define IWL_NUM_CHANNELS (51) + +/** + * struct iwl_nvm_get_info_regulatory - regulatory information + * @lar_enabled: is LAR enabled + * @channel_profile: regulatory data of this channel + * @reserved: reserved + */ +struct iwl_nvm_get_info_regulatory { + __le32 lar_enabled; + __le16 channel_profile[IWL_NUM_CHANNELS]; + __le16 reserved; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ + +/** + * struct iwl_nvm_get_info_rsp - response to get NVM data + * @general: general NVM data + * @mac_sku: data relating to MAC sku + * @phy_sku: data relating to PHY sku + * @regulatory: regulatory data + */ +struct iwl_nvm_get_info_rsp { + struct iwl_nvm_get_info_general general; + struct iwl_nvm_get_info_sku mac_sku; + struct iwl_nvm_get_info_phy phy_sku; + struct iwl_nvm_get_info_regulatory regulatory; +} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */ + +/** + * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed + * @reserved: reserved + */ +struct iwl_nvm_access_complete_cmd { + __le32 reserved; +} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ + +/** + * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic + * regulatory profile according to the given MCC (Mobile Country Code). + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. + * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the + * MCC in the cmd response will be the relevant MCC in the NVM. + * @mcc: given mobile country code + * @source_id: the source from where we got the MCC, see iwl_mcc_source + * @reserved: reserved for alignment + */ +struct iwl_mcc_update_cmd_v1 { + __le16 mcc; + u8 source_id; + u8 reserved; +} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */ + +/** + * struct iwl_mcc_update_cmd - Request the device to update geographic + * regulatory profile according to the given MCC (Mobile Country Code). + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. 
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the + * MCC in the cmd response will be the relevant MCC in the NVM. + * @mcc: given mobile country code + * @source_id: the source from where we got the MCC, see iwl_mcc_source + * @reserved: reserved for alignment + * @key: integrity key for MCC API OEM testing + * @reserved2: reserved + */ +struct iwl_mcc_update_cmd { + __le16 mcc; + u8 source_id; + u8 reserved; + __le32 key; + u8 reserved2[20]; +} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */ + +/** + * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD. + * Contains the new channel control profile map, if changed, and the new MCC + * (mobile country code). + * The new MCC may be different than what was requested in MCC_UPDATE_CMD. + * @status: see &enum iwl_mcc_update_status + * @mcc: the new applied MCC + * @cap: capabilities for all channels which matches the MCC + * @source_id: the MCC source, see iwl_mcc_source + * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 + * channels, depending on platform) + * @channels: channel control data map, DWORD for each channel. Only the first + * 16bits are used. + */ +struct iwl_mcc_update_resp_v1 { + __le32 status; + __le16 mcc; + u8 cap; + u8 source_id; + __le32 n_channels; + __le32 channels[0]; +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */ + +/** + * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. + * Contains the new channel control profile map, if changed, and the new MCC + * (mobile country code). + * The new MCC may be different than what was requested in MCC_UPDATE_CMD. + * @status: see &enum iwl_mcc_update_status + * @mcc: the new applied MCC + * @cap: capabilities for all channels which matches the MCC + * @source_id: the MCC source, see iwl_mcc_source + * @time: time elapsed from the MCC test start (in 30 seconds TU) + * @reserved: reserved. + * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 + * channels, depending on platform) + * @channels: channel control data map, DWORD for each channel. Only the first + * 16bits are used. + */ +struct iwl_mcc_update_resp { + __le32 status; + __le16 mcc; + u8 cap; + u8 source_id; + __le16 time; + __le16 reserved; + __le32 n_channels; + __le32 channels[0]; +} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */ + +/** + * struct iwl_mcc_chub_notif - chub notifies of mcc change + * (MCC_CHUB_UPDATE_CMD = 0xc9) + * The Chub (Communication Hub, CommsHUB) is a HW component that connects to + * the cellular and connectivity cores that gets updates of the mcc, and + * notifies the ucode directly of any mcc change. + * The ucode requests the driver to request the device to update geographic + * regulatory profile according to the given MCC (Mobile Country Code). + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. + * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the + * MCC in the cmd response will be the relevant MCC in the NVM. 
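+ * The two-letter code is packed into @mcc with the first character in the
+ * upper byte, so e.g. "ZZ" would typically be encoded as ('Z' << 8) | 'Z'.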
+ * @mcc: given mobile country code + * @source_id: identity of the change originator, see iwl_mcc_source + * @reserved1: reserved for alignment + */ +struct iwl_mcc_chub_notif { + __le16 mcc; + u8 source_id; + u8 reserved1; +} __packed; /* LAR_MCC_NOTIFY_S */ + +enum iwl_mcc_update_status { + MCC_RESP_NEW_CHAN_PROFILE, + MCC_RESP_SAME_CHAN_PROFILE, + MCC_RESP_INVALID, + MCC_RESP_NVM_DISABLED, + MCC_RESP_ILLEGAL, + MCC_RESP_LOW_PRIORITY, + MCC_RESP_TEST_MODE_ACTIVE, + MCC_RESP_TEST_MODE_NOT_ACTIVE, + MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE, +}; + +enum iwl_mcc_source { + MCC_SOURCE_OLD_FW = 0, + MCC_SOURCE_ME = 1, + MCC_SOURCE_BIOS = 2, + MCC_SOURCE_3G_LTE_HOST = 3, + MCC_SOURCE_3G_LTE_DEVICE = 4, + MCC_SOURCE_WIFI = 5, + MCC_SOURCE_RESERVED = 6, + MCC_SOURCE_DEFAULT = 7, + MCC_SOURCE_UNINITIALIZED = 8, + MCC_SOURCE_MCC_API = 9, + MCC_SOURCE_GET_CURRENT = 0x10, + MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, +}; + +#endif /* __iwl_fw_api_nvm_reg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h new file mode 100644 index 000000000000..53cab993068f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -0,0 +1,101 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_offload_h__ +#define __iwl_fw_api_offload_h__ + +/** + * enum iwl_prot_offload_subcmd_ids - protocol offload commands + */ +enum iwl_prot_offload_subcmd_ids { + /** + * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif + */ + STORED_BEACON_NTF = 0xFF, +}; + +#define MAX_STORED_BEACON_SIZE 600 + +/** + * struct iwl_stored_beacon_notif - Stored beacon notification + * + * @system_time: system time on air rise + * @tsf: TSF on air rise + * @beacon_timestamp: beacon on air rise + * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition + * @channel: channel this beacon was received on + * @rates: rate in ucode internal format + * @byte_count: frame's byte count + * @data: beacon data, length in @byte_count + */ +struct iwl_stored_beacon_notif { + __le32 system_time; + __le64 tsf; + __le32 beacon_timestamp; + __le16 band; + __le16 channel; + __le32 rates; + __le32 byte_count; + u8 data[MAX_STORED_BEACON_SIZE]; +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ + +#endif /* __iwl_fw_api_offload_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h new file mode 100644 index 000000000000..e76f9cd4473d --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h @@ -0,0 +1,108 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_api_paging_h__ +#define __iwl_fw_api_paging_h__ + +#define NUM_OF_FW_PAGING_BLOCKS 33 /* 32 for data and 1 block for CSS */ + +/** + * struct iwl_fw_paging_cmd - paging layout + * + * Send to FW the paging layout in the driver. + * + * @flags: various flags for the command + * @block_size: the block size in powers of 2 + * @block_num: number of blocks specified in the command. + * @device_phy_addr: virtual addresses from device side + */ +struct iwl_fw_paging_cmd { + __le32 flags; + __le32 block_size; + __le32 block_num; + __le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS]; +} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */ + +/** + * enum iwl_fw_item_id - FW item IDs + * + * @IWL_FW_ITEM_ID_PAGING: Address of the pages that the FW will upload + * download + */ +enum iwl_fw_item_id { + IWL_FW_ITEM_ID_PAGING = 3, +}; + +/** + * struct iwl_fw_get_item_cmd - get an item from the fw + * @item_id: ID of item to obtain, see &enum iwl_fw_item_id + */ +struct iwl_fw_get_item_cmd { + __le32 item_id; +} __packed; /* FW_GET_ITEM_CMD_API_S_VER_1 */ + +struct iwl_fw_get_item_resp { + __le32 item_id; + __le32 item_byte_cnt; + __le32 item_val; +} __packed; /* FW_GET_ITEM_RSP_S_VER_1 */ + +#endif /* __iwl_fw_api_paging_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h new file mode 100644 index 000000000000..45f61c6af14e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -0,0 +1,164 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_phy_ctxt_h__ +#define __iwl_fw_api_phy_ctxt_h__ + +/* Supported bands */ +#define PHY_BAND_5 (0) +#define PHY_BAND_24 (1) + +/* Supported channel width, vary if there is VHT support */ +#define PHY_VHT_CHANNEL_MODE20 (0x0) +#define PHY_VHT_CHANNEL_MODE40 (0x1) +#define PHY_VHT_CHANNEL_MODE80 (0x2) +#define PHY_VHT_CHANNEL_MODE160 (0x3) + +/* + * Control channel position: + * For legacy set bit means upper channel, otherwise lower. + * For VHT - bit-2 marks if the control is lower/upper relative to center-freq + * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. 
+ * center_freq + * | + * 40Mhz |_______|_______| + * 80Mhz |_______|_______|_______|_______| + * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| + * code 011 010 001 000 | 100 101 110 111 + */ +#define PHY_VHT_CTRL_POS_1_BELOW (0x0) +#define PHY_VHT_CTRL_POS_2_BELOW (0x1) +#define PHY_VHT_CTRL_POS_3_BELOW (0x2) +#define PHY_VHT_CTRL_POS_4_BELOW (0x3) +#define PHY_VHT_CTRL_POS_1_ABOVE (0x4) +#define PHY_VHT_CTRL_POS_2_ABOVE (0x5) +#define PHY_VHT_CTRL_POS_3_ABOVE (0x6) +#define PHY_VHT_CTRL_POS_4_ABOVE (0x7) + +/* + * @band: PHY_BAND_* + * @channel: channel number + * @width: PHY_[VHT|LEGACY]_CHANNEL_* + * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* + */ +struct iwl_fw_channel_info { + u8 band; + u8 channel; + u8 width; + u8 ctrl_pos; +} __packed; + +#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) +#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ + (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) +#define PHY_RX_CHAIN_VALID_POS (1) +#define PHY_RX_CHAIN_VALID_MSK \ + (0x7 << PHY_RX_CHAIN_VALID_POS) +#define PHY_RX_CHAIN_FORCE_SEL_POS (4) +#define PHY_RX_CHAIN_FORCE_SEL_MSK \ + (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) +#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) +#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ + (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) +#define PHY_RX_CHAIN_CNT_POS (10) +#define PHY_RX_CHAIN_CNT_MSK \ + (0x3 << PHY_RX_CHAIN_CNT_POS) +#define PHY_RX_CHAIN_MIMO_CNT_POS (12) +#define PHY_RX_CHAIN_MIMO_CNT_MSK \ + (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) +#define PHY_RX_CHAIN_MIMO_FORCE_POS (14) +#define PHY_RX_CHAIN_MIMO_FORCE_MSK \ + (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) + +/* TODO: fix the value, make it depend on firmware at runtime? */ +#define NUM_PHY_CTX 3 + +/* TODO: complete missing documentation */ +/** + * struct iwl_phy_context_cmd - config of the PHY context + * ( PHY_CONTEXT_CMD = 0x8 ) + * @id_and_color: ID and color of the relevant Binding + * @action: action to perform, one of FW_CTXT_ACTION_* + * @apply_time: 0 means immediate apply and context switch. + * other value means apply new params after X usecs + * @tx_param_color: ??? + * @ci: channel info + * @txchain_info: ??? + * @rxchain_info: ??? + * @acquisition_data: ??? + * @dsp_cfg_flags: set to 0 + */ +struct iwl_phy_context_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + /* PHY_CONTEXT_DATA_API_S_VER_1 */ + __le32 apply_time; + __le32 tx_param_color; + struct iwl_fw_channel_info ci; + __le32 txchain_info; + __le32 rxchain_info; + __le32 acquisition_data; + __le32 dsp_cfg_flags; +} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ + +#endif /* __iwl_fw_api_phy_ctxt_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h new file mode 100644 index 000000000000..9cc59e00bd95 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -0,0 +1,258 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_phy_h__ +#define __iwl_fw_api_phy_h__ + +/** + * enum iwl_phy_ops_subcmd_ids - PHY group commands + */ +enum iwl_phy_ops_subcmd_ids { + /** + * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE: + * Uses either &struct iwl_dts_measurement_cmd or + * &struct iwl_ext_dts_measurement_cmd + */ + CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, + + /** + * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd + */ + CTDP_CONFIG_CMD = 0x03, + + /** + * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd + */ + TEMP_REPORTING_THRESHOLDS_CMD = 0x04, + + /** + * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd + */ + GEO_TX_POWER_LIMIT = 0x05, + + /** + * @CT_KILL_NOTIFICATION: &struct ct_kill_notif + */ + CT_KILL_NOTIFICATION = 0xFE, + + /** + * @DTS_MEASUREMENT_NOTIF_WIDE: + * &struct iwl_dts_measurement_notif_v1 or + * &struct iwl_dts_measurement_notif_v2 + */ + DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, +}; + +/* DTS measurements */ + +enum iwl_dts_measurement_flags { + DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), + DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), +}; + +/** + * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements + * + * @flags: indicates which measurements we want as specified in + * &enum iwl_dts_measurement_flags + */ +struct iwl_dts_measurement_cmd { + __le32 flags; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ + +/** +* enum iwl_dts_control_measurement_mode - DTS measurement type +* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read +* back (latest value. Not waiting for new value). Use automatic +* SW DTS configuration. +* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings, +* trigger DTS reading and provide read back temperature read +* when available. +* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read +* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result, +* without measurement trigger. +*/ +enum iwl_dts_control_measurement_mode { + DTS_AUTOMATIC = 0, + DTS_REQUEST_READ = 1, + DTS_OVER_WRITE = 2, + DTS_DIRECT_WITHOUT_MEASURE = 3, +}; + +/** +* enum iwl_dts_used - DTS to use or used for measurement in the DTS request +* @DTS_USE_TOP: Top +* @DTS_USE_CHAIN_A: chain A +* @DTS_USE_CHAIN_B: chain B +* @DTS_USE_CHAIN_C: chain C +* @XTAL_TEMPERATURE: read temperature from xtal +*/ +enum iwl_dts_used { + DTS_USE_TOP = 0, + DTS_USE_CHAIN_A = 1, + DTS_USE_CHAIN_B = 2, + DTS_USE_CHAIN_C = 3, + XTAL_TEMPERATURE = 4, +}; + +/** +* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode +* @DTS_BIT6_MODE: bit 6 mode +* @DTS_BIT8_MODE: bit 8 mode +*/ +enum iwl_dts_bit_mode { + DTS_BIT6_MODE = 0, + DTS_BIT8_MODE = 1, +}; + +/** + * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements + * @control_mode: see &enum iwl_dts_control_measurement_mode + * @temperature: used when over write DTS mode is selected + * @sensor: set temperature sensor to use. See &enum iwl_dts_used + * @avg_factor: average factor to DTS in request DTS read mode + * @bit_mode: value defines the DTS bit mode to use. 
See &enum iwl_dts_bit_mode + * @step_duration: step duration for the DTS + */ +struct iwl_ext_dts_measurement_cmd { + __le32 control_mode; + __le32 temperature; + __le32 sensor; + __le32 avg_factor; + __le32 bit_mode; + __le32 step_duration; +} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ + +/** + * struct iwl_dts_measurement_notif_v1 - measurements notification + * + * @temp: the measured temperature + * @voltage: the measured voltage + */ +struct iwl_dts_measurement_notif_v1 { + __le32 temp; + __le32 voltage; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/ + +/** + * struct iwl_dts_measurement_notif_v2 - measurements notification + * + * @temp: the measured temperature + * @voltage: the measured voltage + * @threshold_idx: the trip index that was crossed + */ +struct iwl_dts_measurement_notif_v2 { + __le32 temp; + __le32 voltage; + __le32 threshold_idx; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */ + +/** + * struct ct_kill_notif - CT-kill entry notification + * + * @temperature: the current temperature in celsius + * @reserved: reserved + */ +struct ct_kill_notif { + __le16 temperature; + __le16 reserved; +} __packed; /* GRP_PHY_CT_KILL_NTF */ + +/** +* enum ctdp_cmd_operation - CTDP command operations +* @CTDP_CMD_OPERATION_START: update the current budget +* @CTDP_CMD_OPERATION_STOP: stop ctdp +* @CTDP_CMD_OPERATION_REPORT: get the average budget +*/ +enum iwl_mvm_ctdp_cmd_operation { + CTDP_CMD_OPERATION_START = 0x1, + CTDP_CMD_OPERATION_STOP = 0x2, + CTDP_CMD_OPERATION_REPORT = 0x4, +};/* CTDP_CMD_OPERATION_TYPE_E */ + +/** + * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget + * + * @operation: see &enum iwl_mvm_ctdp_cmd_operation + * @budget: the budget in milliwatt + * @window_size: defined in API but not used + */ +struct iwl_mvm_ctdp_cmd { + __le32 operation; + __le32 budget; + __le32 window_size; +} __packed; + +#define IWL_MAX_DTS_TRIPS 8 + +/** + * struct temp_report_ths_cmd - set temperature thresholds + * + * @num_temps: number of temperature thresholds passed + * @thresholds: array with the thresholds to be configured + */ +struct temp_report_ths_cmd { + __le32 num_temps; + __le16 thresholds[IWL_MAX_DTS_TRIPS]; +} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */ + +#endif /* __iwl_fw_api_phy_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h new file mode 100644 index 000000000000..a06afb5605d2 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -0,0 +1,526 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_power_h__ +#define __iwl_fw_api_power_h__ + +/* Power Management Commands, Responses, Notifications */ + +/** + * enum iwl_ltr_config_flags - masks for LTR config command flags + * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status + * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow + * memory access + * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR + * reg change + * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from + * D0 to D3 + * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register + * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register + * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD + * @LTR_CFG_FLAG_UPDATE_VALUES: update config values and short + * idle timeout + */ +enum iwl_ltr_config_flags { + LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0), + LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1), + LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2), + LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3), + LTR_CFG_FLAG_SW_SET_SHORT = BIT(4), + LTR_CFG_FLAG_SW_SET_LONG = BIT(5), + LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6), + LTR_CFG_FLAG_UPDATE_VALUES = BIT(7), +}; + +/** + * struct iwl_ltr_config_cmd_v1 - configures the LTR + * @flags: See &enum iwl_ltr_config_flags + * @static_long: static LTR Long register value. + * @static_short: static LTR Short register value. 
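+ * Version 2 of this command, &struct iwl_ltr_config_cmd, additionally
+ * carries the per-state LTR values and the short idle timeout that are
+ * used when %LTR_CFG_FLAG_UPDATE_VALUES is set.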
+ */ +struct iwl_ltr_config_cmd_v1 { + __le32 flags; + __le32 static_long; + __le32 static_short; +} __packed; /* LTR_CAPABLE_API_S_VER_1 */ + +#define LTR_VALID_STATES_NUM 4 + +/** + * struct iwl_ltr_config_cmd - configures the LTR + * @flags: See &enum iwl_ltr_config_flags + * @static_long: static LTR Long register value. + * @static_short: static LTR Short register value. + * @ltr_cfg_values: LTR parameters table values (in usec) in folowing order: + * TX, RX, Short Idle, Long Idle. Used only if %LTR_CFG_FLAG_UPDATE_VALUES + * is set. + * @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if + * %LTR_CFG_FLAG_UPDATE_VALUES is set. + */ +struct iwl_ltr_config_cmd { + __le32 flags; + __le32 static_long; + __le32 static_short; + __le32 ltr_cfg_values[LTR_VALID_STATES_NUM]; + __le32 ltr_short_idle_timeout; +} __packed; /* LTR_CAPABLE_API_S_VER_2 */ + +/* Radio LP RX Energy Threshold measured in dBm */ +#define POWER_LPRX_RSSI_THRESHOLD 75 +#define POWER_LPRX_RSSI_THRESHOLD_MAX 94 +#define POWER_LPRX_RSSI_THRESHOLD_MIN 30 + +/** + * enum iwl_power_flags - masks for power table command flags + * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off + * receiver and transmitter. '0' - does not allow. + * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, + * '1' Driver enables PM (use rest of parameters) + * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM, + * '1' PM could sleep over DTIM till listen Interval. + * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all + * access categories are both delivery and trigger enabled. + * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and + * PBW Snoozing enabled + * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask + * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable. + * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving + * detection enablement +*/ +enum iwl_power_flags { + POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), + POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1), + POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2), + POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5), + POWER_FLAGS_BT_SCO_ENA = BIT(8), + POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9), + POWER_FLAGS_LPRX_ENA_MSK = BIT(11), + POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12), +}; + +#define IWL_POWER_VEC_SIZE 5 + +/** + * struct iwl_powertable_cmd - legacy power command. Beside old API support this + * is used also with a new power API for device wide power settings. + * POWER_TABLE_CMD = 0x77 (command, has simple generic response) + * + * @flags: Power table command flags from POWER_FLAGS_* + * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. + * Minimum allowed:- 3 * DTIM. Keep alive period must be + * set regardless of power scheme or current power state. + * FW use this value also when PM is disabled. + * @debug_flags: debug flags + * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to + * PSM transition - legacy PM + * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to + * PSM transition - legacy PM + * @sleep_interval: not in use + * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag + * is set. For example, if it is required to skip over + * one DTIM, this value need to be set to 2 (DTIM periods). + * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. 
+ * Default: 80dbm + */ +struct iwl_powertable_cmd { + /* PM_POWER_TABLE_CMD_API_S_VER_6 */ + __le16 flags; + u8 keep_alive_seconds; + u8 debug_flags; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 sleep_interval[IWL_POWER_VEC_SIZE]; + __le32 skip_dtim_periods; + __le32 lprx_rssi_threshold; +} __packed; + +/** + * enum iwl_device_power_flags - masks for device power command flags + * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: + * '1' Allow to save power by turning off + * receiver and transmitter. '0' - does not allow. +*/ +enum iwl_device_power_flags { + DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), +}; + +/** + * struct iwl_device_power_cmd - device wide power command. + * DEVICE_POWER_CMD = 0x77 (command, has simple generic response) + * + * @flags: Power table command flags from &enum iwl_device_power_flags + * @reserved: reserved (padding) + */ +struct iwl_device_power_cmd { + /* PM_POWER_TABLE_CMD_API_S_VER_6 */ + __le16 flags; + __le16 reserved; +} __packed; + +/** + * struct iwl_mac_power_cmd - New power command containing uAPSD support + * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response) + * @id_and_color: MAC contex identifier, &enum iwl_ctxt_id_and_color + * @flags: Power table command flags from POWER_FLAGS_* + * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. + * Minimum allowed:- 3 * DTIM. Keep alive period must be + * set regardless of power scheme or current power state. + * FW use this value also when PM is disabled. + * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to + * PSM transition - legacy PM + * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to + * PSM transition - legacy PM + * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag + * is set. For example, if it is required to skip over + * one DTIM, this value need to be set to 2 (DTIM periods). + * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to + * PSM transition - uAPSD + * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to + * PSM transition - uAPSD + * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. + * Default: 80dbm + * @snooze_interval: Maximum time between attempts to retrieve buffered data + * from the AP [msec] + * @snooze_window: A window of time in which PBW snoozing insures that all + * packets received. It is also the minimum time from last + * received unicast RX packet, before client stops snoozing + * for data. [msec] + * @snooze_step: TBD + * @qndp_tid: TID client shall use for uAPSD QNDP triggers + * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for + * each corresponding AC. + * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values. + * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct + * values. 
+ * @heavy_tx_thld_packets: TX threshold measured in number of packets + * @heavy_rx_thld_packets: RX threshold measured in number of packets + * @heavy_tx_thld_percentage: TX threshold measured in load's percentage + * @heavy_rx_thld_percentage: RX threshold measured in load's percentage + * @limited_ps_threshold: (unused) + * @reserved: reserved (padding) + */ +struct iwl_mac_power_cmd { + /* CONTEXT_DESC_API_T_VER_1 */ + __le32 id_and_color; + + /* CLIENT_PM_POWER_TABLE_S_VER_1 */ + __le16 flags; + __le16 keep_alive_seconds; + __le32 rx_data_timeout; + __le32 tx_data_timeout; + __le32 rx_data_timeout_uapsd; + __le32 tx_data_timeout_uapsd; + u8 lprx_rssi_threshold; + u8 skip_dtim_periods; + __le16 snooze_interval; + __le16 snooze_window; + u8 snooze_step; + u8 qndp_tid; + u8 uapsd_ac_flags; + u8 uapsd_max_sp; + u8 heavy_tx_thld_packets; + u8 heavy_rx_thld_packets; + u8 heavy_tx_thld_percentage; + u8 heavy_rx_thld_percentage; + u8 limited_ps_threshold; + u8 reserved; +} __packed; + +/* + * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when + * associated AP is identified as improperly implementing uAPSD protocol. + * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78 + * @sta_id: index of station in uCode's station table - associated AP ID in + * this context. + */ +struct iwl_uapsd_misbehaving_ap_notif { + __le32 sta_id; + u8 mac_id; + u8 reserved[3]; +} __packed; + +/** + * struct iwl_reduce_tx_power_cmd - TX power reduction command + * REDUCE_TX_POWER_CMD = 0x9f + * @flags: (reserved for future implementation) + * @mac_context_id: id of the mac ctx for which we are reducing TX power. + * @pwr_restriction: TX power restriction in dBms. + */ +struct iwl_reduce_tx_power_cmd { + u8 flags; + u8 mac_context_id; + __le16 pwr_restriction; +} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ + +enum iwl_dev_tx_power_cmd_mode { + IWL_TX_POWER_MODE_SET_MAC = 0, + IWL_TX_POWER_MODE_SET_DEVICE = 1, + IWL_TX_POWER_MODE_SET_CHAINS = 2, + IWL_TX_POWER_MODE_SET_ACK = 3, +}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */; + +#define IWL_NUM_CHAIN_LIMITS 2 +#define IWL_NUM_SUB_BANDS 5 + +/** + * struct iwl_dev_tx_power_cmd - TX power reduction command + * @set_mode: see &enum iwl_dev_tx_power_cmd_mode + * @mac_context_id: id of the mac ctx for which we are reducing TX power. + * @pwr_restriction: TX power restriction in 1/8 dBms. + * @dev_24: device TX power restriction in 1/8 dBms + * @dev_52_low: device TX power restriction upper band - low + * @dev_52_high: device TX power restriction upper band - high + * @per_chain_restriction: per chain restrictions + */ +struct iwl_dev_tx_power_cmd_v3 { + __le32 set_mode; + __le32 mac_context_id; + __le16 pwr_restriction; + __le16 dev_24; + __le16 dev_52_low; + __le16 dev_52_high; + __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; +} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */ + +#define IWL_DEV_MAX_TX_POWER 0x7FFF + +/** + * struct iwl_dev_tx_power_cmd - TX power reduction command + * @v3: version 3 of the command, embedded here for easier software handling + * @enable_ack_reduction: enable or disable close range ack TX power + * reduction. 
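The 1/8 dBm unit of @pwr_restriction is easy to miss, so here is a minimal sketch of filling the v3 command for a per-MAC limit. The function name and the 20 dBm figure in the comment are illustrative assumptions; the set_mode value and field types come from the definitions above.

static void example_limit_mac_tx_power(struct iwl_dev_tx_power_cmd_v3 *cmd,
                                       u32 mac_id, int limit_dbm)
{
        memset(cmd, 0, sizeof(*cmd));

        cmd->set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC);
        cmd->mac_context_id = cpu_to_le32(mac_id);

        /* pwr_restriction is in 1/8 dBm steps, so 20 dBm becomes 160 */
        cmd->pwr_restriction = cpu_to_le16(limit_dbm * 8);
}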
+ * @reserved: reserved (padding) + */ +struct iwl_dev_tx_power_cmd { + /* v4 is just an extension of v3 - keep this here */ + struct iwl_dev_tx_power_cmd_v3 v3; + u8 enable_ack_reduction; + u8 reserved[3]; +} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ + +#define IWL_NUM_GEO_PROFILES 3 +#define IWL_GEO_PER_CHAIN_SIZE 3 + +/** + * enum iwl_geo_per_chain_offset_operation - type of operation + * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW. + * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table. + */ +enum iwl_geo_per_chain_offset_operation { + IWL_PER_CHAIN_OFFSET_SET_TABLES, + IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE, +}; /* GEO_TX_POWER_LIMIT FLAGS TYPE */ + +/** + * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT. + * @max_tx_power: maximum allowed tx power. + * @chain_a: tx power offset for chain a. + * @chain_b: tx power offset for chain b. + */ +struct iwl_per_chain_offset { + __le16 max_tx_power; + u8 chain_a; + u8 chain_b; +} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */ + +struct iwl_per_chain_offset_group { + struct iwl_per_chain_offset lb; + struct iwl_per_chain_offset hb; +} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */ + +/** + * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd. + * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation + * @table: offset profile per band. + */ +struct iwl_geo_tx_power_profiles_cmd { + __le32 ops; + struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; +} __packed; /* GEO_TX_POWER_LIMIT */ + +/** + * struct iwl_geo_tx_power_profiles_resp - response to GEO_TX_POWER_LIMIT cmd + * @profile_idx: current geo profile in use + */ +struct iwl_geo_tx_power_profiles_resp { + __le32 profile_idx; +} __packed; /* GEO_TX_POWER_LIMIT_RESP */ + +/** + * struct iwl_beacon_filter_cmd + * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) + * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon + * to driver if delta in Energy values calculated for this and last + * passed beacon is greater than this threshold. Zero value means that + * the Energy change is ignored for beacon filtering, and beacon will + * not be forced to be sent to driver regardless of this delta. Typical + * energy delta 5dB. + * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state. + * Send beacon to driver if delta in Energy values calculated for this + * and last passed beacon is greater than this threshold. Zero value + * means that the Energy change is ignored for beacon filtering while in + * Roaming state, typical energy delta 1dB. + * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values + * calculated for current beacon is less than the threshold, use + * Roaming Energy Delta Threshold, otherwise use normal Energy Delta + * Threshold. Typical energy threshold is -72dBm. + * @bf_temp_threshold: This threshold determines the type of temperature + * filtering (Slow or Fast) that is selected (Units are in Celsuis): + * If the current temperature is above this threshold - Fast filter + * will be used, If the current temperature is below this threshold - + * Slow filter will be used. + * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values + * calculated for this and the last passed beacon is greater than this + * threshold. 
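To show how the GEO_TX_POWER_LIMIT structures fit together, the sketch below builds the three-profile table. The per-chain numbers are placeholders with no claimed units, and reading @lb/@hb as the low (2.4 GHz) and high (5 GHz) band entries is inferred from the field names only.

static void example_fill_geo_table(struct iwl_geo_tx_power_profiles_cmd *cmd)
{
        int i;

        cmd->ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);

        for (i = 0; i < IWL_NUM_GEO_PROFILES; i++) {
                cmd->table[i].lb.max_tx_power = cpu_to_le16(168); /* placeholder */
                cmd->table[i].lb.chain_a = 4;
                cmd->table[i].lb.chain_b = 4;

                cmd->table[i].hb.max_tx_power = cpu_to_le16(136); /* placeholder */
                cmd->table[i].hb.chain_a = 2;
                cmd->table[i].hb.chain_b = 2;
        }
}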
Zero value means that the temperature change is ignored for + * beacon filtering; beacons will not be forced to be sent to driver + * regardless of whether its temerature has been changed. + * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values + * calculated for this and the last passed beacon is greater than this + * threshold. Zero value means that the temperature change is ignored for + * beacon filtering; beacons will not be forced to be sent to driver + * regardless of whether its temerature has been changed. + * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled. + * @bf_debug_flag: beacon filtering debug configuration + * @bf_escape_timer: Send beacons to to driver if no beacons were passed + * for a specific period of time. Units: Beacons. + * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed + * for a longer period of time then this escape-timeout. Units: Beacons. + * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. + */ +struct iwl_beacon_filter_cmd { + __le32 bf_energy_delta; + __le32 bf_roaming_energy_delta; + __le32 bf_roaming_state; + __le32 bf_temp_threshold; + __le32 bf_temp_fast_filter; + __le32 bf_temp_slow_filter; + __le32 bf_enable_beacon_filter; + __le32 bf_debug_flag; + __le32 bf_escape_timer; + __le32 ba_escape_timer; + __le32 ba_enable_beacon_abort; +} __packed; + +/* Beacon filtering and beacon abort */ +#define IWL_BF_ENERGY_DELTA_DEFAULT 5 +#define IWL_BF_ENERGY_DELTA_D0I3 20 +#define IWL_BF_ENERGY_DELTA_MAX 255 +#define IWL_BF_ENERGY_DELTA_MIN 0 + +#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1 +#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20 +#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255 +#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0 + +#define IWL_BF_ROAMING_STATE_DEFAULT 72 +#define IWL_BF_ROAMING_STATE_D0I3 72 +#define IWL_BF_ROAMING_STATE_MAX 255 +#define IWL_BF_ROAMING_STATE_MIN 0 + +#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112 +#define IWL_BF_TEMP_THRESHOLD_D0I3 112 +#define IWL_BF_TEMP_THRESHOLD_MAX 255 +#define IWL_BF_TEMP_THRESHOLD_MIN 0 + +#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1 +#define IWL_BF_TEMP_FAST_FILTER_D0I3 1 +#define IWL_BF_TEMP_FAST_FILTER_MAX 255 +#define IWL_BF_TEMP_FAST_FILTER_MIN 0 + +#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 +#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20 +#define IWL_BF_TEMP_SLOW_FILTER_MAX 255 +#define IWL_BF_TEMP_SLOW_FILTER_MIN 0 + +#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 + +#define IWL_BF_DEBUG_FLAG_DEFAULT 0 +#define IWL_BF_DEBUG_FLAG_D0I3 0 + +#define IWL_BF_ESCAPE_TIMER_DEFAULT 0 +#define IWL_BF_ESCAPE_TIMER_D0I3 0 +#define IWL_BF_ESCAPE_TIMER_MAX 1024 +#define IWL_BF_ESCAPE_TIMER_MIN 0 + +#define IWL_BA_ESCAPE_TIMER_DEFAULT 6 +#define IWL_BA_ESCAPE_TIMER_D0I3 6 +#define IWL_BA_ESCAPE_TIMER_D3 9 +#define IWL_BA_ESCAPE_TIMER_MAX 1024 +#define IWL_BA_ESCAPE_TIMER_MIN 0 + +#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 + +#define IWL_BF_CMD_CONFIG(mode) \ + .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \ + .bf_roaming_energy_delta = \ + cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \ + .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \ + .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \ + .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \ + .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \ + .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \ + .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \ + .ba_escape_timer = 
cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode) + +#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT) +#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3) +#endif /* __iwl_fw_api_power_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h new file mode 100644 index 000000000000..a13fd8a1be62 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -0,0 +1,408 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
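The IWL_BF_CMD_CONFIG() macro above is meant to be expanded inside a designated initializer, which is easier to see with an example. A minimal sketch, assuming only the definitions above; the choice to enable both filtering and beacon abort here is illustrative, not taken from this patch:

static const struct iwl_beacon_filter_cmd example_bf_cmd = {
        IWL_BF_CMD_CONFIG_DEFAULTS,
        .bf_enable_beacon_filter = cpu_to_le32(1),
        .ba_enable_beacon_abort = cpu_to_le32(1),
};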
+ *****************************************************************************/ + +#ifndef __iwl_fw_api_rs_h__ +#define __iwl_fw_api_rs_h__ + +#include "mac.h" + +/* + * These serve as indexes into + * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; + * TODO: avoid overlap between legacy and HT rates + */ +enum { + IWL_RATE_1M_INDEX = 0, + IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, + IWL_RATE_2M_INDEX, + IWL_RATE_5M_INDEX, + IWL_RATE_11M_INDEX, + IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, + IWL_RATE_6M_INDEX, + IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, + IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX, + IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX, + IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX, + IWL_RATE_9M_INDEX, + IWL_RATE_12M_INDEX, + IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX, + IWL_RATE_18M_INDEX, + IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX, + IWL_RATE_24M_INDEX, + IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX, + IWL_RATE_36M_INDEX, + IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX, + IWL_RATE_48M_INDEX, + IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX, + IWL_RATE_54M_INDEX, + IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX, + IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX, + IWL_RATE_60M_INDEX, + IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX, + IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX, + IWL_RATE_MCS_8_INDEX, + IWL_RATE_MCS_9_INDEX, + IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX, + IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, + IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1, +}; + +#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) + +/* fw API values for legacy bit rates, both OFDM and CCK */ +enum { + IWL_RATE_6M_PLCP = 13, + IWL_RATE_9M_PLCP = 15, + IWL_RATE_12M_PLCP = 5, + IWL_RATE_18M_PLCP = 7, + IWL_RATE_24M_PLCP = 9, + IWL_RATE_36M_PLCP = 11, + IWL_RATE_48M_PLCP = 1, + IWL_RATE_54M_PLCP = 3, + IWL_RATE_1M_PLCP = 10, + IWL_RATE_2M_PLCP = 20, + IWL_RATE_5M_PLCP = 55, + IWL_RATE_11M_PLCP = 110, + IWL_RATE_INVM_PLCP = -1, +}; + +/* + * rate_n_flags bit fields + * + * The 32-bit value has different layouts in the low 8 bites depending on the + * format. There are three formats, HT, VHT and legacy (11abg, with subformats + * for CCK and OFDM). + * + * High-throughput (HT) rate format + * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM) + * Very High-throughput (VHT) rate format + * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM) + * Legacy OFDM rate format for bits 7:0 + * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM) + * Legacy CCK rate format for bits 7:0: + * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK) + */ + +/* Bit 8: (1) HT format, (0) legacy or VHT format */ +#define RATE_MCS_HT_POS 8 +#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS) + +/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */ +#define RATE_MCS_CCK_POS 9 +#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS) + +/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */ +#define RATE_MCS_VHT_POS 26 +#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS) + + +/* + * High-throughput (HT) rate format for bits 7:0 + * + * 2-0: MCS rate base + * 0) 6 Mbps + * 1) 12 Mbps + * 2) 18 Mbps + * 3) 24 Mbps + * 4) 36 Mbps + * 5) 48 Mbps + * 6) 54 Mbps + * 7) 60 Mbps + * 4-3: 0) Single stream (SISO) + * 1) Dual stream (MIMO) + * 2) Triple stream (MIMO) + * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data + * (bits 7-6 are zero) + * + * Together the low 5 bits work out to the MCS index because we don't + * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two + * streams and 16-23 have three streams. 
We could also support MCS 32 + * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.) + */ +#define RATE_HT_MCS_RATE_CODE_MSK 0x7 +#define RATE_HT_MCS_NSS_POS 3 +#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS) + +/* Bit 10: (1) Use Green Field preamble */ +#define RATE_HT_MCS_GF_POS 10 +#define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS) + +#define RATE_HT_MCS_INDEX_MSK 0x3f + +/* + * Very High-throughput (VHT) rate format for bits 7:0 + * + * 3-0: VHT MCS (0-9) + * 5-4: number of streams - 1: + * 0) Single stream (SISO) + * 1) Dual stream (MIMO) + * 2) Triple stream (MIMO) + */ + +/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */ +#define RATE_VHT_MCS_RATE_CODE_MSK 0xf +#define RATE_VHT_MCS_NSS_POS 4 +#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS) + +/* + * Legacy OFDM rate format for bits 7:0 + * + * 3-0: 0xD) 6 Mbps + * 0xF) 9 Mbps + * 0x5) 12 Mbps + * 0x7) 18 Mbps + * 0x9) 24 Mbps + * 0xB) 36 Mbps + * 0x1) 48 Mbps + * 0x3) 54 Mbps + * (bits 7-4 are 0) + * + * Legacy CCK rate format for bits 7:0: + * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK): + * + * 6-0: 10) 1 Mbps + * 20) 2 Mbps + * 55) 5.5 Mbps + * 110) 11 Mbps + * (bit 7 is 0) + */ +#define RATE_LEGACY_RATE_MSK 0xff + +/* Bit 10 - OFDM HE */ +#define RATE_MCS_OFDM_HE_POS 10 +#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS) + +/* + * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz + * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT + */ +#define RATE_MCS_CHAN_WIDTH_POS 11 +#define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS) +#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS) + +/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ +#define RATE_MCS_SGI_POS 13 +#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS) + +/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */ +#define RATE_MCS_ANT_POS 14 +#define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS) +#define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS) +#define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS) +#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \ + RATE_MCS_ANT_B_MSK) +#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \ + RATE_MCS_ANT_C_MSK) +#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK +#define RATE_MCS_ANT_NUM 3 + +/* Bit 17: (0) SS, (1) SS*2 */ +#define RATE_MCS_STBC_POS 17 +#define RATE_MCS_STBC_MSK BIT(RATE_MCS_STBC_POS) + +/* Bit 18: OFDM-HE dual carrier mode */ +#define RATE_HE_DUAL_CARRIER_MODE 18 +#define RATE_HE_DUAL_CARRIER_MODE_MSK BIT(RATE_HE_DUAL_CARRIER_MODE) + +/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ +#define RATE_MCS_BF_POS 19 +#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) + +/* + * Bit 20-21: HE guard interval and LTF type. + * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us, + * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us + */ +#define RATE_MCS_HE_GI_LTF_POS 20 +#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS) + +/* Bit 22-23: HE type. 
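Because the stream count is encoded differently for HT and VHT, a small helper makes the layout above concrete. This is a sketch only; it is not how the driver's rate-scaling code is structured.

static u8 example_rate_num_streams(u32 rate_n_flags)
{
        if (rate_n_flags & RATE_MCS_HT_MSK)
                return ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
                        RATE_HT_MCS_NSS_POS) + 1;

        if (rate_n_flags & RATE_MCS_VHT_MSK)
                return ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
                        RATE_VHT_MCS_NSS_POS) + 1;

        /* legacy CCK/OFDM rates are always single stream */
        return 1;
}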
(0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ +#define RATE_MCS_HE_TYPE_POS 22 +#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS) + +/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ +#define RATE_MCS_DUP_POS 24 +#define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS) + +/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */ +#define RATE_MCS_LDPC_POS 27 +#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) + + +/* Link Quality definitions */ + +/* # entries in rate scale table to support Tx retries */ +#define LQ_MAX_RETRY_NUM 16 + +/* Link quality command flags bit fields */ + +/* Bit 0: (0) Don't use RTS (1) Use RTS */ +#define LQ_FLAG_USE_RTS_POS 0 +#define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS) + +/* Bit 1-3: LQ command color. Used to match responses to LQ commands */ +#define LQ_FLAG_COLOR_POS 1 +#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) +#define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\ + LQ_FLAG_COLOR_POS) +#define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\ + LQ_FLAG_COLOR_MSK) +#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK)) + +/* Bit 4-5: Tx RTS BW Signalling + * (0) No RTS BW signalling + * (1) Static BW signalling + * (2) Dynamic BW signalling + */ +#define LQ_FLAG_RTS_BW_SIG_POS 4 +#define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS) +#define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS) +#define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS) + +/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection + * Dyanmic BW selection allows Tx with narrower BW then requested in rates + */ +#define LQ_FLAG_DYNAMIC_BW_POS 6 +#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS) + +/* Single Stream Tx Parameters (lq_cmd->ss_params) + * Flags to control a smart FW decision about whether BFER/STBC/SISO will be + * used for single stream Tx. + */ + +/* Bit 0-1: Max STBC streams allowed. Can be 0-3. + * (0) - No STBC allowed + * (1) - 2x1 STBC allowed (HT/VHT) + * (2) - 4x2 STBC allowed (HT/VHT) + * (3) - 3x2 STBC allowed (HT only) + * All our chips are at most 2 antennas so only (1) is valid for now. + */ +#define LQ_SS_STBC_ALLOWED_POS 0 +#define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_MSK) + +/* 2x1 STBC is allowed */ +#define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS) + +/* Bit 2: Beamformer (VHT only) is allowed */ +#define LQ_SS_BFER_ALLOWED_POS 2 +#define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS) + +/* Bit 3: Force BFER or STBC for testing + * If this is set: + * If BFER is allowed then force the ucode to choose BFER else + * If STBC is allowed then force the ucode to choose STBC over SISO + */ +#define LQ_SS_FORCE_POS 3 +#define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS) + +/* Bit 31: ss_params field is valid. Used for FW backward compatibility + * with other drivers which don't support the ss_params API yet + */ +#define LQ_SS_PARAMS_VALID_POS 31 +#define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS) + +/** + * struct iwl_lq_cmd - link quality command + * @sta_id: station to update + * @reduced_tpc: reduced transmit power control value + * @control: not used + * @flags: combination of LQ_FLAG_* + * @mimo_delim: the first SISO index in rs_table, which separates MIMO + * and SISO rates + * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD). 
+ * Should be ANT_[ABC] + * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC] + * @initial_rate_index: first index from rs_table per AC category + * @agg_time_limit: aggregation max time threshold in usec/100, meaning + * value of 100 is one usec. Range is 100 to 8000 + * @agg_disable_start_th: try-count threshold for starting aggregation. + * If a frame has higher try-count, it should not be selected for + * starting an aggregation sequence. + * @agg_frame_cnt_limit: max frame count in an aggregation. + * 0: no limit + * 1: no aggregation (one frame per aggregation) + * 2 - 0x3f: maximal number of frames (up to 3f == 63) + * @reserved2: reserved + * @rs_table: array of rates for each TX try, each is rate_n_flags, + * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP + * @ss_params: single stream features. declare whether STBC or BFER are allowed. + */ +struct iwl_lq_cmd { + u8 sta_id; + u8 reduced_tpc; + __le16 control; + /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */ + u8 flags; + u8 mimo_delim; + u8 single_stream_ant_msk; + u8 dual_stream_ant_msk; + u8 initial_rate_index[AC_NUM]; + /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */ + __le16 agg_time_limit; + u8 agg_disable_start_th; + u8 agg_frame_cnt_limit; + __le32 reserved2; + __le32 rs_table[LQ_MAX_RETRY_NUM]; + __le32 ss_params; +}; /* LINK_QUALITY_CMD_API_S_VER_1 */ + +#endif /* __iwl_fw_api_rs_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h new file mode 100644 index 000000000000..e7565f37ece9 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -0,0 +1,589 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
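The LQ color macros are easiest to read in context, so here is a rough sketch of refreshing a link quality command before resending it. The surrounding function is hypothetical; only the macros, flag bits and struct fields come from this header.

static void example_refresh_lq(struct iwl_lq_cmd *lq)
{
        u8 color = LQ_FLAG_COLOR_GET(lq->flags);

        /* pick a new color so the FW response can be matched to this command */
        lq->flags = LQ_FLAG_COLOR_SET(lq->flags, LQ_FLAGS_COLOR_INC(color));

        /* advertise 2x1 STBC for single stream and mark ss_params as valid */
        lq->ss_params = cpu_to_le32(LQ_SS_PARAMS_VALID |
                                    LQ_SS_STBC_1SS_ALLOWED);
}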
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_rx_h__ +#define __iwl_fw_api_rx_h__ + +/* API for pre-9000 hardware */ + +#define IWL_RX_INFO_PHY_CNT 8 +#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1 +#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff +#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00 +#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000 +#define IWL_RX_INFO_ENERGY_ANT_A_POS 0 +#define IWL_RX_INFO_ENERGY_ANT_B_POS 8 +#define IWL_RX_INFO_ENERGY_ANT_C_POS 16 + +enum iwl_mac_context_info { + MAC_CONTEXT_INFO_NONE, + MAC_CONTEXT_INFO_GSCAN, +}; + +/** + * struct iwl_rx_phy_info - phy info + * (REPLY_RX_PHY_CMD = 0xc0) + * @non_cfg_phy_cnt: non configurable DSP phy data byte count + * @cfg_phy_cnt: configurable DSP phy data byte count + * @stat_id: configurable DSP phy data set ID + * @reserved1: reserved + * @system_timestamp: GP2 at on air rise + * @timestamp: TSF at on air rise + * @beacon_time_stamp: beacon at on-air rise + * @phy_flags: general phy flags: band, modulation, ... + * @channel: channel number + * @non_cfg_phy: for various implementations of non_cfg_phy + * @rate_n_flags: RATE_MCS_* + * @byte_count: frame's byte-count + * @frame_time: frame's time on the air, based on byte count and frame rate + * calculation + * @mac_active_msk: what MACs were active when the frame was received + * @mac_context_info: additional info on the context in which the frame was + * received as defined in &enum iwl_mac_context_info + * + * Before each Rx, the device sends this data. It contains PHY information + * about the reception of the packet. 
+ */ +struct iwl_rx_phy_info { + u8 non_cfg_phy_cnt; + u8 cfg_phy_cnt; + u8 stat_id; + u8 reserved1; + __le32 system_timestamp; + __le64 timestamp; + __le32 beacon_time_stamp; + __le16 phy_flags; + __le16 channel; + __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT]; + __le32 rate_n_flags; + __le32 byte_count; + u8 mac_active_msk; + u8 mac_context_info; + __le16 frame_time; +} __packed; + +/* + * TCP offload Rx assist info + * + * bits 0:3 - reserved + * bits 4:7 - MIC CRC length + * bits 8:12 - MAC header length + * bit 13 - Padding indication + * bit 14 - A-AMSDU indication + * bit 15 - Offload enabled + */ +enum iwl_csum_rx_assist_info { + CSUM_RXA_RESERVED_MASK = 0x000f, + CSUM_RXA_MICSIZE_MASK = 0x00f0, + CSUM_RXA_HEADERLEN_MASK = 0x1f00, + CSUM_RXA_PADD = BIT(13), + CSUM_RXA_AMSDU = BIT(14), + CSUM_RXA_ENA = BIT(15) +}; + +/** + * struct iwl_rx_mpdu_res_start - phy info + * @byte_count: byte count of the frame + * @assist: see &enum iwl_csum_rx_assist_info + */ +struct iwl_rx_mpdu_res_start { + __le16 byte_count; + __le16 assist; +} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */ + +/** + * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags + * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band + * @RX_RES_PHY_FLAGS_MOD_CCK: modulation is CCK + * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short + * @RX_RES_PHY_FLAGS_NARROW_BAND: narrow band (<20 MHz) receive + * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received + * @RX_RES_PHY_FLAGS_ANTENNA_POS: antenna bit position + * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU + * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame + * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble + * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame + */ +enum iwl_rx_phy_flags { + RX_RES_PHY_FLAGS_BAND_24 = BIT(0), + RX_RES_PHY_FLAGS_MOD_CCK = BIT(1), + RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2), + RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3), + RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4), + RX_RES_PHY_FLAGS_ANTENNA_POS = 4, + RX_RES_PHY_FLAGS_AGG = BIT(7), + RX_RES_PHY_FLAGS_OFDM_HT = BIT(8), + RX_RES_PHY_FLAGS_OFDM_GF = BIT(9), + RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10), +}; + +/** + * enum iwl_mvm_rx_status - written by fw for each Rx packet + * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine + * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow + * @RX_MPDU_RES_STATUS_SRC_STA_FOUND: station was found + * @RX_MPDU_RES_STATUS_KEY_VALID: key was valid + * @RX_MPDU_RES_STATUS_KEY_PARAM_OK: key parameters were usable + * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed + * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked + * in the driver. + * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine + * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or + * alg = CCM only. Checks replay attack for 11w frames. Relevant only if + * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set. 
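A short sketch of consuming the pre-9000 PHY info defined above. Treating non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX] as the per-antenna energy word is inferred from the mask names, not stated by this header, so take the indexing as an assumption.

static u8 example_rx_energy_ant_a(const struct iwl_rx_phy_info *phy_info)
{
        u32 val = le32_to_cpu(
                phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);

        return (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
               IWL_RX_INFO_ENERGY_ANT_A_POS;
}

The band, modulation and A-MPDU bits of @phy_flags can be tested the same way against the RX_RES_PHY_FLAGS_* values above.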
+ * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted + * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP + * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM + * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP + * @RX_MPDU_RES_STATUS_SEC_EXT_ENC: this frame is encrypted using extension + * algorithm + * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC + * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted + * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm + * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted + * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP: extended IV (set with TKIP) + * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT: key ID comparison done + * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame + * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw + * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors + * @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask + * @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift + * @RX_MPDU_RES_STATUS_FILTERING_MSK: filter status + * @RX_MPDU_RES_STATUS2_FILTERING_MSK: filter status 2 + */ +enum iwl_mvm_rx_status { + RX_MPDU_RES_STATUS_CRC_OK = BIT(0), + RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1), + RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2), + RX_MPDU_RES_STATUS_KEY_VALID = BIT(3), + RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4), + RX_MPDU_RES_STATUS_ICV_OK = BIT(5), + RX_MPDU_RES_STATUS_MIC_OK = BIT(6), + RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), + RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7), + RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8), + RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8), + RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8), + RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8), + RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8), + RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8), + RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8), + RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8), + RX_MPDU_RES_STATUS_DEC_DONE = BIT(11), + RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13), + RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14), + RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15), + RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16), + RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), + RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24, + RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT, + RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000), + RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000), +}; + +/* 9000 series API */ +enum iwl_rx_mpdu_mac_flags1 { + IWL_RX_MDPU_MFLG1_ADDRTYPE_MASK = 0x03, + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK = 0xf0, + /* shift should be 4, but the length is measured in 2-byte + * words, so shifting only by 3 gives a byte result + */ + IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT = 3, +}; + +enum iwl_rx_mpdu_mac_flags2 { + /* in 2-byte words */ + IWL_RX_MPDU_MFLG2_HDR_LEN_MASK = 0x1f, + IWL_RX_MPDU_MFLG2_PAD = 0x20, + IWL_RX_MPDU_MFLG2_AMSDU = 0x40, +}; + +enum iwl_rx_mpdu_amsdu_info { + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x7f, + IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80, +}; + +enum iwl_rx_l3_proto_values { + IWL_RX_L3_TYPE_NONE, + IWL_RX_L3_TYPE_IPV4, + IWL_RX_L3_TYPE_IPV4_FRAG, + IWL_RX_L3_TYPE_IPV6_FRAG, + IWL_RX_L3_TYPE_IPV6, + IWL_RX_L3_TYPE_IPV6_IN_IPV4, + IWL_RX_L3_TYPE_ARP, + IWL_RX_L3_TYPE_EAPOL, +}; + +#define IWL_RX_L3_PROTO_POS 4 + +enum iwl_rx_l3l4_flags { + IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0), + IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1), + IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2), + IWL_RX_L3L4_TCP_ACK = BIT(3), + 
IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS, + IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8, + IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12, +}; + +enum iwl_rx_mpdu_status { + IWL_RX_MPDU_STATUS_CRC_OK = BIT(0), + IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1), + IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2), + IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3), + IWL_RX_MPDU_STATUS_KEY_PARAM_OK = BIT(4), + IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), + IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), + IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), + IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, + IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, + IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, + IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, + IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8, + IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8, + IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8, + IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11), + IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12), + IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13), + IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14), + IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15), +}; + +enum iwl_rx_mpdu_hash_filter { + IWL_RX_MPDU_HF_A1_HASH_MASK = 0x3f, + IWL_RX_MPDU_HF_FILTER_STATUS_MASK = 0xc0, +}; + +enum iwl_rx_mpdu_sta_id_flags { + IWL_RX_MPDU_SIF_STA_ID_MASK = 0x1f, + IWL_RX_MPDU_SIF_RRF_ABORT = 0x20, + IWL_RX_MPDU_SIF_FILTER_STATUS_MASK = 0xc0, +}; + +#define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f + +enum iwl_rx_mpdu_reorder_data { + IWL_RX_MPDU_REORDER_NSSN_MASK = 0x00000fff, + IWL_RX_MPDU_REORDER_SN_MASK = 0x00fff000, + IWL_RX_MPDU_REORDER_SN_SHIFT = 12, + IWL_RX_MPDU_REORDER_BAID_MASK = 0x7f000000, + IWL_RX_MPDU_REORDER_BAID_SHIFT = 24, + IWL_RX_MPDU_REORDER_BA_OLD_SN = 0x80000000, +}; + +enum iwl_rx_mpdu_phy_info { + IWL_RX_MPDU_PHY_AMPDU = BIT(5), + IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6), + IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7), + IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8), +}; + +enum iwl_rx_mpdu_mac_info { + IWL_RX_MPDU_PHY_MAC_INDEX_MASK = 0x0f, + IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0, +}; + +/** + * struct iwl_rx_mpdu_desc - RX MPDU descriptor + */ +struct iwl_rx_mpdu_desc { + /* DW2 */ + /** + * @mpdu_len: MPDU length + */ + __le16 mpdu_len; + /** + * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1 + */ + u8 mac_flags1; + /** + * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2 + */ + u8 mac_flags2; + /* DW3 */ + /** + * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info + */ + u8 amsdu_info; + /** + * @phy_info: &enum iwl_rx_mpdu_phy_info + */ + __le16 phy_info; + /** + * @mac_phy_idx: MAC/PHY index + */ + u8 mac_phy_idx; + /* DW4 - carries csum data only when rpa_en == 1 */ + /** + * @raw_csum: raw checksum (alledgedly unreliable) + */ + __le16 raw_csum; + /** + * @l3l4_flags: &enum iwl_rx_l3l4_flags + */ + __le16 l3l4_flags; + /* DW5 */ + /** + * @status: &enum iwl_rx_mpdu_status + */ + __le16 status; + /** + * @hash_filter: hash filter value + */ + u8 hash_filter; + /** + * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags + */ + u8 sta_id_flags; + /* DW6 */ + /** + * @reorder_data: &enum iwl_rx_mpdu_reorder_data + */ + __le32 reorder_data; + /* DW7 - carries rss_hash only when rpa_en == 1 */ + /** + * @rss_hash: RSS hash value + */ + __le32 rss_hash; + /* DW8 - carries filter_match only when rpa_en == 1 */ + /** + * @filter_match: filter match value + */ + __le32 filter_match; + /* DW9 */ + /** + * @rate_n_flags: RX rate/flags encoding + */ + __le32 rate_n_flags; + /* DW10 */ + /** + * @energy_a: energy chain A + */ + u8 energy_a; + /** + * @energy_b: energy chain B + */ + u8 energy_b; + /** + * @channel: channel number + */ + u8 channel; + /** + * @mac_context: MAC context 
mask + */ + u8 mac_context; + /* DW11 */ + /** + * @gp2_on_air_rise: GP2 timer value on air rise (INA) + */ + __le32 gp2_on_air_rise; + /* DW12 & DW13 */ + /** + * @tsf_on_air_rise: + * TSF value on air rise (INA), only valid if + * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set + */ + __le64 tsf_on_air_rise; +} __packed; + +struct iwl_frame_release { + u8 baid; + u8 reserved; + __le16 nssn; +}; + +enum iwl_rss_hash_func_en { + IWL_RSS_HASH_TYPE_IPV4_TCP, + IWL_RSS_HASH_TYPE_IPV4_UDP, + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD, + IWL_RSS_HASH_TYPE_IPV6_TCP, + IWL_RSS_HASH_TYPE_IPV6_UDP, + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, +}; + +#define IWL_RSS_HASH_KEY_CNT 10 +#define IWL_RSS_INDIRECTION_TABLE_SIZE 128 +#define IWL_RSS_ENABLE 1 + +/** + * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration + * + * @flags: 1 - enable, 0 - disable + * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en + * @reserved: reserved + * @secret_key: 320 bit input of random key configuration from driver + * @indirection_table: indirection table + */ +struct iwl_rss_config_cmd { + __le32 flags; + u8 hash_mask; + u8 reserved[3]; + __le32 secret_key[IWL_RSS_HASH_KEY_CNT]; + u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; +} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ + +#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128 +#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0 +#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf + +/** + * struct iwl_rxq_sync_cmd - RXQ notification trigger + * + * @flags: flags of the notification. bit 0:3 are the sender queue + * @rxq_mask: rx queues to send the notification on + * @count: number of bytes in payload, should be DWORD aligned + * @payload: data to send to rx queues + */ +struct iwl_rxq_sync_cmd { + __le32 flags; + __le32 rxq_mask; + __le32 count; + u8 payload[]; +} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ + +/** + * struct iwl_rxq_sync_notification - Notification triggered by RXQ + * sync command + * + * @count: number of bytes in payload + * @payload: data to send to rx queues + */ +struct iwl_rxq_sync_notification { + __le32 count; + u8 payload[]; +} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ + +/** + * enum iwl_mvm_rxq_notif_type - Internal message identifier + * + * @IWL_MVM_RXQ_EMPTY: empty sync notification + * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA + */ +enum iwl_mvm_rxq_notif_type { + IWL_MVM_RXQ_EMPTY, + IWL_MVM_RXQ_NOTIF_DEL_BA, +}; + +/** + * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent + * in &iwl_rxq_sync_cmd. Should be DWORD aligned. + * FW is agnostic to the payload, so there are no endianity requirements. 
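To make the reorder_data layout concrete, here is a sketch that unpacks it with the masks and shifts defined earlier; the helper itself is illustrative only.

static void example_parse_reorder(const struct iwl_rx_mpdu_desc *desc,
                                  u16 *nssn, u16 *sn, u8 *baid)
{
        u32 reorder = le32_to_cpu(desc->reorder_data);

        *nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
        *sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
              IWL_RX_MPDU_REORDER_SN_SHIFT;
        *baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
                IWL_RX_MPDU_REORDER_BAID_SHIFT;

        /* a *baid of IWL_RX_REORDER_DATA_INVALID_BAID means no BA session */
}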
+ * + * @type: value from &iwl_mvm_rxq_notif_type + * @sync: ctrl path is waiting for all notifications to be received + * @cookie: internal cookie to identify old notifications + * @data: payload + */ +struct iwl_mvm_internal_rxq_notif { + u16 type; + u16 sync; + u32 cookie; + u8 data[]; +} __packed; + +/** + * enum iwl_mvm_pm_event - type of station PM event + * @IWL_MVM_PM_EVENT_AWAKE: station woke up + * @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep + * @IWL_MVM_PM_EVENT_UAPSD: station sent uAPSD trigger + * @IWL_MVM_PM_EVENT_PS_POLL: station sent PS-Poll + */ +enum iwl_mvm_pm_event { + IWL_MVM_PM_EVENT_AWAKE, + IWL_MVM_PM_EVENT_ASLEEP, + IWL_MVM_PM_EVENT_UAPSD, + IWL_MVM_PM_EVENT_PS_POLL, +}; /* PEER_PM_NTFY_API_E_VER_1 */ + +/** + * struct iwl_mvm_pm_state_notification - station PM state notification + * @sta_id: station ID of the station changing state + * @type: the new powersave state, see &enum iwl_mvm_pm_event + */ +struct iwl_mvm_pm_state_notification { + u8 sta_id; + u8 type; + /* private: */ + __le16 reserved; +} __packed; /* PEER_PM_NTFY_API_S_VER_1 */ + +#define BA_WINDOW_STREAMS_MAX 16 +#define BA_WINDOW_STATUS_TID_MSK 0x000F +#define BA_WINDOW_STATUS_STA_ID_POS 4 +#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0 +#define BA_WINDOW_STATUS_VALID_MSK BIT(9) + +/** + * struct iwl_ba_window_status_notif - reordering window's status notification + * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63] + * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid + * @start_seq_num: the start sequence number of the bitmap + * @mpdu_rx_count: the number of received MPDUs since entering D0i3 + */ +struct iwl_ba_window_status_notif { + __le64 bitmap[BA_WINDOW_STREAMS_MAX]; + __le16 ra_tid[BA_WINDOW_STREAMS_MAX]; + __le32 start_seq_num[BA_WINDOW_STREAMS_MAX]; + __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; +} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h new file mode 100644 index 000000000000..5a40092febfb --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -0,0 +1,787 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
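The packed @ra_tid field of the BA window status notification combines three values; the sketch below splits them out using the masks defined above. The function is illustrative and not taken from the driver.

static void example_parse_ba_entry(const struct iwl_ba_window_status_notif *notif,
                                   int stream, u8 *tid, u8 *sta_id, bool *valid)
{
        u16 ra_tid = le16_to_cpu(notif->ra_tid[stream]);

        *tid = ra_tid & BA_WINDOW_STATUS_TID_MSK;
        *sta_id = (ra_tid & BA_WINDOW_STATUS_STA_ID_MSK) >>
                  BA_WINDOW_STATUS_STA_ID_POS;
        *valid = ra_tid & BA_WINDOW_STATUS_VALID_MSK;
}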
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_scan_h__ +#define __iwl_fw_api_scan_h__ + +/* Scan Commands, Responses, Notifications */ + +/* Max number of IEs for direct SSID scans in a command */ +#define PROBE_OPTION_MAX 20 + +/** + * struct iwl_ssid_ie - directed scan network information element + * + * Up to 20 of these may appear in REPLY_SCAN_CMD, + * selected by "type" bit field in struct iwl_scan_channel; + * each channel may select different ssids from among the 20 entries. + * SSID IEs get transmitted in reverse order of entry. 
+ * + * @id: element ID + * @len: element length + * @ssid: element (SSID) data + */ +struct iwl_ssid_ie { + u8 id; + u8 len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; +} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ + +/* scan offload */ +#define IWL_SCAN_MAX_BLACKLIST_LEN 64 +#define IWL_SCAN_SHORT_BLACKLIST_LEN 16 +#define IWL_SCAN_MAX_PROFILES 11 +#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512 + +/* Default watchdog (in MS) for scheduled scan iteration */ +#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000) + +#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) +#define CAN_ABORT_STATUS 1 + +#define IWL_FULL_SCAN_MULTIPLIER 5 +#define IWL_FAST_SCHED_SCAN_ITERATIONS 3 +#define IWL_MAX_SCHED_SCAN_PLANS 2 + +enum scan_framework_client { + SCAN_CLIENT_SCHED_SCAN = BIT(0), + SCAN_CLIENT_NETDETECT = BIT(1), + SCAN_CLIENT_ASSET_TRACKING = BIT(2), +}; + +/** + * struct iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S + * @ssid: MAC address to filter out + * @reported_rssi: AP rssi reported to the host + * @client_bitmap: clients ignore this entry - enum scan_framework_client + */ +struct iwl_scan_offload_blacklist { + u8 ssid[ETH_ALEN]; + u8 reported_rssi; + u8 client_bitmap; +} __packed; + +enum iwl_scan_offload_network_type { + IWL_NETWORK_TYPE_BSS = 1, + IWL_NETWORK_TYPE_IBSS = 2, + IWL_NETWORK_TYPE_ANY = 3, +}; + +enum iwl_scan_offload_band_selection { + IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4, + IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8, + IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc, +}; + +/** + * struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S + * @ssid_index: index to ssid list in fixed part + * @unicast_cipher: encryption algorithm to match - bitmap + * @auth_alg: authentication algorithm to match - bitmap + * @network_type: enum iwl_scan_offload_network_type + * @band_selection: enum iwl_scan_offload_band_selection + * @client_bitmap: clients waiting for match - enum scan_framework_client + * @reserved: reserved + */ +struct iwl_scan_offload_profile { + u8 ssid_index; + u8 unicast_cipher; + u8 auth_alg; + u8 network_type; + u8 band_selection; + u8 client_bitmap; + u8 reserved[2]; +} __packed; + +/** + * struct iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1 + * @profiles: profiles to search for match + * @blacklist_len: length of blacklist + * @num_profiles: num of profiles in the list + * @match_notify: clients waiting for match found notification + * @pass_match: clients waiting for the results + * @active_clients: active clients bitmap - enum scan_framework_client + * @any_beacon_notify: clients waiting for match notification without match + * @reserved: reserved + */ +struct iwl_scan_offload_profile_cfg { + struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES]; + u8 blacklist_len; + u8 num_profiles; + u8 match_notify; + u8 pass_match; + u8 active_clients; + u8 any_beacon_notify; + u8 reserved[2]; +} __packed; + +/** + * struct iwl_scan_schedule_lmac - schedule of scan offload + * @delay: delay between iterations, in seconds. 
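A sketch of filling one directed-scan SSID entry and one match profile, using only the structures above plus WLAN_EID_SSID from linux/ieee80211.h; the SSID string and the chosen client bits are placeholders.

static void example_fill_match_profile(struct iwl_ssid_ie *ssid_ie,
                                       struct iwl_scan_offload_profile *profile)
{
        static const u8 ssid[] = "example-network";

        ssid_ie->id = WLAN_EID_SSID;
        ssid_ie->len = sizeof(ssid) - 1;
        memcpy(ssid_ie->ssid, ssid, ssid_ie->len);

        memset(profile, 0, sizeof(*profile));
        profile->ssid_index = 0;        /* index into the command's SSID list */
        profile->network_type = IWL_NETWORK_TYPE_ANY;
        profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
        profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
}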
+ * @iterations: num of scan iterations + * @full_scan_mul: number of partial scans before each full scan + */ +struct iwl_scan_schedule_lmac { + __le16 delay; + u8 iterations; + u8 full_scan_mul; +} __packed; /* SCAN_SCHEDULE_API_S */ + +enum iwl_scan_offload_complete_status { + IWL_SCAN_OFFLOAD_COMPLETED = 1, + IWL_SCAN_OFFLOAD_ABORTED = 2, +}; + +enum iwl_scan_ebs_status { + IWL_SCAN_EBS_SUCCESS, + IWL_SCAN_EBS_FAILED, + IWL_SCAN_EBS_CHAN_NOT_FOUND, + IWL_SCAN_EBS_INACTIVE, +}; + +/** + * struct iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S + * @tx_flags: combination of TX_CMD_FLG_* + * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is + * cleared. Combination of RATE_MCS_* + * @sta_id: index of destination station in FW station table + * @reserved: for alignment and future use + */ +struct iwl_scan_req_tx_cmd { + __le32 tx_flags; + __le32 rate_n_flags; + u8 sta_id; + u8 reserved[3]; +} __packed; + +enum iwl_scan_channel_flags_lmac { + IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27), + IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28), +}; + +/** + * struct iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2 + * @flags: bits 1-20: directed scan to i'th ssid + * other bits &enum iwl_scan_channel_flags_lmac + * @channel_num: channel number 1-13 etc + * @iter_count: scan iteration on this channel + * @iter_interval: interval in seconds between iterations on one channel + */ +struct iwl_scan_channel_cfg_lmac { + __le32 flags; + __le16 channel_num; + __le16 iter_count; + __le32 iter_interval; +} __packed; + +/* + * struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1 + * @offset: offset in the data block + * @len: length of the segment + */ +struct iwl_scan_probe_segment { + __le16 offset; + __le16 len; +} __packed; + +/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2 + * @mac_header: first (and common) part of the probe + * @band_data: band specific data + * @common_data: last (and common) part of the probe + * @buf: raw data block + */ +struct iwl_scan_probe_req { + struct iwl_scan_probe_segment mac_header; + struct iwl_scan_probe_segment band_data[2]; + struct iwl_scan_probe_segment common_data; + u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; +} __packed; + +enum iwl_scan_channel_flags { + IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0), + IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1), + IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2), +}; + +/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S + * @flags: enum iwl_scan_channel_flags + * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is + * involved. + * 1 - EBS is disabled. + * 2 - every second scan will be full scan(and so on). + */ +struct iwl_scan_channel_opt { + __le16 flags; + __le16 non_ebs_ratio; +} __packed; + +/** + * enum iwl_mvm_lmac_scan_flags - LMAC scan flags + * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses + * without filtering. + * @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels + * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan + * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification + * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS: multiple SSID matching + * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented + * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report + * and DS parameter set IEs into probe requests. + * @IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL: use extended dwell time on channels + * 1, 6 and 11. 
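One channel entry of the LMAC request might be filled as sketched below. Mapping the first SSID to BIT(1) is an assumption based on the "bits 1-20" note in the kernel-doc above, and the channel number and iteration counts are placeholders.

static void example_fill_channel(struct iwl_scan_channel_cfg_lmac *cfg)
{
        cfg->channel_num = cpu_to_le16(6);
        cfg->iter_count = cpu_to_le16(1);
        cfg->iter_interval = 0;

        /* probe the first direct-scan SSID and request a full scan */
        cfg->flags = cpu_to_le32(BIT(1) | IWL_UNIFIED_SCAN_CHANNEL_FULL);
}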
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches + */ +enum iwl_mvm_lmac_scan_flags { + IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), + IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1), + IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2), + IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3), + IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), + IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), + IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), + IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL = BIT(7), + IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), +}; + +enum iwl_scan_priority { + IWL_SCAN_PRIORITY_LOW, + IWL_SCAN_PRIORITY_MEDIUM, + IWL_SCAN_PRIORITY_HIGH, +}; + +enum iwl_scan_priority_ext { + IWL_SCAN_PRIORITY_EXT_0_LOWEST, + IWL_SCAN_PRIORITY_EXT_1, + IWL_SCAN_PRIORITY_EXT_2, + IWL_SCAN_PRIORITY_EXT_3, + IWL_SCAN_PRIORITY_EXT_4, + IWL_SCAN_PRIORITY_EXT_5, + IWL_SCAN_PRIORITY_EXT_6, + IWL_SCAN_PRIORITY_EXT_7_HIGHEST, +}; + +/** + * struct iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1 + * @reserved1: for alignment and future use + * @n_channels: num of channels to scan + * @active_dwell: dwell time for active channels + * @passive_dwell: dwell time for passive channels + * @fragmented_dwell: dwell time for fragmented passive scan + * @extended_dwell: dwell time for channels 1, 6 and 11 (in certain cases) + * @reserved2: for alignment and future use + * @rx_chain_select: PHY_RX_CHAIN_* flags + * @scan_flags: &enum iwl_mvm_lmac_scan_flags + * @max_out_time: max time (in TU) to be out of associated channel + * @suspend_time: pause scan this long (TUs) when returning to service channel + * @flags: RXON flags + * @filter_flags: RXON filter + * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz + * @direct_scan: list of SSIDs for directed active scan + * @scan_prio: enum iwl_scan_priority + * @iter_num: number of scan iterations + * @delay: delay in seconds before first iteration + * @schedule: two scheduling plans. The first one is finite, the second one can + * be infinite. + * @channel_opt: channel optimization options, for full and partial scan + * @data: channel configuration and probe request packet. 
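Per the @data description above, the request is a fixed part followed by the per-channel configs and then the probe-request template (the structure itself is defined immediately below). A sketch of the resulting command length, with the sizeof() operands taken from this header; the driver's own sizing helper may differ in detail.

#include <stddef.h>

static size_t lmac_scan_cmd_len(size_t fixed,      /* sizeof(struct iwl_scan_req_lmac) */
				size_t chan_cfg,   /* sizeof(struct iwl_scan_channel_cfg_lmac) */
				size_t probe_req,  /* sizeof(struct iwl_scan_probe_req) */
				unsigned int n_channels)
{
	/* fixed part, then n_channels channel entries, then the template */
	return fixed + (size_t)n_channels * chan_cfg + probe_req;
}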
+ */ +struct iwl_scan_req_lmac { + /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */ + __le32 reserved1; + u8 n_channels; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + u8 extended_dwell; + u8 reserved2; + __le16 rx_chain_select; + __le32 scan_flags; + __le32 max_out_time; + __le32 suspend_time; + /* RX_ON_FLAGS_API_S_VER_1 */ + __le32 flags; + __le32 filter_flags; + struct iwl_scan_req_tx_cmd tx_cmd[2]; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; + __le32 scan_prio; + /* SCAN_REQ_PERIODIC_PARAMS_API_S */ + __le32 iter_num; + __le32 delay; + struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS]; + struct iwl_scan_channel_opt channel_opt[2]; + u8 data[]; +} __packed; + +/** + * struct iwl_scan_results_notif - scan results for one channel - + * SCAN_RESULT_NTF_API_S_VER_3 + * @channel: which channel the results are from + * @band: 0 for 5.2 GHz, 1 for 2.4 GHz + * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request + * @num_probe_not_sent: # of request that weren't sent due to not enough time + * @duration: duration spent in channel, in usecs + */ +struct iwl_scan_results_notif { + u8 channel; + u8 band; + u8 probe_status; + u8 num_probe_not_sent; + __le32 duration; +} __packed; + +/** + * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels) + * SCAN_COMPLETE_NTF_API_S_VER_3 + * @scanned_channels: number of channels scanned (and number of valid results) + * @status: one of SCAN_COMP_STATUS_* + * @bt_status: BT on/off status + * @last_channel: last channel that was scanned + * @tsf_low: TSF timer (lower half) in usecs + * @tsf_high: TSF timer (higher half) in usecs + * @results: an array of scan results, only "scanned_channels" of them are valid + */ +struct iwl_lmac_scan_complete_notif { + u8 scanned_channels; + u8 status; + u8 bt_status; + u8 last_channel; + __le32 tsf_low; + __le32 tsf_high; + struct iwl_scan_results_notif results[]; +} __packed; + +/** + * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2 + * @last_schedule_line: last schedule line executed (fast or regular) + * @last_schedule_iteration: last scan iteration executed before scan abort + * @status: &enum iwl_scan_offload_complete_status + * @ebs_status: EBS success status &enum iwl_scan_ebs_status + * @time_after_last_iter: time in seconds elapsed after last iteration + * @reserved: reserved + */ +struct iwl_periodic_scan_complete { + u8 last_schedule_line; + u8 last_schedule_iteration; + u8 status; + u8 ebs_status; + __le32 time_after_last_iter; + __le32 reserved; +} __packed; + +/* UMAC Scan API */ + +/* The maximum of either of these cannot exceed 8, because we use an + * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). 
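A host-side sketch of walking the variable-length results[] array that trails the LMAC scan-complete notification above: only the first scanned_channels entries are valid. Standard integer types stand in for the kernel's __le32/u8, and direct field reads assume a little-endian host (in-tree code would use le32_to_cpu()).

#include <stdint.h>
#include <stdio.h>

struct results_notif {              /* cf. struct iwl_scan_results_notif */
	uint8_t channel, band, probe_status, num_probe_not_sent;
	uint32_t duration;          /* usecs */
} __attribute__((packed));

struct lmac_scan_complete {         /* cf. struct iwl_lmac_scan_complete_notif */
	uint8_t scanned_channels, status, bt_status, last_channel;
	uint32_t tsf_low, tsf_high;
	struct results_notif results[];
} __attribute__((packed));

static void dump_scan_complete(const struct lmac_scan_complete *n)
{
	uint64_t tsf = ((uint64_t)n->tsf_high << 32) | n->tsf_low;

	printf("scan done: status=%u tsf=%llu\n",
	       (unsigned)n->status, (unsigned long long)tsf);

	for (unsigned int i = 0; i < n->scanned_channels; i++)
		printf("  chan %u (%s): dwell %u usec\n",
		       (unsigned)n->results[i].channel,
		       n->results[i].band ? "2.4 GHz" : "5.2 GHz",
		       (unsigned)n->results[i].duration);
}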
+ */ +#define IWL_MVM_MAX_UMAC_SCANS 8 +#define IWL_MVM_MAX_LMAC_SCANS 1 + +enum scan_config_flags { + SCAN_CONFIG_FLAG_ACTIVATE = BIT(0), + SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1), + SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2), + SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3), + SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8), + SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9), + SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10), + SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11), + SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12), + SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13), + SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14), + SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15), + SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16), + SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17), + SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18), + SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19), + SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20), + SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21), + SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22), + SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23), + + /* Bits 26-31 are for num of channels in channel_array */ +#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26) +}; + +enum scan_config_rates { + /* OFDM basic rates */ + SCAN_CONFIG_RATE_6M = BIT(0), + SCAN_CONFIG_RATE_9M = BIT(1), + SCAN_CONFIG_RATE_12M = BIT(2), + SCAN_CONFIG_RATE_18M = BIT(3), + SCAN_CONFIG_RATE_24M = BIT(4), + SCAN_CONFIG_RATE_36M = BIT(5), + SCAN_CONFIG_RATE_48M = BIT(6), + SCAN_CONFIG_RATE_54M = BIT(7), + /* CCK basic rates */ + SCAN_CONFIG_RATE_1M = BIT(8), + SCAN_CONFIG_RATE_2M = BIT(9), + SCAN_CONFIG_RATE_5M = BIT(10), + SCAN_CONFIG_RATE_11M = BIT(11), + + /* Bits 16-27 are for supported rates */ +#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16) +}; + +enum iwl_channel_flags { + IWL_CHANNEL_FLAG_EBS = BIT(0), + IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1), + IWL_CHANNEL_FLAG_EBS_ADD = BIT(2), + IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3), +}; + +/** + * struct iwl_scan_dwell + * @active: default dwell time for active scan + * @passive: default dwell time for passive scan + * @fragmented: default dwell time for fragmented scan + * @extended: default dwell time for channels 1, 6 and 11 + */ +struct iwl_scan_dwell { + u8 active; + u8 passive; + u8 fragmented; + u8 extended; +} __packed; + +/** + * struct iwl_scan_config + * @flags: enum scan_config_flags + * @tx_chains: valid_tx antenna - ANT_* definitions + * @rx_chains: valid_rx antenna - ANT_* definitions + * @legacy_rates: default legacy rates - enum scan_config_rates + * @out_of_channel_time: default max out of serving channel time + * @suspend_time: default max suspend time + * @dwell: dwells for the scan + * @mac_addr: default mac address to be used in probes + * @bcast_sta_id: the index of the station in the fw + * @channel_flags: default channel flags - enum iwl_channel_flags + * scan_config_channel_flag + * @channel_array: default supported channels + */ +struct iwl_scan_config_v1 { + __le32 flags; + __le32 tx_chains; + __le32 rx_chains; + __le32 legacy_rates; + __le32 out_of_channel_time; + __le32 suspend_time; + struct iwl_scan_dwell dwell; + u8 mac_addr[ETH_ALEN]; + u8 bcast_sta_id; + u8 channel_flags; + u8 channel_array[]; +} __packed; /* SCAN_CONFIG_DB_CMD_API_S */ + +#define SCAN_TWO_LMACS 2 + +struct iwl_scan_config { + __le32 flags; + __le32 tx_chains; + __le32 rx_chains; + __le32 legacy_rates; + __le32 out_of_channel_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + struct iwl_scan_dwell dwell; + u8 mac_addr[ETH_ALEN]; + u8 bcast_sta_id; + u8 channel_flags; + u8 channel_array[]; +} 
__packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */ + +/** + * enum iwl_umac_scan_flags - UMAC scan flags + * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request + * can be preempted by other scan requests with higher priority. + * The low priority scan will be resumed when the higher proirity scan is + * completed. + * @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver + * when scan starts. + */ +enum iwl_umac_scan_flags { + IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0), + IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1), +}; + +enum iwl_umac_scan_uid_offsets { + IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0, + IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8, +}; + +enum iwl_umac_scan_general_flags { + IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), + IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), + IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), + IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), + IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), + IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), + IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), + IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), + IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), + IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), + IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), + IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), +}; + +/** + * struct iwl_scan_channel_cfg_umac + * @flags: bitmap - 0-19: directed scan to i'th ssid. + * @channel_num: channel number 1-13 etc. + * @iter_count: repetition count for the channel. + * @iter_interval: interval between two scan iterations on one channel. + */ +struct iwl_scan_channel_cfg_umac { + __le32 flags; + u8 channel_num; + u8 iter_count; + __le16 iter_interval; +} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */ + +/** + * struct iwl_scan_umac_schedule + * @interval: interval in seconds between scan iterations + * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop + * @reserved: for alignment and future use + */ +struct iwl_scan_umac_schedule { + __le16 interval; + u8 iter_count; + u8 reserved; +} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */ + +/** + * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command + * parameters following channels configuration array. + * @schedule: two scheduling plans. 
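For reference, a sketch of assembling the flags and legacy_rates words of the scan configuration command defined above. The channel count is packed into bits 26-31 of flags and channel_array[] then carries one byte per channel after the fixed part. The constants below are copied from the enums earlier in this file; the helper names and the particular flag set are only an example.

#include <stdint.h>

#define CFG_ACTIVATE          (1u << 0)             /* SCAN_CONFIG_FLAG_ACTIVATE */
#define CFG_SET_CHANNEL_FLAGS (1u << 13)            /* SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS */
#define CFG_SET_LEGACY_RATES  (1u << 14)            /* SCAN_CONFIG_FLAG_SET_LEGACY_RATES */
#define CFG_SET_MAC_ADDR      (1u << 15)            /* SCAN_CONFIG_FLAG_SET_MAC_ADDR */
#define CFG_N_CHANNELS(n)     ((uint32_t)(n) << 26) /* SCAN_CONFIG_N_CHANNELS */

#define RATE_6M  (1u << 0)                          /* SCAN_CONFIG_RATE_6M (OFDM) */
#define RATE_1M  (1u << 8)                          /* SCAN_CONFIG_RATE_1M (CCK) */
#define SUPPORTED(r) ((r) << 16)                    /* SCAN_CONFIG_SUPPORTED_RATE */

static uint32_t scan_cfg_flags(unsigned int n_channels)
{
	return CFG_ACTIVATE | CFG_SET_CHANNEL_FLAGS |
	       CFG_SET_LEGACY_RATES | CFG_SET_MAC_ADDR |
	       CFG_N_CHANNELS(n_channels);
}

static uint32_t scan_cfg_legacy_rates(void)
{
	uint32_t basic = RATE_6M | RATE_1M;

	/* advertise the basic rates as supported as well */
	return basic | SUPPORTED(basic);
}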
+ * @delay: delay in TUs before starting the first scan iteration + * @reserved: for future use and alignment + * @preq: probe request with IEs blocks + * @direct_scan: list of SSIDs for directed active scan + */ +struct iwl_scan_req_umac_tail { + /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ + struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; + __le16 delay; + __le16 reserved; + /* SCAN_PROBE_PARAMS_API_S_VER_1 */ + struct iwl_scan_probe_req preq; + struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; +} __packed; + +/** + * struct iwl_scan_req_umac + * @flags: &enum iwl_umac_scan_flags + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @ooc_priority: out of channel priority - &enum iwl_scan_priority + * @general_flags: &enum iwl_umac_scan_general_flags + * @reserved2: for future use and alignment + * @scan_start_mac_id: report the scan start TSF time according to this mac TSF + * @extended_dwell: dwell time for channels 1, 6 and 11 + * @active_dwell: dwell time for active scan + * @passive_dwell: dwell time for passive scan + * @fragmented_dwell: dwell time for fragmented passive scan + * @max_out_time: max out of serving channel time, per LMAC - for CDB there + * are 2 LMACs + * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs + * @scan_priority: scan internal prioritization &enum iwl_scan_priority + * @channel_flags: &enum iwl_scan_channel_flags + * @n_channels: num of channels in scan request + * @reserved: for future use and alignment + * @data: &struct iwl_scan_channel_cfg_umac and + * &struct iwl_scan_req_umac_tail + */ +struct iwl_scan_req_umac { + __le32 flags; + __le32 uid; + __le32 ooc_priority; + /* SCAN_GENERAL_PARAMS_API_S_VER_4 */ + __le16 general_flags; + u8 reserved2; + u8 scan_start_mac_id; + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + union { + struct { + __le32 max_out_time; + __le32 suspend_time; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved; + u8 data[]; + } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ + struct { + __le32 max_out_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved; + u8 data[]; + } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ + }; +} __packed; + +#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ + 2 * sizeof(__le32)) + +/** + * struct iwl_umac_scan_abort + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @flags: reserved + */ +struct iwl_umac_scan_abort { + __le32 uid; + __le32 flags; +} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ + +/** + * struct iwl_umac_scan_complete + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @last_schedule: last scheduling line + * @last_iter: last scan iteration number + * @status: &enum iwl_scan_offload_complete_status + * @ebs_status: &enum iwl_scan_ebs_status + * @time_from_last_iter: time elapsed from last iteration + * @reserved: for future use + */ +struct iwl_umac_scan_complete { + __le32 uid; + u8 last_schedule; + u8 last_iter; + u8 status; + u8 ebs_status; + __le32 time_from_last_iter; + __le32 reserved; +} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */ + +#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5 +/** + * struct iwl_scan_offload_profile_match - match information + * @bssid: matched bssid + * @reserved: reserved + * @channel: channel where the 
match occurred + * @energy: energy + * @matching_feature: feature matches + * @matching_channels: bitmap of channels that matched, referencing + * the channels passed in tue scan offload request + */ +struct iwl_scan_offload_profile_match { + u8 bssid[ETH_ALEN]; + __le16 reserved; + u8 channel; + u8 energy; + u8 matching_feature; + u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN]; +} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */ + +/** + * struct iwl_scan_offload_profiles_query - match results query response + * @matched_profiles: bitmap of matched profiles, referencing the + * matches passed in the scan offload request + * @last_scan_age: age of the last offloaded scan + * @n_scans_done: number of offloaded scans done + * @gp2_d0u: GP2 when D0U occurred + * @gp2_invoked: GP2 when scan offload was invoked + * @resume_while_scanning: not used + * @self_recovery: obsolete + * @reserved: reserved + * @matches: array of match information, one for each match + */ +struct iwl_scan_offload_profiles_query { + __le32 matched_profiles; + __le32 last_scan_age; + __le32 n_scans_done; + __le32 gp2_d0u; + __le32 gp2_invoked; + u8 resume_while_scanning; + u8 self_recovery; + __le16 reserved; + struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; +} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ + +/** + * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration + * @uid: scan id, &enum iwl_umac_scan_uid_offsets + * @scanned_channels: number of channels scanned and number of valid elements in + * results array + * @status: one of SCAN_COMP_STATUS_* + * @bt_status: BT on/off status + * @last_channel: last channel that was scanned + * @start_tsf: TSF timer in usecs of the scan start time for the mac specified + * in &struct iwl_scan_req_umac. + * @results: array of scan results, length in @scanned_channels + */ +struct iwl_umac_scan_iter_complete_notif { + __le32 uid; + u8 scanned_channels; + u8 status; + u8 bt_status; + u8 last_channel; + __le64 start_tsf; + struct iwl_scan_results_notif results[]; +} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */ + +#endif /* __iwl_fw_api_scan_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h new file mode 100644 index 000000000000..e517b55f1bc6 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h @@ -0,0 +1,138 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
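Stepping back to the scheduled-scan match results defined just before this license block: matched_profiles is a bitmap whose set bits index into matches[]. A host-side sketch of reading it, with standard types mirroring the header's __le32/u8 fields (direct reads assume a little-endian host; in-tree code uses le32_to_cpu()).

#include <stdint.h>
#include <stdio.h>

#define MAX_PROFILES   11        /* IWL_SCAN_MAX_PROFILES */
#define MATCH_CHAN_LEN 5         /* SCAN_OFFLOAD_MATCHING_CHANNELS_LEN */

struct profile_match {           /* cf. struct iwl_scan_offload_profile_match */
	uint8_t bssid[6];
	uint16_t reserved;
	uint8_t channel, energy, matching_feature;
	uint8_t matching_channels[MATCH_CHAN_LEN];
} __attribute__((packed));

struct profiles_query {          /* cf. struct iwl_scan_offload_profiles_query */
	uint32_t matched_profiles;   /* bitmap, bit i -> matches[i] */
	uint32_t last_scan_age, n_scans_done, gp2_d0u, gp2_invoked;
	uint8_t resume_while_scanning, self_recovery;
	uint16_t reserved;
	struct profile_match matches[MAX_PROFILES];
} __attribute__((packed));

static void dump_matches(const struct profiles_query *q)
{
	for (unsigned int i = 0; i < MAX_PROFILES; i++) {
		const struct profile_match *m = &q->matches[i];

		if (!(q->matched_profiles & (1u << i)))
			continue;
		printf("profile %u matched on channel %u (energy %u)\n",
		       i, (unsigned)m->channel, (unsigned)m->energy);
		(void)m->bssid;   /* matched BSSID, if needed */
	}
}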
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_sf_h__ +#define __iwl_fw_api_sf_h__ + +/* Smart Fifo state */ +enum iwl_sf_state { + SF_LONG_DELAY_ON = 0, /* should never be called by driver */ + SF_FULL_ON, + SF_UNINIT, + SF_INIT_OFF, + SF_HW_NUM_STATES +}; + +/* Smart Fifo possible scenario */ +enum iwl_sf_scenario { + SF_SCENARIO_SINGLE_UNICAST, + SF_SCENARIO_AGG_UNICAST, + SF_SCENARIO_MULTICAST, + SF_SCENARIO_BA_RESP, + SF_SCENARIO_TX_RESP, + SF_NUM_SCENARIO +}; + +#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */ +#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ + +/* smart FIFO default values */ +#define SF_W_MARK_SISO 6144 +#define SF_W_MARK_MIMO2 8192 +#define SF_W_MARK_MIMO3 6144 +#define SF_W_MARK_LEGACY 4096 +#define SF_W_MARK_SCAN 4096 + +/* SF Scenarios timers for default configuration (aligned to 32 uSec) */ +#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */ +#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */ +#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */ +#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */ + +/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */ +#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ +#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ +#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ +#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */ +#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */ +#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */ +#define SF_BA_IDLE_TIMER 320 /* 300 uSec */ +#define SF_BA_AGING_TIMER 2016 /* 2 mSec */ +#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */ +#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */ + +#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */ + +#define SF_CFG_DUMMY_NOTIF_OFF BIT(16) + +/** + * struct iwl_sf_cfg_cmd - Smart Fifo configuration command. + * @state: smart fifo state, types listed in &enum iwl_sf_state. + * @watermark: Minimum allowed available free space in RXF for transient state. + * @long_delay_timeouts: aging and idle timer values for each scenario + * in long delay state. + * @full_on_timeouts: timer values for each scenario in full on state. + */ +struct iwl_sf_cfg_cmd { + __le32 state; + __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; + __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; + __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; +} __packed; /* SF_CFG_API_S_VER_2 */ + +#endif /* __iwl_fw_api_sf_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h new file mode 100644 index 000000000000..af369eba3795 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h @@ -0,0 +1,573 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
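A sketch of populating the Smart Fifo command from sf.h above, reusing its default watermark and timer values. The aging/idle ordering of the two timeout slots follows the SF_NUM_TIMEOUT_TYPES comment and is an assumption of this sketch; standard types stand in for __le32 and a real driver would convert with cpu_to_le32().

#include <stdint.h>
#include <string.h>

#define NUM_SCENARIO  5    /* SF_NUM_SCENARIO */
#define NUM_TIMEOUTS  2    /* SF_NUM_TIMEOUT_TYPES: aging, idle */
#define NUM_TRANSIENT 2    /* SF_TRANSIENT_STATES_NUMBER */

struct sf_cfg {                        /* cf. struct iwl_sf_cfg_cmd */
	uint32_t state;                /* enum iwl_sf_state */
	uint32_t watermark[NUM_TRANSIENT];
	uint32_t long_delay_timeouts[NUM_SCENARIO][NUM_TIMEOUTS];
	uint32_t full_on_timeouts[NUM_SCENARIO][NUM_TIMEOUTS];
} __attribute__((packed));

static void sf_cfg_defaults(struct sf_cfg *cmd)
{
	/* default {aging, idle} pairs per scenario, values from sf.h */
	static const uint32_t def[NUM_SCENARIO][NUM_TIMEOUTS] = {
		{ 400, 160 },   /* SF_SCENARIO_SINGLE_UNICAST */
		{ 400, 160 },   /* SF_SCENARIO_AGG_UNICAST */
		{ 400, 160 },   /* SF_SCENARIO_MULTICAST */
		{ 400, 160 },   /* SF_SCENARIO_BA_RESP */
		{ 400, 160 },   /* SF_SCENARIO_TX_RESP */
	};

	memset(cmd, 0, sizeof(*cmd));
	cmd->state = 1;                 /* SF_FULL_ON */
	cmd->watermark[0] = 6144;       /* e.g. SF_W_MARK_SISO */
	memcpy(cmd->full_on_timeouts, def, sizeof(def));
	memcpy(cmd->long_delay_timeouts, def, sizeof(def));
}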
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *****************************************************************************/ + +#ifndef __iwl_fw_api_sta_h__ +#define __iwl_fw_api_sta_h__ + +/** + * enum iwl_sta_flags - flags for the ADD_STA host command + * @STA_FLG_REDUCED_TX_PWR_CTRL: reduced TX power (control frames) + * @STA_FLG_REDUCED_TX_PWR_DATA: reduced TX power (data frames) + * @STA_FLG_DISABLE_TX: set if TX should be disabled + * @STA_FLG_PS: set if STA is in Power Save + * @STA_FLG_INVALID: set if STA is invalid + * @STA_FLG_DLP_EN: Direct Link Protocol is enabled + * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs + * @STA_FLG_DRAIN_FLOW: drain flow + * @STA_FLG_PAN: STA is for PAN interface + * @STA_FLG_CLASS_AUTH: station is authenticated + * @STA_FLG_CLASS_ASSOC: station is associated + * @STA_FLG_RTS_MIMO_PROT: station requires RTS MIMO protection (dynamic SMPS) + * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU (mask) + * @STA_FLG_MAX_AGG_SIZE_SHIFT: maximal size for A-MPDU (bit shift) + * @STA_FLG_MAX_AGG_SIZE_8K: maximal size for A-MPDU (8k supported) + * @STA_FLG_MAX_AGG_SIZE_16K: maximal size for A-MPDU (16k supported) + * @STA_FLG_MAX_AGG_SIZE_32K: maximal size for A-MPDU (32k supported) + * @STA_FLG_MAX_AGG_SIZE_64K: maximal size for A-MPDU (64k supported) + * @STA_FLG_MAX_AGG_SIZE_128K: maximal size for A-MPDU (128k supported) + * @STA_FLG_MAX_AGG_SIZE_256K: maximal size for A-MPDU (256k supported) + * @STA_FLG_MAX_AGG_SIZE_512K: maximal size for A-MPDU (512k supported) + * @STA_FLG_MAX_AGG_SIZE_1024K: maximal size for A-MPDU (1024k supported) + * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation + * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is + * initialised by driver and can be updated by fw upon reception of + * action frames that can change the channel width. When cleared the fw + * will send all the frames in 20MHz even when FAT channel is requested. + * @STA_FLG_FAT_EN_20MHZ: no wide channels are supported, only 20 MHz + * @STA_FLG_FAT_EN_40MHZ: wide channels up to 40 MHz supported + * @STA_FLG_FAT_EN_80MHZ: wide channels up to 80 MHz supported + * @STA_FLG_FAT_EN_160MHZ: wide channels up to 160 MHz supported + * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the + * driver and can be updated by fw upon reception of action frames. 
+ * @STA_FLG_MIMO_EN_SISO: no support for MIMO + * @STA_FLG_MIMO_EN_MIMO2: 2 streams supported + * @STA_FLG_MIMO_EN_MIMO3: 3 streams supported + * @STA_FLG_MFP_EN: Management Frame Protection + * @STA_FLG_AGG_MPDU_DENS_MSK: A-MPDU density (mask) + * @STA_FLG_AGG_MPDU_DENS_SHIFT: A-MPDU density (bit shift) + * @STA_FLG_AGG_MPDU_DENS_2US: A-MPDU density (2 usec gap) + * @STA_FLG_AGG_MPDU_DENS_4US: A-MPDU density (4 usec gap) + * @STA_FLG_AGG_MPDU_DENS_8US: A-MPDU density (8 usec gap) + * @STA_FLG_AGG_MPDU_DENS_16US: A-MPDU density (16 usec gap) + */ +enum iwl_sta_flags { + STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3), + STA_FLG_REDUCED_TX_PWR_DATA = BIT(6), + + STA_FLG_DISABLE_TX = BIT(4), + + STA_FLG_PS = BIT(8), + STA_FLG_DRAIN_FLOW = BIT(12), + STA_FLG_PAN = BIT(13), + STA_FLG_CLASS_AUTH = BIT(14), + STA_FLG_CLASS_ASSOC = BIT(15), + STA_FLG_RTS_MIMO_PROT = BIT(17), + + STA_FLG_MAX_AGG_SIZE_SHIFT = 19, + STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), + STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), + + STA_FLG_AGG_MPDU_DENS_SHIFT = 23, + STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), + STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), + + STA_FLG_FAT_EN_20MHZ = (0 << 26), + STA_FLG_FAT_EN_40MHZ = (1 << 26), + STA_FLG_FAT_EN_80MHZ = (2 << 26), + STA_FLG_FAT_EN_160MHZ = (3 << 26), + STA_FLG_FAT_EN_MSK = (3 << 26), + + STA_FLG_MIMO_EN_SISO = (0 << 28), + STA_FLG_MIMO_EN_MIMO2 = (1 << 28), + STA_FLG_MIMO_EN_MIMO3 = (2 << 28), + STA_FLG_MIMO_EN_MSK = (3 << 28), +}; + +/** + * enum iwl_sta_key_flag - key flags for the ADD_STA host command + * @STA_KEY_FLG_NO_ENC: no encryption + * @STA_KEY_FLG_WEP: WEP encryption algorithm + * @STA_KEY_FLG_CCM: CCMP encryption algorithm + * @STA_KEY_FLG_TKIP: TKIP encryption algorithm + * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support) + * @STA_KEY_FLG_GCMP: GCMP encryption algorithm + * @STA_KEY_FLG_CMAC: CMAC encryption algorithm + * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm + * @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value + * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from + * station info array (1 - n 1X mode) + * @STA_KEY_FLG_KEYID_MSK: the index of the key + * @STA_KEY_FLG_KEYID_POS: key index bit position + * @STA_KEY_NOT_VALID: key is invalid + * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key + * @STA_KEY_FLG_KEY_32BYTES: for non-wep key set for 32 bytes key + * @STA_KEY_MULTICAST: set for multical key + * @STA_KEY_MFP: key is used for Management Frame Protection + */ +enum iwl_sta_key_flag { + STA_KEY_FLG_NO_ENC = (0 << 0), + STA_KEY_FLG_WEP = (1 << 0), + STA_KEY_FLG_CCM = (2 << 0), + STA_KEY_FLG_TKIP = (3 << 0), + STA_KEY_FLG_EXT = (4 << 0), + STA_KEY_FLG_GCMP = (5 << 0), + STA_KEY_FLG_CMAC = (6 << 0), + STA_KEY_FLG_ENC_UNKNOWN = (7 << 0), + STA_KEY_FLG_EN_MSK = (7 << 0), + + 
STA_KEY_FLG_WEP_KEY_MAP = BIT(3), + STA_KEY_FLG_KEYID_POS = 8, + STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS), + STA_KEY_NOT_VALID = BIT(11), + STA_KEY_FLG_WEP_13BYTES = BIT(12), + STA_KEY_FLG_KEY_32BYTES = BIT(12), + STA_KEY_MULTICAST = BIT(14), + STA_KEY_MFP = BIT(15), +}; + +/** + * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed + * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue + * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx + * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs + * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid + * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid + * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count + * @STA_MODIFY_PROT_TH: modify RTS threshold + * @STA_MODIFY_QUEUES: modify the queues used by this station + */ +enum iwl_sta_modify_flag { + STA_MODIFY_QUEUE_REMOVAL = BIT(0), + STA_MODIFY_TID_DISABLE_TX = BIT(1), + STA_MODIFY_UAPSD_ACS = BIT(2), + STA_MODIFY_ADD_BA_TID = BIT(3), + STA_MODIFY_REMOVE_BA_TID = BIT(4), + STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5), + STA_MODIFY_PROT_TH = BIT(6), + STA_MODIFY_QUEUES = BIT(7), +}; + +/** + * enum iwl_sta_mode - station command mode + * @STA_MODE_ADD: add new station + * @STA_MODE_MODIFY: modify the station + */ +enum iwl_sta_mode { + STA_MODE_ADD = 0, + STA_MODE_MODIFY = 1, +}; + +/** + * enum iwl_sta_sleep_flag - type of sleep of the station + * @STA_SLEEP_STATE_AWAKE: station is awake + * @STA_SLEEP_STATE_PS_POLL: station is PS-polling + * @STA_SLEEP_STATE_UAPSD: station uses U-APSD + * @STA_SLEEP_STATE_MOREDATA: set more-data bit on + * (last) released frame + */ +enum iwl_sta_sleep_flag { + STA_SLEEP_STATE_AWAKE = 0, + STA_SLEEP_STATE_PS_POLL = BIT(0), + STA_SLEEP_STATE_UAPSD = BIT(1), + STA_SLEEP_STATE_MOREDATA = BIT(2), +}; + +#define STA_KEY_MAX_NUM (16) +#define STA_KEY_IDX_INVALID (0xff) +#define STA_KEY_MAX_DATA_KEY_NUM (4) +#define IWL_MAX_GLOBAL_KEYS (4) +#define STA_KEY_LEN_WEP40 (5) +#define STA_KEY_LEN_WEP104 (13) + +/** + * struct iwl_mvm_keyinfo - key information + * @key_flags: type &enum iwl_sta_key_flag + * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection + * @reserved1: reserved + * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx + * @key_offset: key offset in the fw's key table + * @reserved2: reserved + * @key: 16-byte unicast decryption key + * @tx_secur_seq_cnt: initial RSC / PN needed for replay check + * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only + * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only + */ +struct iwl_mvm_keyinfo { + __le16 key_flags; + u8 tkip_rx_tsc_byte2; + u8 reserved1; + __le16 tkip_rx_ttak[5]; + u8 key_offset; + u8 reserved2; + u8 key[16]; + __le64 tx_secur_seq_cnt; + __le64 hw_tkip_mic_rx_key; + __le64 hw_tkip_mic_tx_key; +} __packed; + +#define IWL_ADD_STA_STATUS_MASK 0xFF +#define IWL_ADD_STA_BAID_VALID_MASK 0x8000 +#define IWL_ADD_STA_BAID_MASK 0x7F00 +#define IWL_ADD_STA_BAID_SHIFT 8 + +/** + * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table. + * ( REPLY_ADD_STA = 0x18 ) + * @add_modify: see &enum iwl_sta_mode + * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD) + * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable + * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. 
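A small sketch of decoding the ADD_STA response status word using the masks defined above; the status codes themselves (ADD_STA_SUCCESS and friends) appear further down in this header, and the helper names are illustrative.

#include <stdint.h>
#include <stdbool.h>

#define ADD_STA_STATUS_MASK     0xFF     /* IWL_ADD_STA_STATUS_MASK */
#define ADD_STA_BAID_VALID_MASK 0x8000   /* IWL_ADD_STA_BAID_VALID_MASK */
#define ADD_STA_BAID_MASK       0x7F00   /* IWL_ADD_STA_BAID_MASK */
#define ADD_STA_BAID_SHIFT      8        /* IWL_ADD_STA_BAID_SHIFT */

static uint8_t add_sta_status(uint32_t status)
{
	return status & ADD_STA_STATUS_MASK;   /* e.g. ADD_STA_SUCCESS */
}

static bool add_sta_baid(uint32_t status, uint8_t *baid)
{
	if (!(status & ADD_STA_BAID_VALID_MASK))
		return false;
	*baid = (status & ADD_STA_BAID_MASK) >> ADD_STA_BAID_SHIFT;
	return true;
}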
+ * @mac_id_n_color: the Mac context this station belongs to, + * see &enum iwl_ctxt_id_and_color + * @addr: station's MAC address + * @reserved2: reserved + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave + * alone. 1 - modify, 0 - don't change. + * @reserved3: reserved + * @station_flags: look at &enum iwl_sta_flags + * @station_flags_msk: what of %station_flags have changed, + * also &enum iwl_sta_flags + * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) + * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set + * add_immediate_ba_ssn. + * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) + * Set %STA_MODIFY_REMOVE_BA_TID to use this field + * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with + * add_immediate_ba_tid. + * @sleep_tx_count: number of packets to transmit to station even though it is + * asleep. Used to synchronise PS-poll and u-APSD responses while ucode + * keeps track of STA sleep state. + * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag. + * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP + * mac-addr. + * @beamform_flags: beam forming controls + * @tfd_queue_msk: tfd queues used by this station + * + * The device contains an internal table of per-station information, with info + * on security keys, aggregation parameters, and Tx rates for initial Tx + * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). + * + * ADD_STA sets up the table entry for one station, either creating a new + * entry, or modifying a pre-existing one. + */ +struct iwl_mvm_add_sta_cmd_v7 { + u8 add_modify; + u8 awake_acs; + __le16 tid_disable_tx; + __le32 mac_id_n_color; + u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ + __le16 reserved2; + u8 sta_id; + u8 modify_mask; + __le16 reserved3; + __le32 station_flags; + __le32 station_flags_msk; + u8 add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; + __le16 sleep_tx_count; + __le16 sleep_state_flags; + __le16 assoc_id; + __le16 beamform_flags; + __le32 tfd_queue_msk; +} __packed; /* ADD_STA_CMD_API_S_VER_7 */ + +/** + * enum iwl_sta_type - FW station types + * ( REPLY_ADD_STA = 0x18 ) + * @IWL_STA_LINK: Link station - normal RX and TX traffic. + * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons + * and probe responses. + * @IWL_STA_MULTICAST: multicast traffic, + * @IWL_STA_TDLS_LINK: TDLS link station + * @IWL_STA_AUX_ACTIVITY: auxilary station (scan, ROC and so on). + */ +enum iwl_sta_type { + IWL_STA_LINK, + IWL_STA_GENERAL_PURPOSE, + IWL_STA_MULTICAST, + IWL_STA_TDLS_LINK, + IWL_STA_AUX_ACTIVITY, +}; + +/** + * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. + * ( REPLY_ADD_STA = 0x18 ) + * @add_modify: see &enum iwl_sta_mode + * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD) + * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable + * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. + * @mac_id_n_color: the Mac context this station belongs to, + * see &enum iwl_ctxt_id_and_color + * @addr: station's MAC address + * @reserved2: reserved + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave + * alone. 1 - modify, 0 - don't change. 
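A sketch of composing station_flags and station_flags_msk for the ADD_STA command defined just above, using the sub-field encodings from enum iwl_sta_flags earlier in this file. The constants are copied from that enum; the 80 MHz, two-stream, 64k A-MPDU peer is only an example, and a real driver would wrap the results with cpu_to_le32().

#include <stdint.h>

#define AGG_SIZE_SHIFT  19
#define AGG_SIZE_64K    (3u << AGG_SIZE_SHIFT)    /* STA_FLG_MAX_AGG_SIZE_64K */
#define AGG_SIZE_MSK    (7u << AGG_SIZE_SHIFT)
#define MPDU_DENS_SHIFT 23
#define MPDU_DENS_4US   (5u << MPDU_DENS_SHIFT)   /* STA_FLG_AGG_MPDU_DENS_4US */
#define MPDU_DENS_MSK   (7u << MPDU_DENS_SHIFT)
#define FAT_EN_80MHZ    (2u << 26)                /* STA_FLG_FAT_EN_80MHZ */
#define FAT_EN_MSK      (3u << 26)
#define MIMO_EN_MIMO2   (1u << 28)                /* STA_FLG_MIMO_EN_MIMO2 */
#define MIMO_EN_MSK     (3u << 28)

struct sta_flags_pair {
	uint32_t flags;      /* value for station_flags */
	uint32_t msk;        /* value for station_flags_msk */
};

static struct sta_flags_pair sta_flags_vht80_2x2(void)
{
	struct sta_flags_pair p = {
		.flags = AGG_SIZE_64K | MPDU_DENS_4US |
			 FAT_EN_80MHZ | MIMO_EN_MIMO2,
		/* the mask tells the firmware which sub-fields are valid */
		.msk   = AGG_SIZE_MSK | MPDU_DENS_MSK |
			 FAT_EN_MSK | MIMO_EN_MSK,
	};

	return p;
}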
+ * @reserved3: reserved + * @station_flags: look at &enum iwl_sta_flags + * @station_flags_msk: what of %station_flags have changed, + * also &enum iwl_sta_flags + * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) + * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set + * add_immediate_ba_ssn. + * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) + * Set %STA_MODIFY_REMOVE_BA_TID to use this field + * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with + * add_immediate_ba_tid. + * @sleep_tx_count: number of packets to transmit to station even though it is + * asleep. Used to synchronise PS-poll and u-APSD responses while ucode + * keeps track of STA sleep state. + * @station_type: type of this station. See &enum iwl_sta_type. + * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag. + * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP + * mac-addr. + * @beamform_flags: beam forming controls + * @tfd_queue_msk: tfd queues used by this station. + * Obselete for new TX API (9 and above). + * @rx_ba_window: aggregation window size + * @sp_length: the size of the SP as it appears in the WME IE + * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver + * enabled ACs. + * + * The device contains an internal table of per-station information, with info + * on security keys, aggregation parameters, and Tx rates for initial Tx + * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). + * + * ADD_STA sets up the table entry for one station, either creating a new + * entry, or modifying a pre-existing one. + */ +struct iwl_mvm_add_sta_cmd { + u8 add_modify; + u8 awake_acs; + __le16 tid_disable_tx; + __le32 mac_id_n_color; + u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ + __le16 reserved2; + u8 sta_id; + u8 modify_mask; + __le16 reserved3; + __le32 station_flags; + __le32 station_flags_msk; + u8 add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; + __le16 sleep_tx_count; + u8 sleep_state_flags; + u8 station_type; + __le16 assoc_id; + __le16 beamform_flags; + __le32 tfd_queue_msk; + __le16 rx_ba_window; + u8 sp_length; + u8 uapsd_acs; +} __packed; /* ADD_STA_CMD_API_S_VER_10 */ + +/** + * struct iwl_mvm_add_sta_key_common - add/modify sta key common part + * ( REPLY_ADD_STA_KEY = 0x17 ) + * @sta_id: index of station in uCode's station table + * @key_offset: key offset in key storage + * @key_flags: type &enum iwl_sta_key_flag + * @key: key material data + * @rx_secur_seq_cnt: RX security sequence counter for the key + */ +struct iwl_mvm_add_sta_key_common { + u8 sta_id; + u8 key_offset; + __le16 key_flags; + u8 key[32]; + u8 rx_secur_seq_cnt[16]; +} __packed; + +/** + * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key + * @common: see &struct iwl_mvm_add_sta_key_common + * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection + * @reserved: reserved + * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx + */ +struct iwl_mvm_add_sta_key_cmd_v1 { + struct iwl_mvm_add_sta_key_common common; + u8 tkip_rx_tsc_byte2; + u8 reserved; + __le16 tkip_rx_ttak[5]; +} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */ + +/** + * struct iwl_mvm_add_sta_key_cmd - add/modify sta key + * @common: see &struct iwl_mvm_add_sta_key_common + * @rx_mic_key: TKIP RX unicast or multicast key + * @tx_mic_key: TKIP TX key + * @transmit_seq_cnt: TSC, transmit packet number + */ +struct iwl_mvm_add_sta_key_cmd { + struct iwl_mvm_add_sta_key_common common; + __le64 
rx_mic_key; + __le64 tx_mic_key; + __le64 transmit_seq_cnt; +} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */ + +/** + * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command + * @ADD_STA_SUCCESS: operation was executed successfully + * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table + * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session + * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that + * doesn't exist. + */ +enum iwl_mvm_add_sta_rsp_status { + ADD_STA_SUCCESS = 0x1, + ADD_STA_STATIONS_OVERLOAD = 0x2, + ADD_STA_IMMEDIATE_BA_FAILURE = 0x4, + ADD_STA_MODIFY_NON_EXISTING_STA = 0x8, +}; + +/** + * struct iwl_mvm_rm_sta_cmd - Add / modify a station in the fw's station table + * ( REMOVE_STA = 0x19 ) + * @sta_id: the station id of the station to be removed + * @reserved: reserved + */ +struct iwl_mvm_rm_sta_cmd { + u8 sta_id; + u8 reserved[3]; +} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */ + +/** + * struct iwl_mvm_mgmt_mcast_key_cmd_v1 + * ( MGMT_MCAST_KEY = 0x1f ) + * @ctrl_flags: &enum iwl_sta_key_flag + * @igtk: IGTK key material + * @k1: unused + * @k2: unused + * @sta_id: station ID that support IGTK + * @key_id: key ID + * @receive_seq_cnt: initial RSC/PN needed for replay check + */ +struct iwl_mvm_mgmt_mcast_key_cmd_v1 { + __le32 ctrl_flags; + u8 igtk[16]; + u8 k1[16]; + u8 k2[16]; + __le32 key_id; + __le32 sta_id; + __le64 receive_seq_cnt; +} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */ + +/** + * struct iwl_mvm_mgmt_mcast_key_cmd + * ( MGMT_MCAST_KEY = 0x1f ) + * @ctrl_flags: &enum iwl_sta_key_flag + * @igtk: IGTK master key + * @sta_id: station ID that support IGTK + * @key_id: key ID + * @receive_seq_cnt: initial RSC/PN needed for replay check + */ +struct iwl_mvm_mgmt_mcast_key_cmd { + __le32 ctrl_flags; + u8 igtk[32]; + __le32 key_id; + __le32 sta_id; + __le64 receive_seq_cnt; +} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */ + +struct iwl_mvm_wep_key { + u8 key_index; + u8 key_offset; + __le16 reserved1; + u8 key_size; + u8 reserved2[3]; + u8 key[16]; +} __packed; + +struct iwl_mvm_wep_key_cmd { + __le32 mac_id_n_color; + u8 num_keys; + u8 decryption_type; + u8 flags; + u8 reserved; + struct iwl_mvm_wep_key wep_key[0]; +} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */ + +/** + * struct iwl_mvm_eosp_notification - EOSP notification from firmware + * @remain_frame_count: # of frames remaining, non-zero if SP was cut + * short by GO absence + * @sta_id: station ID + */ +struct iwl_mvm_eosp_notification { + __le32 remain_frame_count; + __le32 sta_id; +} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */ + +#endif /* __iwl_fw_api_sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h new file mode 100644 index 000000000000..53cb622aa9ab --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h @@ -0,0 +1,474 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
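A sketch of building the key_flags word used by the add-key command above, with the cipher and key-index encodings copied from enum iwl_sta_key_flag earlier in this file; a pairwise CCMP key is assumed and the helper name is illustrative.

#include <stdint.h>
#include <stdbool.h>

#define KEY_FLG_CCM        (2u << 0)    /* STA_KEY_FLG_CCM */
#define KEY_FLG_KEYID_POS  8            /* STA_KEY_FLG_KEYID_POS */
#define KEY_FLG_KEYID_MSK  (3u << KEY_FLG_KEYID_POS)
#define KEY_MULTICAST      (1u << 14)   /* STA_KEY_MULTICAST */
#define KEY_MFP            (1u << 15)   /* STA_KEY_MFP */

static uint16_t sta_key_flags(uint8_t keyidx, bool group, bool mfp)
{
	uint16_t flags = KEY_FLG_CCM;

	flags |= (keyidx << KEY_FLG_KEYID_POS) & KEY_FLG_KEYID_MSK;
	if (group)
		flags |= KEY_MULTICAST;
	if (mfp)
		flags |= KEY_MFP;
	return flags;
}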
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ + +#ifndef __iwl_fw_api_stats_h__ +#define __iwl_fw_api_stats_h__ +#include "mac.h" + +struct mvm_statistics_dbg { + __le32 burst_check; + __le32 burst_count; + __le32 wait_for_silence_timeout_cnt; + u8 reserved[12]; +} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */ + +struct mvm_statistics_div { + __le32 tx_on_a; + __le32 tx_on_b; + __le32 exec_time; + __le32 probe_time; + __le32 rssi_ant; + __le32 reserved2; +} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */ + +/** + * struct mvm_statistics_rx_non_phy + * @bogus_cts: CTS received when not expecting CTS + * @bogus_ack: ACK received when not expecting ACK + * @non_channel_beacons: beacons with our bss id but not on our serving channel + * @channel_beacons: beacons with our bss id and in our serving channel + * @num_missed_bcon: number of missed beacons + * @adc_rx_saturation_time: count in 0.8us units the time the ADC was in + * saturation + * @ina_detection_search_time: total time (in 0.8us) searched for INA + * @beacon_silence_rssi_a: RSSI silence after beacon frame + * @beacon_silence_rssi_b: RSSI silence after beacon frame + * @beacon_silence_rssi_c: RSSI silence after beacon frame + * @interference_data_flag: flag for interference data availability. 1 when data + * is available. + * @channel_load: counts RX Enable time in uSec + * @beacon_rssi_a: beacon RSSI on anntena A + * @beacon_rssi_b: beacon RSSI on antenna B + * @beacon_rssi_c: beacon RSSI on antenna C + * @beacon_energy_a: beacon energy on antenna A + * @beacon_energy_b: beacon energy on antenna B + * @beacon_energy_c: beacon energy on antenna C + * @num_bt_kills: number of BT "kills" (frame TX aborts) + * @mac_id: mac ID + */ +struct mvm_statistics_rx_non_phy { + __le32 bogus_cts; + __le32 bogus_ack; + __le32 non_channel_beacons; + __le32 channel_beacons; + __le32 num_missed_bcon; + __le32 adc_rx_saturation_time; + __le32 ina_detection_search_time; + __le32 beacon_silence_rssi_a; + __le32 beacon_silence_rssi_b; + __le32 beacon_silence_rssi_c; + __le32 interference_data_flag; + __le32 channel_load; + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; + __le32 num_bt_kills; + __le32 mac_id; +} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_4 */ + +struct mvm_statistics_rx_non_phy_v3 { + __le32 bogus_cts; /* CTS received when not expecting CTS */ + __le32 bogus_ack; /* ACK received when not expecting ACK */ + __le32 non_bssid_frames; /* number of frames with BSSID that + * doesn't belong to the STA BSSID */ + __le32 filtered_frames; /* count frames that were dumped in the + * filtering process */ + __le32 non_channel_beacons; /* beacons with our bss id but not on + * our serving channel */ + __le32 channel_beacons; /* beacons with our bss id and in our + * serving channel */ + __le32 num_missed_bcon; /* number of missed beacons */ + __le32 adc_rx_saturation_time; /* count in 0.8us units the time the + * ADC was in saturation */ + __le32 ina_detection_search_time;/* total time (in 0.8us) searched + * for INA */ + __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ + __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ + __le32 interference_data_flag; /* flag for interference data + * availability. 1 when data is + * available. 
*/ + __le32 channel_load; /* counts RX Enable time in uSec */ + __le32 dsp_false_alarms; /* DSP false alarm (both OFDM + * and CCK) counter */ + __le32 beacon_rssi_a; + __le32 beacon_rssi_b; + __le32 beacon_rssi_c; + __le32 beacon_energy_a; + __le32 beacon_energy_b; + __le32 beacon_energy_c; + __le32 num_bt_kills; + __le32 mac_id; + __le32 directed_data_mpdu; +} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */ + +struct mvm_statistics_rx_phy { + __le32 unresponded_rts; + __le32 rxe_frame_lmt_overrun; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 reserved; +} __packed; /* STATISTICS_RX_PHY_API_S_VER_3 */ + +struct mvm_statistics_rx_phy_v2 { + __le32 ina_cnt; + __le32 fina_cnt; + __le32 plcp_err; + __le32 crc32_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 false_alarm_cnt; + __le32 fina_sync_err_cnt; + __le32 sfd_timeout; + __le32 fina_timeout; + __le32 unresponded_rts; + __le32 rxe_frame_lmt_overrun; + __le32 sent_ack_cnt; + __le32 sent_cts_cnt; + __le32 sent_ba_rsp_cnt; + __le32 dsp_self_kill; + __le32 mh_format_err; + __le32 re_acq_main_rssi_sum; + __le32 reserved; +} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */ + +struct mvm_statistics_rx_ht_phy_v1 { + __le32 plcp_err; + __le32 overrun_err; + __le32 early_overrun_err; + __le32 crc32_good; + __le32 crc32_err; + __le32 mh_format_err; + __le32 agg_crc32_good; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */ + +struct mvm_statistics_rx_ht_phy { + __le32 mh_format_err; + __le32 agg_mpdu_cnt; + __le32 agg_cnt; + __le32 unsupport_mcs; +} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_2 */ + +struct mvm_statistics_tx_non_phy_v3 { + __le32 preamble_cnt; + __le32 rx_detected_cnt; + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 expected_ack_cnt; + __le32 actual_ack_cnt; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; +} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */ + +struct mvm_statistics_tx_non_phy { + __le32 bt_prio_defer_cnt; + __le32 bt_prio_kill_cnt; + __le32 few_bytes_cnt; + __le32 cts_timeout; + __le32 ack_timeout; + __le32 dump_msdu_cnt; + __le32 burst_abort_next_frame_mismatch_cnt; + __le32 burst_abort_missing_next_frame_cnt; + __le32 cts_timeout_collision; + __le32 ack_or_ba_timeout_collision; +} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_4 */ + +#define MAX_CHAINS 3 + +struct mvm_statistics_tx_non_phy_agg { + __le32 ba_timeout; + __le32 ba_reschedule_frames; + __le32 scd_query_agg_frame_cnt; + __le32 scd_query_no_agg; + __le32 scd_query_agg; + __le32 scd_query_mismatch; + __le32 frame_not_ready; + __le32 underrun; + __le32 bt_prio_kill; + __le32 rx_ba_rsp_cnt; + __s8 txpower[MAX_CHAINS]; + __s8 reserved; + __le32 reserved2; +} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */ + +struct mvm_statistics_tx_channel_width { + __le32 ext_cca_narrow_ch20[1]; + __le32 ext_cca_narrow_ch40[2]; + __le32 ext_cca_narrow_ch80[3]; + __le32 ext_cca_narrow_ch160[4]; + __le32 last_tx_ch_width_indx; + __le32 rx_detected_per_ch_width[4]; + __le32 success_per_ch_width[4]; + __le32 fail_per_ch_width[4]; +}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */ + +struct mvm_statistics_tx_v4 { + struct mvm_statistics_tx_non_phy_v3 general; + struct mvm_statistics_tx_non_phy_agg agg; + struct mvm_statistics_tx_channel_width 
channel_width; +} __packed; /* STATISTICS_TX_API_S_VER_4 */ + +struct mvm_statistics_tx { + struct mvm_statistics_tx_non_phy general; + struct mvm_statistics_tx_non_phy_agg agg; + struct mvm_statistics_tx_channel_width channel_width; +} __packed; /* STATISTICS_TX_API_S_VER_5 */ + + +struct mvm_statistics_bt_activity { + __le32 hi_priority_tx_req_cnt; + __le32 hi_priority_tx_denied_cnt; + __le32 lo_priority_tx_req_cnt; + __le32 lo_priority_tx_denied_cnt; + __le32 hi_priority_rx_req_cnt; + __le32 hi_priority_rx_denied_cnt; + __le32 lo_priority_rx_req_cnt; + __le32 lo_priority_rx_denied_cnt; +} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ + +struct mvm_statistics_general_common_v19 { + __le32 radio_temperature; + __le32 radio_voltage; + struct mvm_statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct mvm_statistics_div slow_div; + __le32 rx_enable_counter; + /* + * num_of_sos_states: + * count the number of times we have to re-tune + * in order to get out of bad PHY status + */ + __le32 num_of_sos_states; + __le32 beacon_filtered; + __le32 missed_beacons; + u8 beacon_filter_average_energy; + u8 beacon_filter_reason; + u8 beacon_filter_current_energy; + u8 beacon_filter_reserved; + __le32 beacon_filter_delta_time; + struct mvm_statistics_bt_activity bt_activity; + __le64 rx_time; + __le64 on_time_rf; + __le64 on_time_scan; + __le64 tx_time; +} __packed; + +struct mvm_statistics_general_common { + __le32 radio_temperature; + struct mvm_statistics_dbg dbg; + __le32 sleep_time; + __le32 slots_out; + __le32 slots_idle; + __le32 ttl_timestamp; + struct mvm_statistics_div slow_div; + __le32 rx_enable_counter; + /* + * num_of_sos_states: + * count the number of times we have to re-tune + * in order to get out of bad PHY status + */ + __le32 num_of_sos_states; + __le32 beacon_filtered; + __le32 missed_beacons; + u8 beacon_filter_average_energy; + u8 beacon_filter_reason; + u8 beacon_filter_current_energy; + u8 beacon_filter_reserved; + __le32 beacon_filter_delta_time; + struct mvm_statistics_bt_activity bt_activity; + __le64 rx_time; + __le64 on_time_rf; + __le64 on_time_scan; + __le64 tx_time; +} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */ + +struct mvm_statistics_general_v8 { + struct mvm_statistics_general_common_v19 common; + __le32 beacon_counter[NUM_MAC_INDEX]; + u8 beacon_average_energy[NUM_MAC_INDEX]; + u8 reserved[4 - (NUM_MAC_INDEX % 4)]; +} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ + +struct mvm_statistics_general_cdb_v9 { + struct mvm_statistics_general_common_v19 common; + __le32 beacon_counter[NUM_MAC_INDEX_CDB]; + u8 beacon_average_energy[NUM_MAC_INDEX_CDB]; + u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)]; +} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */ + +struct mvm_statistics_general_cdb { + struct mvm_statistics_general_common common; + __le32 beacon_counter[MAC_INDEX_AUX]; + u8 beacon_average_energy[MAC_INDEX_AUX]; + u8 reserved[8 - MAC_INDEX_AUX]; +} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */ + +/** + * struct mvm_statistics_load - RX statistics for multi-queue devices + * @air_time: accumulated air time, per mac + * @byte_count: accumulated byte count, per mac + * @pkt_count: accumulated packet count, per mac + * @avg_energy: average RSSI, per station + */ +struct mvm_statistics_load { + __le32 air_time[MAC_INDEX_AUX]; + __le32 byte_count[MAC_INDEX_AUX]; + __le32 pkt_count[MAC_INDEX_AUX]; + u8 avg_energy[IWL_MVM_STATION_COUNT]; +} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */ + +struct 
mvm_statistics_load_v1 { + __le32 air_time[NUM_MAC_INDEX]; + __le32 byte_count[NUM_MAC_INDEX]; + __le32 pkt_count[NUM_MAC_INDEX]; + u8 avg_energy[IWL_MVM_STATION_COUNT]; +} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */ + +struct mvm_statistics_rx { + struct mvm_statistics_rx_phy ofdm; + struct mvm_statistics_rx_phy cck; + struct mvm_statistics_rx_non_phy general; + struct mvm_statistics_rx_ht_phy ofdm_ht; +} __packed; /* STATISTICS_RX_API_S_VER_4 */ + +struct mvm_statistics_rx_v3 { + struct mvm_statistics_rx_phy_v2 ofdm; + struct mvm_statistics_rx_phy_v2 cck; + struct mvm_statistics_rx_non_phy_v3 general; + struct mvm_statistics_rx_ht_phy_v1 ofdm_ht; +} __packed; /* STATISTICS_RX_API_S_VER_3 */ + +/* + * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) + * + * By default, uCode issues this notification after receiving a beacon + * while associated. To disable this behavior, set DISABLE_NOTIF flag in the + * STATISTICS_CMD (0x9c), below. + */ + +struct iwl_notif_statistics_v10 { + __le32 flag; + struct mvm_statistics_rx_v3 rx; + struct mvm_statistics_tx_v4 tx; + struct mvm_statistics_general_v8 general; +} __packed; /* STATISTICS_NTFY_API_S_VER_10 */ + +struct iwl_notif_statistics_v11 { + __le32 flag; + struct mvm_statistics_rx_v3 rx; + struct mvm_statistics_tx_v4 tx; + struct mvm_statistics_general_v8 general; + struct mvm_statistics_load_v1 load_stats; +} __packed; /* STATISTICS_NTFY_API_S_VER_11 */ + +struct iwl_notif_statistics_cdb { + __le32 flag; + struct mvm_statistics_rx rx; + struct mvm_statistics_tx tx; + struct mvm_statistics_general_cdb general; + struct mvm_statistics_load load_stats; +} __packed; /* STATISTICS_NTFY_API_S_VER_13 */ + +/** + * enum iwl_statistics_notif_flags - flags used in statistics notification + * @IWL_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report + */ +enum iwl_statistics_notif_flags { + IWL_STATISTICS_REPLY_FLG_CLEAR = 0x1, +}; + +/** + * enum iwl_statistics_cmd_flags - flags used in statistics command + * @IWL_STATISTICS_FLG_CLEAR: request to clear statistics after the report + * that's sent after this command + * @IWL_STATISTICS_FLG_DISABLE_NOTIF: disable unilateral statistics + * notifications + */ +enum iwl_statistics_cmd_flags { + IWL_STATISTICS_FLG_CLEAR = 0x1, + IWL_STATISTICS_FLG_DISABLE_NOTIF = 0x2, +}; + +/** + * struct iwl_statistics_cmd - statistics config command + * @flags: flags from &enum iwl_statistics_cmd_flags + */ +struct iwl_statistics_cmd { + __le32 flags; +} __packed; /* STATISTICS_CMD_API_S_VER_1 */ + +#endif /* __iwl_fw_api_stats_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h new file mode 100644 index 000000000000..7c6c2462d0e8 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h @@ -0,0 +1,208 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
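As an illustrative aside to the statistics API added above (not part of the patch itself): a minimal sketch of how a driver-side caller might fill the STATISTICS_CMD (0x9c) payload and test the clear flag in a v11 notification. The example_* helpers are hypothetical; cpu_to_le32()/le32_to_cpu() and bool come from the usual kernel headers.

/* Hypothetical sketch, not part of the patch; uses only definitions from stats.h above. */
static void example_request_stats_snapshot(struct iwl_statistics_cmd *cmd)
{
        /*
         * Ask the firmware to clear its counters after the next report and
         * to stop the per-beacon unsolicited STATISTICS_NOTIFICATION (0x9d).
         */
        cmd->flags = cpu_to_le32(IWL_STATISTICS_FLG_CLEAR |
                                 IWL_STATISTICS_FLG_DISABLE_NOTIF);
        /* the filled command would then be sent as STATISTICS_CMD (0x9c) */
}

static bool example_stats_were_cleared(const struct iwl_notif_statistics_v11 *notif)
{
        /* the notification reports whether the firmware cleared its counters */
        return le32_to_cpu(notif->flag) & IWL_STATISTICS_REPLY_FLG_CLEAR;
}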
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_tdls_h__ +#define __iwl_fw_api_tdls_h__ + +#include "fw/api/tx.h" +#include "fw/api/phy-ctxt.h" + +#define IWL_MVM_TDLS_STA_COUNT 4 + +/* Type of TDLS request */ +enum iwl_tdls_channel_switch_type { + TDLS_SEND_CHAN_SW_REQ = 0, + TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, + TDLS_MOVE_CH, +}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */ + +/** + * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch + * @frame_timestamp: GP2 timestamp of channel-switch request/response packet + * received from peer + * @max_offchan_duration: What amount of microseconds out of a DTIM is given + * to the TDLS off-channel communication. For instance if the DTIM is + * 200TU and the TDLS peer is to be given 25% of the time, the value + * given will be 50TU, or 50 * 1024 if translated into microseconds. 
+ * @switch_time: switch time the peer sent in its channel switch timing IE + * @switch_timeout: switch timeout the peer sent in its channel switch timing IE + */ +struct iwl_tdls_channel_switch_timing { + __le32 frame_timestamp; /* GP2 time of peer packet Rx */ + __le32 max_offchan_duration; /* given in micro-seconds */ + __le32 switch_time; /* given in micro-seconds */ + __le32 switch_timeout; /* given in micro-seconds */ +} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */ + +#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200 + +/** + * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template + * + * A template representing a TDLS channel-switch request or response frame + * + * @switch_time_offset: offset to the channel switch timing IE in the template + * @tx_cmd: Tx parameters for the frame + * @data: frame data + */ +struct iwl_tdls_channel_switch_frame { + __le32 switch_time_offset; + struct iwl_tx_cmd tx_cmd; + u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */ + +/** + * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command + * + * The command is sent to initiate a channel switch and also in response to + * incoming TDLS channel-switch request/response packets from remote peers. + * + * @switch_type: see &enum iwl_tdls_channel_switch_type + * @peer_sta_id: station id of TDLS peer + * @ci: channel we switch to + * @timing: timing related data for command + * @frame: channel-switch request/response template, depending to switch_type + */ +struct iwl_tdls_channel_switch_cmd { + u8 switch_type; + __le32 peer_sta_id; + struct iwl_fw_channel_info ci; + struct iwl_tdls_channel_switch_timing timing; + struct iwl_tdls_channel_switch_frame frame; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */ + +/** + * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification + * + * @status: non-zero on success + * @offchannel_duration: duration given in microseconds + * @sta_id: peer currently performing the channel-switch with + */ +struct iwl_tdls_channel_switch_notif { + __le32 status; + __le32 offchannel_duration; + __le32 sta_id; +} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */ + +/** + * struct iwl_tdls_sta_info - TDLS station info + * + * @sta_id: station id of the TDLS peer + * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx + * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer + * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise + */ +struct iwl_tdls_sta_info { + u8 sta_id; + u8 tx_to_peer_tid; + __le16 tx_to_peer_ssn; + __le32 is_initiator; +} __packed; /* TDLS_STA_INFO_VER_1 */ + +/** + * struct iwl_tdls_config_cmd - TDLS basic config command + * + * @id_and_color: MAC id and color being configured + * @tdls_peer_count: amount of currently connected TDLS peers + * @tx_to_ap_tid: TID reverved vs. the AP for FW based Tx + * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP + * @sta_info: per-station info. 
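As an illustrative aside (not part of the patch): a minimal sketch of filling struct iwl_tdls_channel_switch_cmd, defined above, for an outgoing channel-switch request. The example_* helper and its parameters are hypothetical; memset() and cpu_to_le32() come from the usual kernel headers, and the channel info and frame template are left to the caller.

/* Hypothetical sketch, not part of the patch. */
static void example_start_tdls_chan_switch(struct iwl_tdls_channel_switch_cmd *cmd,
                                           u32 peer_sta_id, u32 rx_gp2_timestamp,
                                           u32 offchan_usec, u32 switch_time_usec,
                                           u32 switch_timeout_usec)
{
        memset(cmd, 0, sizeof(*cmd));

        cmd->switch_type = TDLS_SEND_CHAN_SW_REQ;
        cmd->peer_sta_id = cpu_to_le32(peer_sta_id);

        /* timing derived from the peer's channel switch timing IE / GP2 clock */
        cmd->timing.frame_timestamp = cpu_to_le32(rx_gp2_timestamp);
        cmd->timing.max_offchan_duration = cpu_to_le32(offchan_usec);
        cmd->timing.switch_time = cpu_to_le32(switch_time_usec);
        cmd->timing.switch_timeout = cpu_to_le32(switch_timeout_usec);

        /* cmd->ci (target channel) and cmd->frame (request template plus its
         * iwl_tx_cmd) would be filled by the caller before sending */
}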
Only the first tdls_peer_count entries are set + * @pti_req_data_offset: offset of network-level data for the PTI template + * @pti_req_tx_cmd: Tx parameters for PTI request template + * @pti_req_template: PTI request template data + */ +struct iwl_tdls_config_cmd { + __le32 id_and_color; /* mac id and color */ + u8 tdls_peer_count; + u8 tx_to_ap_tid; + __le16 tx_to_ap_ssn; + struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; + + __le32 pti_req_data_offset; + struct iwl_tx_cmd pti_req_tx_cmd; + u8 pti_req_template[0]; +} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ + +/** + * struct iwl_tdls_config_sta_info_res - TDLS per-station config information + * + * @sta_id: station id of the TDLS peer + * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to + * the peer + */ +struct iwl_tdls_config_sta_info_res { + __le16 sta_id; + __le16 tx_to_peer_last_seq; +} __packed; /* TDLS_STA_INFO_RSP_VER_1 */ + +/** + * struct iwl_tdls_config_res - TDLS config information from FW + * + * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP + * @sta_info: per-station TDLS config information + */ +struct iwl_tdls_config_res { + __le32 tx_to_ap_last_seq; + struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; +} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ + +#endif /* __iwl_fw_api_tdls_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h new file mode 100644 index 000000000000..3721a3ed358b --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -0,0 +1,386 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
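As an illustrative aside (not part of the patch): a minimal sketch of filling struct iwl_tdls_config_cmd, defined above, for a single connected peer. The example_* helper is hypothetical; unused peer entries and the PTI request template fields are simply left zeroed here.

/* Hypothetical sketch, not part of the patch. */
static void example_fill_tdls_config(struct iwl_tdls_config_cmd *cmd,
                                     u32 mac_id_and_color, u8 peer_sta_id,
                                     bool we_initiated)
{
        memset(cmd, 0, sizeof(*cmd));

        cmd->id_and_color = cpu_to_le32(mac_id_and_color);
        cmd->tx_to_ap_tid = 0;                  /* TID reserved vs. the AP */
        cmd->tx_to_ap_ssn = cpu_to_le16(0);

        /* one connected TDLS peer; the remaining entries stay zeroed */
        cmd->tdls_peer_count = 1;
        cmd->sta_info[0].sta_id = peer_sta_id;
        cmd->sta_info[0].tx_to_peer_tid = 0;
        cmd->sta_info[0].tx_to_peer_ssn = cpu_to_le16(0);
        cmd->sta_info[0].is_initiator = cpu_to_le32(we_initiated ? 1 : 0);

        /* pti_req_data_offset / pti_req_tx_cmd / pti_req_template are for the
         * PTI request template and are not shown in this sketch */
}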
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_time_event_h__ +#define __iwl_fw_api_time_event_h__ + +#include "fw/api/phy-ctxt.h" + +/* Time Event types, according to MAC type */ +enum iwl_time_event_type { + /* BSS Station Events */ + TE_BSS_STA_AGGRESSIVE_ASSOC, + TE_BSS_STA_ASSOC, + TE_BSS_EAP_DHCP_PROT, + TE_BSS_QUIET_PERIOD, + + /* P2P Device Events */ + TE_P2P_DEVICE_DISCOVERABLE, + TE_P2P_DEVICE_LISTEN, + TE_P2P_DEVICE_ACTION_SCAN, + TE_P2P_DEVICE_FULL_SCAN, + + /* P2P Client Events */ + TE_P2P_CLIENT_AGGRESSIVE_ASSOC, + TE_P2P_CLIENT_ASSOC, + TE_P2P_CLIENT_QUIET_PERIOD, + + /* P2P GO Events */ + TE_P2P_GO_ASSOC_PROT, + TE_P2P_GO_REPETITIVET_NOA, + TE_P2P_GO_CT_WINDOW, + + /* WiDi Sync Events */ + TE_WIDI_TX_SYNC, + + /* Channel Switch NoA */ + TE_CHANNEL_SWITCH_PERIOD, + + TE_MAX +}; /* MAC_EVENT_TYPE_API_E_VER_1 */ + +/* Time event - defines for command API v1 */ + +/* + * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. + * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only + * the first fragment is scheduled. + * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only + * the first 2 fragments are scheduled. + * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any + * number of fragments are valid. + * + * Other than the constant defined above, specifying a fragmentation value 'x' + * means that the event can be fragmented but only the first 'x' will be + * scheduled. + */ +enum { + TE_V1_FRAG_NONE = 0, + TE_V1_FRAG_SINGLE = 1, + TE_V1_FRAG_DUAL = 2, + TE_V1_FRAG_ENDLESS = 0xffffffff +}; + +/* If a Time Event can be fragmented, this is the max number of fragments */ +#define TE_V1_FRAG_MAX_MSK 0x0fffffff +/* Repeat the time event endlessly (until removed) */ +#define TE_V1_REPEAT_ENDLESS 0xffffffff +/* If a Time Event has bounded repetitions, this is the maximal value */ +#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff + +/* Time Event dependencies: none, on another TE, or in a specific time */ +enum { + TE_V1_INDEPENDENT = 0, + TE_V1_DEP_OTHER = BIT(0), + TE_V1_DEP_TSF = BIT(1), + TE_V1_EVENT_SOCIOPATHIC = BIT(2), +}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ + +/* + * @TE_V1_NOTIF_NONE: no notifications + * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start + * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end + * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use + * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. 
+ * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start + * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end + * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use. + * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use. + * + * Supported Time event notifications configuration. + * A notification (both event and fragment) includes a status indicating weather + * the FW was able to schedule the event or not. For fragment start/end + * notification the status is always success. There is no start/end fragment + * notification for monolithic events. + */ +enum { + TE_V1_NOTIF_NONE = 0, + TE_V1_NOTIF_HOST_EVENT_START = BIT(0), + TE_V1_NOTIF_HOST_EVENT_END = BIT(1), + TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2), + TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3), + TE_V1_NOTIF_HOST_FRAG_START = BIT(4), + TE_V1_NOTIF_HOST_FRAG_END = BIT(5), + TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6), + TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), +}; /* MAC_EVENT_ACTION_API_E_VER_2 */ + +/* Time event - defines for command API */ + +/* + * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed. + * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only + * the first fragment is scheduled. + * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only + * the first 2 fragments are scheduled. + * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any + * number of fragments are valid. + * + * Other than the constant defined above, specifying a fragmentation value 'x' + * means that the event can be fragmented but only the first 'x' will be + * scheduled. + */ +enum { + TE_V2_FRAG_NONE = 0, + TE_V2_FRAG_SINGLE = 1, + TE_V2_FRAG_DUAL = 2, + TE_V2_FRAG_MAX = 0xfe, + TE_V2_FRAG_ENDLESS = 0xff +}; + +/* Repeat the time event endlessly (until removed) */ +#define TE_V2_REPEAT_ENDLESS 0xff +/* If a Time Event has bounded repetitions, this is the maximal value */ +#define TE_V2_REPEAT_MAX 0xfe + +#define TE_V2_PLACEMENT_POS 12 +#define TE_V2_ABSENCE_POS 15 + +/** + * enum iwl_time_event_policy - Time event policy values + * A notification (both event and fragment) includes a status indicating weather + * the FW was able to schedule the event or not. For fragment start/end + * notification the status is always success. There is no start/end fragment + * notification for monolithic events. + * + * @TE_V2_DEFAULT_POLICY: independent, social, present, unoticable + * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start + * @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end + * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use + * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use. + * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start + * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end + * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. + * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. + * @T2_V2_START_IMMEDIATELY: start time event immediately + * @TE_V2_DEP_OTHER: depends on another time event + * @TE_V2_DEP_TSF: depends on a specific time + * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC + * @TE_V2_ABSENCE: are we present or absent during the Time Event. 
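As an illustrative aside (not part of the patch): a minimal sketch of composing a v2 policy and filling struct iwl_time_event_cmd (defined just below) for a BSS association protection event. The example_* helper is hypothetical, and FW_CTXT_ACTION_ADD is assumed to come from the context API headers elsewhere (only FW_CTXT_ACTION_* is referenced in this file).

/* Hypothetical sketch, not part of the patch. */
static void example_fill_assoc_time_event(struct iwl_time_event_cmd *cmd,
                                          u32 mac_id_and_color,
                                          u32 apply_time_gp2, u32 duration_tu)
{
        memset(cmd, 0, sizeof(*cmd));

        cmd->id_and_color = cpu_to_le32(mac_id_and_color);
        /* FW_CTXT_ACTION_ADD is assumed from the context API headers */
        cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        cmd->id = cpu_to_le32(TE_BSS_STA_ASSOC);        /* type, since this is an ADD */

        cmd->apply_time = cpu_to_le32(apply_time_gp2);
        cmd->duration = cpu_to_le32(duration_tu);
        cmd->repeat = 1;
        cmd->max_frags = TE_V2_FRAG_NONE;
        cmd->policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
                                  TE_V2_NOTIF_HOST_EVENT_END |
                                  T2_V2_START_IMMEDIATELY);
}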
+ */ +enum iwl_time_event_policy { + TE_V2_DEFAULT_POLICY = 0x0, + + /* notifications (event start/stop, fragment start/stop) */ + TE_V2_NOTIF_HOST_EVENT_START = BIT(0), + TE_V2_NOTIF_HOST_EVENT_END = BIT(1), + TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2), + TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3), + + TE_V2_NOTIF_HOST_FRAG_START = BIT(4), + TE_V2_NOTIF_HOST_FRAG_END = BIT(5), + TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), + TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), + T2_V2_START_IMMEDIATELY = BIT(11), + + /* placement characteristics */ + TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), + TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1), + TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2), + + /* are we present or absent during the Time Event. */ + TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS), +}; + +/** + * struct iwl_time_event_cmd - configuring Time Events + * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also + * with version 1. determined by IWL_UCODE_TLV_FLAGS) + * ( TIME_EVENT_CMD = 0x29 ) + * @id_and_color: ID and color of the relevant MAC, + * &enum iwl_ctxt_id_and_color + * @action: action to perform, one of &enum iwl_ctxt_action + * @id: this field has two meanings, depending on the action: + * If the action is ADD, then it means the type of event to add. + * For all other actions it is the unique event ID assigned when the + * event was added by the FW. + * @apply_time: When to start the Time Event (in GP2) + * @max_delay: maximum delay to event's start (apply time), in TU + * @depends_on: the unique ID of the event we depend on (if any) + * @interval: interval between repetitions, in TU + * @duration: duration of event in TU + * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS + * @max_frags: maximal number of fragments the Time Event can be divided to + * @policy: defines whether uCode shall notify the host or other uCode modules + * on event and/or fragment start and/or end + * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF + * TE_EVENT_SOCIOPATHIC + * using TE_ABSENCE and using TE_NOTIF_*, + * &enum iwl_time_event_policy + */ +struct iwl_time_event_cmd { + /* COMMON_INDEX_HDR_API_S_VER_1 */ + __le32 id_and_color; + __le32 action; + __le32 id; + /* MAC_TIME_EVENT_DATA_API_S_VER_2 */ + __le32 apply_time; + __le32 max_delay; + __le32 depends_on; + __le32 interval; + __le32 duration; + u8 repeat; + u8 max_frags; + __le16 policy; +} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */ + +/** + * struct iwl_time_event_resp - response structure to iwl_time_event_cmd + * @status: bit 0 indicates success, all others specify errors + * @id: the Time Event type + * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE + * @id_and_color: ID and color of the relevant MAC, + * &enum iwl_ctxt_id_and_color + */ +struct iwl_time_event_resp { + __le32 status; + __le32 id; + __le32 unique_id; + __le32 id_and_color; +} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */ + +/** + * struct iwl_time_event_notif - notifications of time event start/stop + * ( TIME_EVENT_NOTIFICATION = 0x2a ) + * @timestamp: action timestamp in GP2 + * @session_id: session's unique id + * @unique_id: unique id of the Time Event itself + * @id_and_color: ID and color of the relevant MAC + * @action: &enum iwl_time_event_policy + * @status: true if scheduled, false otherwise (not executed) + */ +struct iwl_time_event_notif { + __le32 timestamp; + __le32 session_id; + __le32 unique_id; + __le32 id_and_color; + __le32 action; + __le32 status; +} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */ + +/* + * Aux 
ROC command + * + * Command requests the firmware to create a time event for a certain duration + * and remain on the given channel. This is done by using the Aux framework in + * the FW. + * The command was first used for Hot Spot issues - but can be used regardless + * to Hot Spot. + * + * ( HOT_SPOT_CMD 0x53 ) + * + * @id_and_color: ID and color of the MAC + * @action: action to perform, one of FW_CTXT_ACTION_* + * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the + * event_unique_id should be the id of the time event assigned by ucode. + * Otherwise ignore the event_unique_id. + * @sta_id_and_color: station id and color, resumed during "Remain On Channel" + * activity. + * @channel_info: channel info + * @node_addr: Our MAC Address + * @reserved: reserved for alignment + * @apply_time: GP2 value to start (should always be the current GP2 value) + * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max + * time by which start of the event is allowed to be postponed. + * @duration: event duration in TU To calculate event duration: + * timeEventDuration = min(duration, remainingQuota) + */ +struct iwl_hs20_roc_req { + /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ + __le32 id_and_color; + __le32 action; + __le32 event_unique_id; + __le32 sta_id_and_color; + struct iwl_fw_channel_info channel_info; + u8 node_addr[ETH_ALEN]; + __le16 reserved; + __le32 apply_time; + __le32 apply_time_max_delay; + __le32 duration; +} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ + +/* + * values for AUX ROC result values + */ +enum iwl_mvm_hot_spot { + HOT_SPOT_RSP_STATUS_OK, + HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS, + HOT_SPOT_MAX_NUM_OF_SESSIONS, +}; + +/* + * Aux ROC command response + * + * In response to iwl_hs20_roc_req the FW sends this command to notify the + * driver the uid of the timevent. + * + * ( HOT_SPOT_CMD 0x53 ) + * + * @event_unique_id: Unique ID of time event assigned by ucode + * @status: Return status 0 is success, all the rest used for specific errors + */ +struct iwl_hs20_roc_res { + __le32 event_unique_id; + __le32 status; +} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ + +#endif /* __iwl_fw_api_time_event_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h new file mode 100644 index 000000000000..7328a1606146 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h @@ -0,0 +1,393 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * All rights reserved. 
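As an illustrative aside (not part of the patch): a minimal sketch of filling the Aux ROC request defined above and checking its response. The example_* helpers are hypothetical; memcpy(), ETH_ALEN and the byte-order helpers come from the usual kernel headers, and FW_CTXT_ACTION_ADD is assumed from the context API (this file only references FW_CTXT_ACTION_*).

/* Hypothetical sketch, not part of the patch. */
static void example_fill_aux_roc(struct iwl_hs20_roc_req *cmd,
                                 u32 mac_id_and_color, u32 sta_id_and_color,
                                 const u8 *own_addr, u32 current_gp2,
                                 u32 duration_tu, u32 max_delay_tu)
{
        memset(cmd, 0, sizeof(*cmd));

        cmd->id_and_color = cpu_to_le32(mac_id_and_color);
        cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);  /* assumed, see FW_CTXT_ACTION_* */
        cmd->sta_id_and_color = cpu_to_le32(sta_id_and_color);
        memcpy(cmd->node_addr, own_addr, ETH_ALEN);

        cmd->apply_time = cpu_to_le32(current_gp2);     /* current GP2, per the comment */
        cmd->apply_time_max_delay = cpu_to_le32(max_delay_tu);
        cmd->duration = cpu_to_le32(duration_tu);

        /* cmd->channel_info is filled with the requested off-channel */
}

static bool example_aux_roc_accepted(const struct iwl_hs20_roc_res *res)
{
        return le32_to_cpu(res->status) == HOT_SPOT_RSP_STATUS_OK;
}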
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_api_tof_h__ +#define __iwl_fw_api_tof_h__ + +/* ToF sub-group command IDs */ +enum iwl_mvm_tof_sub_grp_ids { + TOF_RANGE_REQ_CMD = 0x1, + TOF_CONFIG_CMD = 0x2, + TOF_RANGE_ABORT_CMD = 0x3, + TOF_RANGE_REQ_EXT_CMD = 0x4, + TOF_RESPONDER_CONFIG_CMD = 0x5, + TOF_NW_INITIATED_RES_SEND_CMD = 0x6, + TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7, + TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC, + TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD, + TOF_RANGE_RESPONSE_NOTIF = 0xFE, + TOF_MCSI_DEBUG_NOTIF = 0xFB, +}; + +/** + * struct iwl_tof_config_cmd - ToF configuration + * @tof_disabled: 0 enabled, 1 - disabled + * @one_sided_disabled: 0 enabled, 1 - disabled + * @is_debug_mode: 1 debug mode, 0 - otherwise + * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise + */ +struct iwl_tof_config_cmd { + __le32 sub_grp_cmd_id; + u8 tof_disabled; + u8 one_sided_disabled; + u8 is_debug_mode; + u8 is_buf_required; +} __packed; + +/** + * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) + * @burst_period: future use: (currently hard coded in the LMAC) + * The interval between two sequential bursts. + * @min_delta_ftm: future use: (currently hard coded in the LMAC) + * The minimum delay between two sequential FTM Responses + * in the same burst. + * @burst_duration: future use: (currently hard coded in the LMAC) + * The total time for all FTMs handshake in the same burst. + * Affect the time events duration in the LMAC. + * @num_of_burst_exp: future use: (currently hard coded in the LMAC) + * The number of bursts for the current ToF request. Affect + * the number of events allocations in the current iteration. + * @get_ch_est: for xVT only, NA for driver + * @abort_responder: when set to '1' - Responder will terminate its activity + * (all other fields in the command are ignored) + * @recv_sta_req_params: 1 - Responder will ignore the other Responder's + * params and use the recomended Initiator params. 
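As an illustrative aside (not part of the patch): a minimal sketch of filling struct iwl_tof_config_cmd, defined above, to enable ToF with one-sided measurements allowed. The example_* helper is hypothetical; the field values are only one plausible configuration.

/* Hypothetical sketch, not part of the patch. */
static void example_fill_tof_config(struct iwl_tof_config_cmd *cmd)
{
        memset(cmd, 0, sizeof(*cmd));

        cmd->sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
        cmd->tof_disabled = 0;          /* 0 - ToF enabled */
        cmd->one_sided_disabled = 0;    /* 0 - one-sided measurements enabled */
        cmd->is_debug_mode = 0;
        cmd->is_buf_required = 1;       /* channel estimation buffer required */
}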
+ * 0 - otherwise + * @channel_num: current AP Channel + * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rate: current AP rate + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency: + * + * 40 MHz + * 0 below center, 1 above center + * + * 80 MHz + * bits [0..1] + * * 0 the near 20MHz to the center, + * * 1 the far 20MHz to the center + * bit[2] + * as above 40MHz + * @ftm_per_burst: FTMs per Burst + * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response, + * '1' - we measure over the Initial FTM Response + * @asap_mode: ASAP / Non ASAP mode for the current WLS station + * @sta_id: index of the AP STA when in AP mode + * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF + * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @bssid: Current AP BSSID + */ +struct iwl_tof_responder_config_cmd { + __le32 sub_grp_cmd_id; + __le16 burst_period; + u8 min_delta_ftm; + u8 burst_duration; + u8 num_of_burst_exp; + u8 get_ch_est; + u8 abort_responder; + u8 recv_sta_req_params; + u8 channel_num; + u8 bandwidth; + u8 rate; + u8 ctrl_ch_position; + u8 ftm_per_burst; + u8 ftm_resp_ts_avail; + u8 asap_mode; + u8 sta_id; + __le16 tsf_timer_offset_msecs; + __le16 toa_offset; + u8 bssid[ETH_ALEN]; +} __packed; + +/** + * struct iwl_tof_range_request_ext_cmd - extended range req for WLS + * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF + * @reserved: reserved + * @min_delta_ftm: Minimal time between two consecutive measurements, + * in units of 100us. 0 means no preference by station + * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended + * value be sent to the AP + * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended + * value to be sent to the AP + * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended + * value to be sent to the AP + */ +struct iwl_tof_range_req_ext_cmd { + __le32 sub_grp_cmd_id; + __le16 tsf_timer_offset_msec; + __le16 reserved; + u8 min_delta_ftm; + u8 ftm_format_and_bw20M; + u8 ftm_format_and_bw40M; + u8 ftm_format_and_bw80M; +} __packed; + +#define IWL_MVM_TOF_MAX_APS 21 + +/** + * struct iwl_tof_range_req_ap_entry - AP configuration parameters + * @channel_num: Current AP Channel + * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @tsf_delta_direction: TSF relatively to the subject AP + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency. + * 40MHz 0 below center, 1 above center + * 80MHz bits [0..1]: 0 the near 20MHz to the center, + * 1 the far 20MHz to the center + * bit[2] as above 40MHz + * @bssid: AP's bss id + * @measure_type: Measurement type: 0 - two sided, 1 - One sided + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the + * number of measurement iterations (min 2^0 = 1, max 2^14) + * @burst_period: Recommended value to be sent to the AP. Measurement + * periodicity In units of 100ms. ignored if num_of_bursts = 0 + * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31) + * 1-sided: how many rts/cts pairs should be used per burst. + * @retries_per_sample: Max number of retries that the LMAC should send + * in case of no replies by the AP. + * @tsf_delta: TSF Delta in units of microseconds. 
+ * The difference between the AP TSF and the device local clock. + * @location_req: Location Request Bit[0] LCI should be sent in the FTMR + * Bit[1] Civic should be sent in the FTMR + * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) + * @enable_dyn_ack: Enable Dynamic ACK BW. + * 0 Initiator interact with regular AP + * 1 Initiator interact with Responder machine: need to send the + * Initiator Acks with HT 40MHz / 80MHz, since the Responder should + * use it for its ch est measurement (this flag will be set when we + * configure the opposite machine to be Responder). + * @rssi: Last received value + * leagal values: -128-0 (0x7f). above 0x0 indicating an invalid value. + */ +struct iwl_tof_range_req_ap_entry { + u8 channel_num; + u8 bandwidth; + u8 tsf_delta_direction; + u8 ctrl_ch_position; + u8 bssid[ETH_ALEN]; + u8 measure_type; + u8 num_of_bursts; + __le16 burst_period; + u8 samples_per_burst; + u8 retries_per_sample; + __le32 tsf_delta; + u8 location_req; + u8 asap_mode; + u8 enable_dyn_ack; + s8 rssi; +} __packed; + +/** + * enum iwl_tof_response_mode + * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as + * possible (not supported for this release) + * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon + * timeout expiration + * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the + * earlier of: measurements completion / timeout + * expiration. + */ +enum iwl_tof_response_mode { + IWL_MVM_TOF_RESPOSE_ASAP = 1, + IWL_MVM_TOF_RESPOSE_TIMEOUT, + IWL_MVM_TOF_RESPOSE_COMPLETE, +}; + +/** + * struct iwl_tof_range_req_cmd - start measurement cmd + * @request_id: A Token incremented per request. The same Token will be + * sent back in the range response + * @initiator: 0- NW initiated, 1 - Client Initiated + * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided, + * '1' - run ML-Algo for ToF only + * @req_timeout: Requested timeout of the response in units of 100ms. + * This is equivalent to the session time configured to the + * LMAC in Initiator Request + * @report_policy: Supported partially for this release: For current release - + * the range report will be uploaded as a batch when ready or + * when the session is done (successfully / partially). + * one of iwl_tof_response_mode. + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), + * '1' Use MAC Address randomization according to the below + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. + * Bits set to 1 shall be randomized by the UMAC + * @ap: per-AP request data + */ +struct iwl_tof_range_req_cmd { + __le32 sub_grp_cmd_id; + u8 request_id; + u8 initiator; + u8 one_sided_los_disable; + u8 req_timeout; + u8 report_policy; + u8 los_det_disable; + u8 num_of_ap; + u8 macaddr_random; + u8 macaddr_template[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; +} __packed; + +/** + * struct iwl_tof_gen_resp_cmd - generic ToF response + */ +struct iwl_tof_gen_resp_cmd { + __le32 sub_grp_cmd_id; + u8 data[]; +} __packed; + +/** + * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) + * @bssid: BSSID of the AP + * @measure_status: current APs measurement status, one of + * &enum iwl_tof_entry_status. 
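As an illustrative aside (not part of the patch): a minimal sketch of a two-sided range request against a single AP, using struct iwl_tof_range_req_cmd and struct iwl_tof_range_req_ap_entry defined above. The example_* helper and the chosen values (timeout, burst parameters) are hypothetical.

/* Hypothetical sketch, not part of the patch. */
static void example_fill_range_req(struct iwl_tof_range_req_cmd *cmd,
                                   const u8 *ap_bssid, u8 ap_channel)
{
        struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[0];

        memset(cmd, 0, sizeof(*cmd));

        cmd->sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
        cmd->request_id = 1;                    /* echoed back in the response */
        cmd->initiator = 1;                     /* client initiated */
        cmd->req_timeout = 10;                  /* 10 * 100ms session timeout */
        cmd->report_policy = IWL_MVM_TOF_RESPOSE_COMPLETE;
        cmd->num_of_ap = 1;

        memcpy(ap->bssid, ap_bssid, ETH_ALEN);
        ap->channel_num = ap_channel;
        ap->bandwidth = 1;                      /* 40MHz */
        ap->measure_type = 0;                   /* two sided */
        ap->num_of_bursts = 0;                  /* 2^0 = a single burst */
        ap->samples_per_burst = 2;              /* two FTM pairs per burst */
        ap->retries_per_sample = 3;
}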
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz + * @rtt: The Round Trip Time that took for the last measurement for + * current AP [nSec] + * @rtt_variance: The Variance of the RTT values measured for current AP + * @rtt_spread: The Difference between the maximum and the minimum RTT + * values measured for current AP in the current session [nsec] + * @rssi: RSSI as uploaded in the Channel Estimation notification + * @rssi_spread: The Difference between the maximum and the minimum RSSI values + * measured for current AP in the current session + * @reserved: reserved + * @range: Measured range [cm] + * @range_variance: Measured range variance [cm] + * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was + * uploaded by the LMAC + */ +struct iwl_tof_range_rsp_ap_entry_ntfy { + u8 bssid[ETH_ALEN]; + u8 measure_status; + u8 measure_bw; + __le32 rtt; + __le32 rtt_variance; + __le32 rtt_spread; + s8 rssi; + u8 rssi_spread; + __le16 reserved; + __le32 range; + __le32 range_variance; + __le32 timestamp; +} __packed; + +/** + * struct iwl_tof_range_rsp_ntfy - + * @request_id: A Token ID of the corresponding Range request + * @request_status: status of current measurement session + * @last_in_batch: reprot policy (when not all responses are uploaded at once) + * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @ap: per-AP data + */ +struct iwl_tof_range_rsp_ntfy { + u8 request_id; + u8 request_status; + u8 last_in_batch; + u8 num_of_aps; + struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; +} __packed; + +#define IWL_MVM_TOF_MCSI_BUF_SIZE (245) +/** + * struct iwl_tof_mcsi_notif - used for debug + * @token: token ID for the current session + * @role: '0' - initiator, '1' - responder + * @reserved: reserved + * @initiator_bssid: initiator machine + * @responder_bssid: responder machine + * @mcsi_buffer: debug data + */ +struct iwl_tof_mcsi_notif { + u8 token; + u8 role; + __le16 reserved; + u8 initiator_bssid[ETH_ALEN]; + u8 responder_bssid[ETH_ALEN]; + u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; +} __packed; + +/** + * struct iwl_tof_neighbor_report_notif + * @bssid: BSSID of the AP which sent the report + * @request_token: same token as the corresponding request + * @status: + * @report_ie_len: the length of the response frame starting from the Element ID + * @data: the IEs + */ +struct iwl_tof_neighbor_report { + u8 bssid[ETH_ALEN]; + u8 request_token; + u8 status; + __le16 report_ie_len; + u8 data[]; +} __packed; + +/** + * struct iwl_tof_range_abort_cmd + * @request_id: corresponds to a range request + * @reserved: reserved + */ +struct iwl_tof_range_abort_cmd { + __le32 sub_grp_cmd_id; + u8 request_id; + u8 reserved[3]; +} __packed; + +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h new file mode 100644 index 000000000000..d20baedead98 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -0,0 +1,912 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + *****************************************************************************/ + +#ifndef __iwl_fw_api_tx_h__ +#define __iwl_fw_api_tx_h__ + +/** + * enum iwl_tx_flags - bitmasks for tx_flags in TX command + * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame + * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame + * @TX_CMD_FLG_ACK: expect ACK from receiving station + * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command. + * Otherwise, use rate_n_flags from the TX command + * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected + * Must set TX_CMD_FLG_ACK with this flag. + * @TX_CMD_FLG_TXOP_PROT: TXOP protection requested + * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence + * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence + * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC) + * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored + * on old firmwares). 
+ * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame + * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control. + * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command + * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU + * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame + * Should be set for beacons and probe responses + * @TX_CMD_FLG_CALIB: activate PA TX power calibrations + * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count + * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header. + * Should be set for 26/30 length MAC headers + * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW + * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation + * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id + * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped + * @TX_CMD_FLG_EXEC_PAPD: execute PAPD + * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power + * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk + */ +enum iwl_tx_flags { + TX_CMD_FLG_PROT_REQUIRE = BIT(0), + TX_CMD_FLG_WRITE_TX_POWER = BIT(1), + TX_CMD_FLG_ACK = BIT(3), + TX_CMD_FLG_STA_RATE = BIT(4), + TX_CMD_FLG_BAR = BIT(6), + TX_CMD_FLG_TXOP_PROT = BIT(7), + TX_CMD_FLG_VHT_NDPA = BIT(8), + TX_CMD_FLG_HT_NDPA = BIT(9), + TX_CMD_FLG_CSI_FDBK2HOST = BIT(10), + TX_CMD_FLG_BT_PRIO_POS = 11, + TX_CMD_FLG_BT_DIS = BIT(12), + TX_CMD_FLG_SEQ_CTL = BIT(13), + TX_CMD_FLG_MORE_FRAG = BIT(14), + TX_CMD_FLG_TSF = BIT(16), + TX_CMD_FLG_CALIB = BIT(17), + TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18), + TX_CMD_FLG_MH_PAD = BIT(20), + TX_CMD_FLG_RESP_TO_DRV = BIT(21), + TX_CMD_FLG_TKIP_MIC_DONE = BIT(23), + TX_CMD_FLG_DUR = BIT(25), + TX_CMD_FLG_FW_DROP = BIT(26), + TX_CMD_FLG_EXEC_PAPD = BIT(27), + TX_CMD_FLG_PAPD_TYPE = BIT(28), + TX_CMD_FLG_HCCA_CHUNK = BIT(31) +}; /* TX_FLAGS_BITS_API_S_VER_1 */ + +/** + * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000 + * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command + * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs + * to a secured STA + * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate + * selection, retry limits and BT kill + */ +enum iwl_tx_cmd_flags { + IWL_TX_FLAGS_CMD_RATE = BIT(0), + IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1), + IWL_TX_FLAGS_HIGH_PRI = BIT(2), +}; /* TX_FLAGS_BITS_API_S_VER_3 */ + +/** + * enum iwl_tx_pm_timeouts - pm timeout values in TX command + * @PM_FRAME_NONE: no need to suspend sleep mode + * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU + * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec + */ +enum iwl_tx_pm_timeouts { + PM_FRAME_NONE = 0, + PM_FRAME_MGMT = 2, + PM_FRAME_ASSOC = 3, +}; + +#define TX_CMD_SEC_MSK 0x07 +#define TX_CMD_SEC_WEP_KEY_IDX_POS 6 +#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 + +/** + * enum iwl_tx_cmd_sec_ctrl - bitmasks for security control in TX command + * @TX_CMD_SEC_WEP: WEP encryption algorithm. + * @TX_CMD_SEC_CCM: CCM encryption algorithm. + * @TX_CMD_SEC_TKIP: TKIP encryption algorithm. + * @TX_CMD_SEC_EXT: extended cipher algorithm. + * @TX_CMD_SEC_GCMP: GCMP encryption algorithm. + * @TX_CMD_SEC_KEY128: set for 104 bits WEP key. + * @TX_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken + * from the table instead of from the TX command. + * If the key is taken from the key table its index should be given by the + * first byte of the TX command key field. 
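As an illustrative aside (not part of the patch): two tiny sketches of composing tx_flags, one following the guidance in the enum iwl_tx_flags comments above for a unicast management frame, and one using the reduced a000 flag set. The example_* helpers are hypothetical.

/* Hypothetical sketch, not part of the patch. */
static __le32 example_mgmt_tx_flags(void)
{
        /*
         * A unicast management frame: expect an ACK, let the firmware
         * override the sequence control (recommended for mgmt frames above),
         * and leave TX_CMD_FLG_STA_RATE cleared so rate_n_flags from the TX
         * command is used.
         */
        return cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_SEQ_CTL);
}

static __le32 example_a000_eapol_flags(void)
{
        /* a000 devices use the reduced enum iwl_tx_cmd_flags instead */
        return cpu_to_le32(IWL_TX_FLAGS_HIGH_PRI);
}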
+ */ +enum iwl_tx_cmd_sec_ctrl { + TX_CMD_SEC_WEP = 0x01, + TX_CMD_SEC_CCM = 0x02, + TX_CMD_SEC_TKIP = 0x03, + TX_CMD_SEC_EXT = 0x04, + TX_CMD_SEC_GCMP = 0x05, + TX_CMD_SEC_KEY128 = 0x08, + TX_CMD_SEC_KEY_FROM_TABLE = 0x10, +}; + +/* + * TX command Frame life time in us - to be written in pm_frame_timeout + */ +#define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF +#define TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms*/ +#define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */ +#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0 + +/* + * TID for non QoS frames - to be written in tid_tspec + */ +#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT + +/* + * Limits on the retransmissions - to be written in {data,rts}_retry_limit + */ +#define IWL_DEFAULT_TX_RETRY 15 +#define IWL_MGMT_DFAULT_RETRY_LIMIT 3 +#define IWL_RTS_DFAULT_RETRY_LIMIT 60 +#define IWL_BAR_DFAULT_RETRY_LIMIT 60 +#define IWL_LOW_RETRY_LIMIT 7 + +/** + * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values + * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words) + * from mac header end. For normal case it is 4 words for SNAP. + * note: tx_cmd, mac header and pad are not counted in the offset. + * This is used to help the offload in case there is tunneling such as + * IPv6 in IPv4, in such case the ip header offset should point to the + * inner ip header and IPv4 checksum of the external header should be + * calculated by driver. + * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum + * @TX_CMD_OFFLD_L3_EN: enable IP header checksum + * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV + * field. Doesn't include the pad. + * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for + * alignment + * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU + */ +enum iwl_tx_offload_assist_flags_pos { + TX_CMD_OFFLD_IP_HDR = 0, + TX_CMD_OFFLD_L4_EN = 6, + TX_CMD_OFFLD_L3_EN = 7, + TX_CMD_OFFLD_MH_SIZE = 8, + TX_CMD_OFFLD_PAD = 13, + TX_CMD_OFFLD_AMSDU = 14, +}; + +#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f +#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f + +/* TODO: complete documentation for try_cnt and btkill_cnt */ +/** + * struct iwl_tx_cmd - TX command struct to FW + * ( TX_CMD = 0x1c ) + * @len: in bytes of the payload, see below for details + * @offload_assist: TX offload configuration + * @tx_flags: combination of TX_CMD_FLG_* + * @scratch: scratch buffer used by the device + * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is + * cleared. Combination of RATE_MCS_* + * @sta_id: index of destination station in FW station table + * @sec_ctl: security control, TX_CMD_SEC_* + * @initial_rate_index: index into the the rate table for initial TX attempt. + * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames. + * @reserved2: reserved + * @key: security key + * @reserved3: reserved + * @life_time: frame life time (usecs??) + * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt + + * btkill_cnd + reserved), first 32 bits. "0" disables usage. 
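As an illustrative aside (not part of the patch): a minimal sketch of composing the offload_assist field from the positions and masks defined above. The example_* helper is hypothetical; the 16-bit word size is inferred from the comment above (the 8-byte SNAP header counts as 4 words), and BIT() comes from the usual kernel headers.

/* Hypothetical sketch, not part of the patch. */
static __le16 example_offload_assist(unsigned int mh_len_bytes, bool padded)
{
        u16 offload = 0;

        /*
         * Plain SNAP case: the IP header starts 4 (16-bit) words after the
         * end of the MAC header, and both L3 and L4 checksums are offloaded.
         */
        offload |= (4 & IWL_TX_CMD_OFFLD_IP_HDR_MASK) << TX_CMD_OFFLD_IP_HDR;
        offload |= BIT(TX_CMD_OFFLD_L3_EN);     /* IP header checksum */
        offload |= BIT(TX_CMD_OFFLD_L4_EN);     /* TCP/UDP checksum */

        /* MAC header size in 16-bit words, including the IV field */
        offload |= ((mh_len_bytes / 2) & IWL_TX_CMD_OFFLD_MH_MASK) <<
                   TX_CMD_OFFLD_MH_SIZE;
        if (padded)
                offload |= BIT(TX_CMD_OFFLD_PAD);

        return cpu_to_le16(offload);
}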
+ * @dram_msb_ptr: upper bits of the scratch physical address + * @rts_retry_limit: max attempts for RTS + * @data_retry_limit: max attempts to send the data packet + * @tid_tspec: TID/tspec + * @pm_frame_timeout: PM TX frame timeout + * @reserved4: reserved + * @payload: payload (same as @hdr) + * @hdr: 802.11 header (same as @payload) + * + * The byte count (both len and next_frame_len) includes MAC header + * (24/26/30/32 bytes) + * + 2 bytes pad if 26/30 header size + * + 8 byte IV for CCM or TKIP (not used for WEP) + * + Data payload + * + 8-byte MIC (not used for CCM/WEP) + * It does not include post-MAC padding, i.e., + * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes. + * Range of len: 14-2342 bytes. + * + * After the struct fields the MAC header is placed, plus any padding, + * and then the actial payload. + */ +struct iwl_tx_cmd { + __le16 len; + __le16 offload_assist; + __le32 tx_flags; + struct { + u8 try_cnt; + u8 btkill_cnt; + __le16 reserved; + } scratch; /* DRAM_SCRATCH_API_U_VER_1 */ + __le32 rate_n_flags; + u8 sta_id; + u8 sec_ctl; + u8 initial_rate_index; + u8 reserved2; + u8 key[16]; + __le32 reserved3; + __le32 life_time; + __le32 dram_lsb_ptr; + u8 dram_msb_ptr; + u8 rts_retry_limit; + u8 data_retry_limit; + u8 tid_tspec; + __le16 pm_frame_timeout; + __le16 reserved4; + u8 payload[0]; + struct ieee80211_hdr hdr[0]; +} __packed; /* TX_CMD_API_S_VER_6 */ + +struct iwl_dram_sec_info { + __le32 pn_low; + __le16 pn_high; + __le16 aux_info; +} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */ + +/** + * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices + * ( TX_CMD = 0x1c ) + * @len: in bytes of the payload, see below for details + * @offload_assist: TX offload configuration + * @flags: combination of &enum iwl_tx_cmd_flags + * @dram_info: FW internal DRAM storage + * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is + * cleared. 
Combination of RATE_MCS_* + * @hdr: 802.11 header + */ +struct iwl_tx_cmd_gen2 { + __le16 len; + __le16 offload_assist; + __le32 flags; + struct iwl_dram_sec_info dram_info; + __le32 rate_n_flags; + struct ieee80211_hdr hdr[0]; +} __packed; /* TX_CMD_API_S_VER_7 */ + +/* + * TX response related data + */ + +/* + * enum iwl_tx_status - status that is returned by the fw after attempts to Tx + * @TX_STATUS_SUCCESS: + * @TX_STATUS_DIRECT_DONE: + * @TX_STATUS_POSTPONE_DELAY: + * @TX_STATUS_POSTPONE_FEW_BYTES: + * @TX_STATUS_POSTPONE_BT_PRIO: + * @TX_STATUS_POSTPONE_QUIET_PERIOD: + * @TX_STATUS_POSTPONE_CALC_TTAK: + * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY: + * @TX_STATUS_FAIL_SHORT_LIMIT: + * @TX_STATUS_FAIL_LONG_LIMIT: + * @TX_STATUS_FAIL_UNDERRUN: + * @TX_STATUS_FAIL_DRAIN_FLOW: + * @TX_STATUS_FAIL_RFKILL_FLUSH: + * @TX_STATUS_FAIL_LIFE_EXPIRE: + * @TX_STATUS_FAIL_DEST_PS: + * @TX_STATUS_FAIL_HOST_ABORTED: + * @TX_STATUS_FAIL_BT_RETRY: + * @TX_STATUS_FAIL_STA_INVALID: + * @TX_TATUS_FAIL_FRAG_DROPPED: + * @TX_STATUS_FAIL_TID_DISABLE: + * @TX_STATUS_FAIL_FIFO_FLUSHED: + * @TX_STATUS_FAIL_SMALL_CF_POLL: + * @TX_STATUS_FAIL_FW_DROP: + * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and + * STA table + * @TX_FRAME_STATUS_INTERNAL_ABORT: + * @TX_MODE_MSK: + * @TX_MODE_NO_BURST: + * @TX_MODE_IN_BURST_SEQ: + * @TX_MODE_FIRST_IN_BURST: + * @TX_QUEUE_NUM_MSK: + * + * Valid only if frame_count =1 + * TODO: complete documentation + */ +enum iwl_tx_status { + TX_STATUS_MSK = 0x000000ff, + TX_STATUS_SUCCESS = 0x01, + TX_STATUS_DIRECT_DONE = 0x02, + /* postpone TX */ + TX_STATUS_POSTPONE_DELAY = 0x40, + TX_STATUS_POSTPONE_FEW_BYTES = 0x41, + TX_STATUS_POSTPONE_BT_PRIO = 0x42, + TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, + TX_STATUS_POSTPONE_CALC_TTAK = 0x44, + /* abort TX */ + TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, + TX_STATUS_FAIL_SHORT_LIMIT = 0x82, + TX_STATUS_FAIL_LONG_LIMIT = 0x83, + TX_STATUS_FAIL_UNDERRUN = 0x84, + TX_STATUS_FAIL_DRAIN_FLOW = 0x85, + TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, + TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, + TX_STATUS_FAIL_DEST_PS = 0x88, + TX_STATUS_FAIL_HOST_ABORTED = 0x89, + TX_STATUS_FAIL_BT_RETRY = 0x8a, + TX_STATUS_FAIL_STA_INVALID = 0x8b, + TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, + TX_STATUS_FAIL_TID_DISABLE = 0x8d, + TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, + TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f, + TX_STATUS_FAIL_FW_DROP = 0x90, + TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91, + TX_STATUS_INTERNAL_ABORT = 0x92, + TX_MODE_MSK = 0x00000f00, + TX_MODE_NO_BURST = 0x00000000, + TX_MODE_IN_BURST_SEQ = 0x00000100, + TX_MODE_FIRST_IN_BURST = 0x00000200, + TX_QUEUE_NUM_MSK = 0x0001f000, + TX_NARROW_BW_MSK = 0x00060000, + TX_NARROW_BW_1DIV2 = 0x00020000, + TX_NARROW_BW_1DIV4 = 0x00040000, + TX_NARROW_BW_1DIV8 = 0x00060000, +}; + +/* + * enum iwl_tx_agg_status - TX aggregation status + * @AGG_TX_STATE_STATUS_MSK: + * @AGG_TX_STATE_TRANSMITTED: + * @AGG_TX_STATE_UNDERRUN: + * @AGG_TX_STATE_BT_PRIO: + * @AGG_TX_STATE_FEW_BYTES: + * @AGG_TX_STATE_ABORT: + * @AGG_TX_STATE_LAST_SENT_TTL: + * @AGG_TX_STATE_LAST_SENT_TRY_CNT: + * @AGG_TX_STATE_LAST_SENT_BT_KILL: + * @AGG_TX_STATE_SCD_QUERY: + * @AGG_TX_STATE_TEST_BAD_CRC32: + * @AGG_TX_STATE_RESPONSE: + * @AGG_TX_STATE_DUMP_TX: + * @AGG_TX_STATE_DELAY_TX: + * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries + * occur if tx failed for this frame when it was a member of a previous + * aggregation block). 
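As an illustrative aside (not part of the patch): a minimal sketch of decoding the per-frame status from enum iwl_tx_status above; as the comment notes, this applies when frame_count == 1. The example_* helper is hypothetical.

/* Hypothetical sketch, not part of the patch. */
static bool example_tx_frame_acked(u32 status)
{
        u32 s = status & TX_STATUS_MSK;

        /* both values indicate the frame was sent and acknowledged */
        return s == TX_STATUS_SUCCESS || s == TX_STATUS_DIRECT_DONE;
}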
If rate scaling is used, retry count indicates the + * rate table entry used for all frames in the new agg. + *@ AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for + * this frame + * + * TODO: complete documentation + */ +enum iwl_tx_agg_status { + AGG_TX_STATE_STATUS_MSK = 0x00fff, + AGG_TX_STATE_TRANSMITTED = 0x000, + AGG_TX_STATE_UNDERRUN = 0x001, + AGG_TX_STATE_BT_PRIO = 0x002, + AGG_TX_STATE_FEW_BYTES = 0x004, + AGG_TX_STATE_ABORT = 0x008, + AGG_TX_STATE_LAST_SENT_TTL = 0x010, + AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020, + AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040, + AGG_TX_STATE_SCD_QUERY = 0x080, + AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100, + AGG_TX_STATE_RESPONSE = 0x1ff, + AGG_TX_STATE_DUMP_TX = 0x200, + AGG_TX_STATE_DELAY_TX = 0x400, + AGG_TX_STATE_TRY_CNT_POS = 12, + AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS, +}; + +#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \ + AGG_TX_STATE_LAST_SENT_TRY_CNT| \ + AGG_TX_STATE_LAST_SENT_BT_KILL) + +/* + * The mask below describes a status where we are absolutely sure that the MPDU + * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've + * written the bytes to the TXE, but we know nothing about what the DSP did. + */ +#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \ + AGG_TX_STATE_ABORT | \ + AGG_TX_STATE_SCD_QUERY) + +/* + * REPLY_TX = 0x1c (response) + * + * This response may be in one of two slightly different formats, indicated + * by the frame_count field: + * + * 1) No aggregation (frame_count == 1). This reports Tx results for a single + * frame. Multiple attempts, at various bit rates, may have been made for + * this frame. + * + * 2) Aggregation (frame_count > 1). This reports Tx results for two or more + * frames that used block-acknowledge. All frames were transmitted at + * same rate. Rate scaling may have been used if first frame in this new + * agg block failed in previous agg block(s). + * + * Note that, for aggregation, ACK (block-ack) status is not delivered + * here; block-ack has not been received by the time the device records + * this status. + * This status relates to reasons the tx might have been blocked or aborted + * within the device, rather than whether it was received successfully by + * the destination station. + */ + +/** + * struct agg_tx_status - per packet TX aggregation status + * @status: See &enum iwl_tx_agg_status + * @sequence: Sequence # for this frame's Tx cmd (not SSN!) + */ +struct agg_tx_status { + __le16 status; + __le16 sequence; +} __packed; + +/* + * definitions for initial rate index field + * bits [3:0] initial rate index + * bits [6:4] rate table color, used for the initial rate + * bit-7 invalid rate indication + */ +#define TX_RES_INIT_RATE_INDEX_MSK 0x0f +#define TX_RES_RATE_TABLE_COLOR_POS 4 +#define TX_RES_RATE_TABLE_COLOR_MSK 0x70 +#define TX_RES_INV_RATE_INDEX_MSK 0x80 +#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\ + TX_RES_RATE_TABLE_COLOR_POS) + +#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) +#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) + +/** + * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet + * ( REPLY_TX = 0x1c ) + * @frame_count: 1 no aggregation, >1 aggregation + * @bt_kill_count: num of times blocked by bluetooth (unused for agg) + * @failure_rts: num of failures due to unsuccessful RTS + * @failure_frame: num failures due to no ACK (unused for agg) + * @initial_rate: for non-agg: rate of the successful Tx. 
For agg: rate of the + * Tx of all the batch. RATE_MCS_* + * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. + * for agg: RTS + CTS + aggregation tx time + block-ack time. + * in usec. + * @pa_status: tx power info + * @pa_integ_res_a: tx power info + * @pa_integ_res_b: tx power info + * @pa_integ_res_c: tx power info + * @measurement_req_id: tx power info + * @reduced_tpc: transmit power reduction used + * @reserved: reserved + * @tfd_info: TFD information set by the FH + * @seq_ctl: sequence control from the Tx cmd + * @byte_cnt: byte count from the Tx cmd + * @tlc_info: TLC rate info + * @ra_tid: bits [3:0] = ra, bits [7:4] = tid + * @frame_ctrl: frame control + * @status: for non-agg: frame status TX_STATUS_* + * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields + * follow this one, up to frame_count. Length in @frame_count. + * + * After the array of statuses comes the SSN of the SCD. Look at + * %iwl_mvm_get_scd_ssn for more details. + */ +struct iwl_mvm_tx_resp_v3 { + u8 frame_count; + u8 bt_kill_count; + u8 failure_rts; + u8 failure_frame; + __le32 initial_rate; + __le16 wireless_media_time; + + u8 pa_status; + u8 pa_integ_res_a[3]; + u8 pa_integ_res_b[3]; + u8 pa_integ_res_c[3]; + __le16 measurement_req_id; + u8 reduced_tpc; + u8 reserved; + + __le32 tfd_info; + __le16 seq_ctl; + __le16 byte_cnt; + u8 tlc_info; + u8 ra_tid; + __le16 frame_ctrl; + struct agg_tx_status status[]; +} __packed; /* TX_RSP_API_S_VER_3 */ + +/** + * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet + * ( REPLY_TX = 0x1c ) + * @frame_count: 1 no aggregation, >1 aggregation + * @bt_kill_count: num of times blocked by bluetooth (unused for agg) + * @failure_rts: num of failures due to unsuccessful RTS + * @failure_frame: num failures due to no ACK (unused for agg) + * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the + * Tx of all the batch. RATE_MCS_* + * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. + * for agg: RTS + CTS + aggregation tx time + block-ack time. + * in usec. + * @pa_status: tx power info + * @pa_integ_res_a: tx power info + * @pa_integ_res_b: tx power info + * @pa_integ_res_c: tx power info + * @measurement_req_id: tx power info + * @reduced_tpc: transmit power reduction used + * @reserved: reserved + * @tfd_info: TFD information set by the FH + * @seq_ctl: sequence control from the Tx cmd + * @byte_cnt: byte count from the Tx cmd + * @tlc_info: TLC rate info + * @ra_tid: bits [3:0] = ra, bits [7:4] = tid + * @frame_ctrl: frame control + * @tx_queue: TX queue for this response + * @reserved2: reserved for padding/alignment + * @status: for non-agg: frame status TX_STATUS_* + * For version 6 TX response isn't received for aggregation at all. + * + * After the array of statuses comes the SSN of the SCD. Look at + * %iwl_mvm_get_scd_ssn for more details. 
+ */ +struct iwl_mvm_tx_resp { + u8 frame_count; + u8 bt_kill_count; + u8 failure_rts; + u8 failure_frame; + __le32 initial_rate; + __le16 wireless_media_time; + + u8 pa_status; + u8 pa_integ_res_a[3]; + u8 pa_integ_res_b[3]; + u8 pa_integ_res_c[3]; + __le16 measurement_req_id; + u8 reduced_tpc; + u8 reserved; + + __le32 tfd_info; + __le16 seq_ctl; + __le16 byte_cnt; + u8 tlc_info; + u8 ra_tid; + __le16 frame_ctrl; + __le16 tx_queue; + __le16 reserved2; + struct agg_tx_status status; +} __packed; /* TX_RSP_API_S_VER_6 */ + +/** + * struct iwl_mvm_ba_notif - notifies about reception of BA + * ( BA_NOTIF = 0xc5 ) + * @sta_addr: MAC address + * @reserved: reserved + * @sta_id: Index of recipient (BA-sending) station in fw's station table + * @tid: tid of the session + * @seq_ctl: sequence control field + * @bitmap: the bitmap of the BA notification as seen in the air + * @scd_flow: the tx queue this BA relates to + * @scd_ssn: the index of the last contiguously sent packet + * @txed: number of Txed frames in this batch + * @txed_2_done: number of Acked frames in this batch + * @reduced_txp: power reduced according to TPC. This is the actual value and + * not a copy from the LQ command. Thus, if not the first rate was used + * for Tx-ing then this value will be set to 0 by FW. + * @reserved1: reserved + */ +struct iwl_mvm_ba_notif { + u8 sta_addr[ETH_ALEN]; + __le16 reserved; + + u8 sta_id; + u8 tid; + __le16 seq_ctl; + __le64 bitmap; + __le16 scd_flow; + __le16 scd_ssn; + u8 txed; + u8 txed_2_done; + u8 reduced_txp; + u8 reserved1; +} __packed; + +/** + * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue + * @q_num: TFD queue number + * @tfd_index: Index of first un-acked frame in the TFD queue + * @scd_queue: For debug only - the physical queue the TFD queue is bound to + * @tid: TID of the queue (0-7) + * @reserved: reserved for alignment + */ +struct iwl_mvm_compressed_ba_tfd { + __le16 q_num; + __le16 tfd_index; + u8 scd_queue; + u8 tid; + u8 reserved[2]; +} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */ + +/** + * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue + * @q_num: RA TID queue number + * @tid: TID of the queue + * @ssn: BA window current SSN + */ +struct iwl_mvm_compressed_ba_ratid { + u8 q_num; + u8 tid; + __le16 ssn; +} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */ + +/* + * enum iwl_mvm_ba_resp_flags - TX aggregation status + * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA + * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR + * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA + * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun + * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill + * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the + * expected time + */ +enum iwl_mvm_ba_resp_flags { + IWL_MVM_BA_RESP_TX_AGG, + IWL_MVM_BA_RESP_TX_BAR, + IWL_MVM_BA_RESP_TX_AGG_FAIL, + IWL_MVM_BA_RESP_TX_UNDERRUN, + IWL_MVM_BA_RESP_TX_BT_KILL, + IWL_MVM_BA_RESP_TX_DSP_TIMEOUT +}; + +/** + * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA + * ( BA_NOTIF = 0xc5 ) + * @flags: status flag, see the &iwl_mvm_ba_resp_flags + * @sta_id: Index of recipient (BA-sending) station in fw's station table + * @reduced_txp: power reduced according to TPC. This is the actual value and + * not a copy from the LQ command. Thus, if not the first rate was used + * for Tx-ing then this value will be set to 0 by FW. 
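/*
 * Illustrative sketch, not part of the patch: reading the SCD SSN that
 * trails the agg_tx_status array, as described in the kernel-doc above.
 * The helper name is hypothetical and mirrors what iwl_mvm_get_scd_ssn()
 * does in the driver; the 12-bit mask reflects the sequence-number space.
 */
static inline u16 iwl_tx_resp_v3_scd_ssn(const struct iwl_mvm_tx_resp_v3 *resp)
{
	/* the __le32 SSN word immediately follows status[frame_count] */
	const __le32 *ssn = (const void *)&resp->status[resp->frame_count];

	return le32_to_cpup(ssn) & 0x0fff;
}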
+ * @initial_rate: TLC rate info, initial rate index, TLC table color + * @retry_cnt: retry count + * @query_byte_cnt: SCD query byte count + * @query_frame_cnt: SCD query frame count + * @txed: number of frames sent in the aggregation (all-TIDs) + * @done: number of frames that were Acked by the BA (all-TIDs) + * @reserved: reserved (for alignment) + * @wireless_time: Wireless-media time + * @tx_rate: the rate the aggregation was sent at + * @tfd_cnt: number of TFD-Q elements + * @ra_tid_cnt: number of RATID-Q elements + * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd + * for details. + * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See + * &iwl_mvm_compressed_ba_ratid for more details. + */ +struct iwl_mvm_compressed_ba_notif { + __le32 flags; + u8 sta_id; + u8 reduced_txp; + u8 initial_rate; + u8 retry_cnt; + __le32 query_byte_cnt; + __le16 query_frame_cnt; + __le16 txed; + __le16 done; + __le16 reserved; + __le32 wireless_time; + __le32 tx_rate; + __le16 tfd_cnt; + __le16 ra_tid_cnt; + struct iwl_mvm_compressed_ba_tfd tfd[1]; + struct iwl_mvm_compressed_ba_ratid ra_tid[0]; +} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */ + +/** + * struct iwl_mac_beacon_cmd_v6 - beacon template command + * @tx: the tx commands associated with the beacon frame + * @template_id: currently equal to the mac context id of the coresponding + * mac. + * @tim_idx: the offset of the tim IE in the beacon + * @tim_size: the length of the tim IE + * @frame: the template of the beacon frame + */ +struct iwl_mac_beacon_cmd_v6 { + struct iwl_tx_cmd tx; + __le32 template_id; + __le32 tim_idx; + __le32 tim_size; + struct ieee80211_hdr frame[0]; +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */ + +/** + * struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA + * @template_id: currently equal to the mac context id of the coresponding + * mac. 
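/*
 * Illustrative sketch, not part of the patch: walking the variable-length
 * TFD-queue array of the compressed BA notification defined above. 'notif'
 * is assumed to point at a fully received, length-checked COMPRESSED_BA_RES
 * payload; the function name is hypothetical.
 */
static void iwl_walk_compressed_ba(const struct iwl_mvm_compressed_ba_notif *notif)
{
	int i;

	for (i = 0; i < le16_to_cpu(notif->tfd_cnt); i++) {
		const struct iwl_mvm_compressed_ba_tfd *tfd = &notif->tfd[i];

		pr_debug("queue %u: acked up to TFD index %u (tid %u)\n",
			 le16_to_cpu(tfd->q_num),
			 le16_to_cpu(tfd->tfd_index),
			 tfd->tid);
	}
}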
+ * @tim_idx: the offset of the tim IE in the beacon + * @tim_size: the length of the tim IE + * @ecsa_offset: offset to the ECSA IE if present + * @csa_offset: offset to the CSA IE if present + * @frame: the template of the beacon frame + */ +struct iwl_mac_beacon_cmd_data { + __le32 template_id; + __le32 tim_idx; + __le32 tim_size; + __le32 ecsa_offset; + __le32 csa_offset; + struct ieee80211_hdr frame[0]; +}; + +/** + * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA + * @tx: the tx commands associated with the beacon frame + * @data: see &iwl_mac_beacon_cmd_data + */ +struct iwl_mac_beacon_cmd_v7 { + struct iwl_tx_cmd tx; + struct iwl_mac_beacon_cmd_data data; +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ + +/** + * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA + * @byte_cnt: byte count of the beacon frame + * @flags: for future use + * @reserved: reserved + * @data: see &iwl_mac_beacon_cmd_data + */ +struct iwl_mac_beacon_cmd { + __le16 byte_cnt; + __le16 flags; + __le64 reserved; + struct iwl_mac_beacon_cmd_data data; +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */ + +struct iwl_beacon_notif { + struct iwl_mvm_tx_resp beacon_notify_hdr; + __le64 tsf; + __le32 ibss_mgr_status; +} __packed; + +/** + * struct iwl_extended_beacon_notif - notifies about beacon transmission + * @beacon_notify_hdr: tx response command associated with the beacon + * @tsf: last beacon tsf + * @ibss_mgr_status: whether IBSS is manager + * @gp2: last beacon time in gp2 + */ +struct iwl_extended_beacon_notif { + struct iwl_mvm_tx_resp beacon_notify_hdr; + __le64 tsf; + __le32 ibss_mgr_status; + __le32 gp2; +} __packed; /* BEACON_NTFY_API_S_VER_5 */ + +/** + * enum iwl_dump_control - dump (flush) control flags + * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty + * and the TFD queues are empty. 
+ */
+enum iwl_dump_control {
+	DUMP_TX_FIFO_FLUSH = BIT(1),
+};
+
+/**
+ * struct iwl_tx_path_flush_cmd_v1 - queue/FIFO flush command
+ * @queues_ctl: bitmap of queues to flush
+ * @flush_ctl: control flags
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd_v1 {
+	__le32 queues_ctl;
+	__le16 flush_ctl;
+	__le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_path_flush_cmd - queue/FIFO flush command
+ * @sta_id: station ID to flush
+ * @tid_mask: TID mask to flush
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd {
+	__le32 sta_id;
+	__le16 tid_mask;
+	__le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
+
+/* Available options for the SCD_QUEUE_CFG HCMD */
+enum iwl_scd_cfg_actions {
+	SCD_CFG_DISABLE_QUEUE = 0x0,
+	SCD_CFG_ENABLE_QUEUE = 0x1,
+	SCD_CFG_UPDATE_QUEUE_TID = 0x2,
+};
+
+/**
+ * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
+ * @token: unused
+ * @sta_id: station id
+ * @tid: TID
+ * @scd_queue: scheduler queue to configure
+ * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner
+ *	Value is one of &enum iwl_scd_cfg_actions options
+ * @aggregate: 1 aggregated queue, 0 otherwise
+ * @tx_fifo: &enum iwl_mvm_tx_fifo
+ * @window: BA window size
+ * @ssn: SSN for the BA agreement
+ * @reserved: reserved
+ */
+struct iwl_scd_txq_cfg_cmd {
+	u8 token;
+	u8 sta_id;
+	u8 tid;
+	u8 scd_queue;
+	u8 action;
+	u8 aggregate;
+	u8 tx_fifo;
+	u8 window;
+	__le16 ssn;
+	__le16 reserved;
+} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_scd_txq_cfg_rsp
+ * @token: taken from the command
+ * @sta_id: station id from the command
+ * @tid: tid from the command
+ * @scd_queue: scd_queue from the command
+ */
+struct iwl_scd_txq_cfg_rsp {
+	u8 token;
+	u8 sta_id;
+	u8 tid;
+	u8 scd_queue;
+} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_tx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
new file mode 100644
index 000000000000..7e7ebe82729b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -0,0 +1,156 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
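/*
 * Illustrative sketch, not part of the patch: filling the legacy SCD
 * queue-config command defined above to enable an aggregation queue.
 * The values and the helper name are illustrative; IWL_MVM_TX_FIFO_BE
 * comes from the fw/api/txq.h file added just below.
 */
static void iwl_fill_scd_txq_cfg(struct iwl_scd_txq_cfg_cmd *cmd,
				 u8 sta_id, u8 tid, u8 queue, u16 ssn)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->sta_id = sta_id;
	cmd->tid = tid;
	cmd->scd_queue = queue;
	cmd->action = SCD_CFG_ENABLE_QUEUE;
	cmd->aggregate = 1;
	cmd->tx_fifo = IWL_MVM_TX_FIFO_BE;
	cmd->window = 64;		/* BA window size, illustrative */
	cmd->ssn = cpu_to_le16(ssn);
}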
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_api_txq_h__ +#define __iwl_fw_api_txq_h__ + +/* Tx queue numbers for non-DQA mode */ +enum { + IWL_MVM_OFFCHANNEL_QUEUE = 8, + IWL_MVM_CMD_QUEUE = 9, +}; + +/* + * DQA queue numbers + * + * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW + * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames + * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames + * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames + * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure + * that we are never left without the possibility to connect to an AP. + * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames. + * Each MGMT queue is mapped to a single STA + * MGMT frames are frames that return true on ieee80211_is_mgmt() + * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames + * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe + * responses + * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames. 
+ * DATA frames are intended for !ieee80211_is_mgmt() frames, but if + * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues + * as well + * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames + */ +enum iwl_mvm_dqa_txq { + IWL_MVM_DQA_CMD_QUEUE = 0, + IWL_MVM_DQA_AUX_QUEUE = 1, + IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, + IWL_MVM_DQA_GCAST_QUEUE = 3, + IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, + IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, + IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, + IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, + IWL_MVM_DQA_MIN_DATA_QUEUE = 10, + IWL_MVM_DQA_MAX_DATA_QUEUE = 31, +}; + +enum iwl_mvm_tx_fifo { + IWL_MVM_TX_FIFO_BK = 0, + IWL_MVM_TX_FIFO_BE, + IWL_MVM_TX_FIFO_VI, + IWL_MVM_TX_FIFO_VO, + IWL_MVM_TX_FIFO_MCAST = 5, + IWL_MVM_TX_FIFO_CMD = 7, +}; + +/** + * iwl_tx_queue_cfg_actions - TXQ config options + * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue + * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format + */ +enum iwl_tx_queue_cfg_actions { + TX_QUEUE_CFG_ENABLE_QUEUE = BIT(0), + TX_QUEUE_CFG_TFD_SHORT_FORMAT = BIT(1), +}; + +/** + * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command + * @sta_id: station id + * @tid: tid of the queue + * @flags: see &enum iwl_tx_queue_cfg_actions + * @cb_size: size of TFD cyclic buffer. Value is exponent - 3. + * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs) + * @byte_cnt_addr: address of byte count table + * @tfdq_addr: address of TFD circular buffer + */ +struct iwl_tx_queue_cfg_cmd { + u8 sta_id; + u8 tid; + __le16 flags; + __le32 cb_size; + __le64 byte_cnt_addr; + __le64 tfdq_addr; +} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */ + +/** + * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config + * @queue_number: queue number assigned to this RA -TID + * @flags: set on failure + * @write_pointer: initial value for write pointer + */ +struct iwl_tx_queue_cfg_rsp { + __le16 queue_number; + __le16 flags; + __le16 write_pointer; + __le16 reserved; +} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */ + +#endif /* __iwl_fw_api_txq_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c index 2f4044922be3..1610722b8099 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c @@ -61,10 +61,7 @@ *****************************************************************************/ #include "iwl-drv.h" #include "runtime.h" - -/* FIXME */ -#define FW_PAGING_BLOCK_CMD 0x4f -#define FW_GET_ITEM_CMD 0x1a +#include "fw/api/commands.h" void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt) { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 02f1bc985383..75575290a3e4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -61,7 +61,8 @@ #include "iwl-config.h" #include "iwl-trans.h" #include "img.h" -#include "api.h" +#include "fw/api/debug.h" +#include "fw/api/paging.h" struct iwl_fw_runtime_ops { int (*dump_start)(void *ctx); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c index 053993bf00f9..065a951cefba 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -61,11 +61,7 @@ *****************************************************************************/ #include "iwl-drv.h" #include "runtime.h" - -/* FIXME */ -#define SHARED_MEM_CFG_CMD 0x00 -#define SYSTEM_GROUP 0x2 -#define 
SHARED_MEM_CFG 0x25 +#include "fw/api/commands.h" static void iwl_parse_shared_mem_a000(struct iwl_fw_runtime *fwrt, struct iwl_rx_packet *pkt) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index eb6842abb4c7..e90abbfba718 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -76,7 +76,8 @@ #include "iwl-config.h" #include "fw/img.h" #include "iwl-op-mode.h" -#include "fw/api.h" +#include "fw/api/cmdhdr.h" +#include "fw/api/txq.h" /** * DOC: Transport layer - what is it ? diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 34dd5c40ce77..6c5c6510428a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -67,7 +67,7 @@ #include #include -#include "fw-api-coex.h" +#include "fw/api/coex.h" #include "iwl-modparams.h" #include "mvm.h" #include "iwl-debug.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index a7ac281e5cde..71a01df96f8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -65,7 +65,7 @@ * *****************************************************************************/ #include "mvm.h" -#include "fw-api-tof.h" +#include "fw/api/tof.h" #include "debugfs.h" static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h deleted file mode 100644 index 8cd06aaa1f54..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h +++ /dev/null @@ -1,257 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2017 Intel Deutschland GmbH - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __fw_api_bt_coex_h__ -#define __fw_api_bt_coex_h__ - -#include -#include - -#define BITS(nb) (BIT(nb) - 1) - -enum iwl_bt_coex_lut_type { - BT_COEX_TIGHT_LUT = 0, - BT_COEX_LOOSE_LUT, - BT_COEX_TX_DIS_LUT, - - BT_COEX_MAX_LUT, - BT_COEX_INVALID_LUT = 0xff, -}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */ - -#define BT_COEX_CORUN_LUT_SIZE (32) -#define BT_REDUCED_TX_POWER_BIT BIT(7) - -enum iwl_bt_coex_mode { - BT_COEX_DISABLE = 0x0, - BT_COEX_NW = 0x1, - BT_COEX_BT = 0x2, - BT_COEX_WIFI = 0x3, -}; /* BT_COEX_MODES_E */ - -enum iwl_bt_coex_enabled_modules { - BT_COEX_MPLUT_ENABLED = BIT(0), - BT_COEX_MPLUT_BOOST_ENABLED = BIT(1), - BT_COEX_SYNC2SCO_ENABLED = BIT(2), - BT_COEX_CORUN_ENABLED = BIT(3), - BT_COEX_HIGH_BAND_RET = BIT(4), -}; /* BT_COEX_MODULES_ENABLE_E_VER_1 */ - -/** - * struct iwl_bt_coex_cmd - bt coex configuration command - * @mode: &enum iwl_bt_coex_mode - * @enabled_modules: &enum iwl_bt_coex_enabled_modules - * - * The structure is used for the BT_COEX command. - */ -struct iwl_bt_coex_cmd { - __le32 mode; - __le32 enabled_modules; -} __packed; /* BT_COEX_CMD_API_S_VER_6 */ - -/** - * struct iwl_bt_coex_corun_lut_update - bt coex update the corun lut - * @corun_lut20: co-running 20 MHz LUT configuration - * @corun_lut40: co-running 40 MHz LUT configuration - * - * The structure is used for the BT_COEX_UPDATE_CORUN_LUT command. 
- */ -struct iwl_bt_coex_corun_lut_update_cmd { - __le32 corun_lut20[BT_COEX_CORUN_LUT_SIZE]; - __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE]; -} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */ - -/** - * struct iwl_bt_coex_reduced_txp_update_cmd - * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the - * bits are the sta_id (value) - */ -struct iwl_bt_coex_reduced_txp_update_cmd { - __le32 reduced_txp; -} __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */ - -/** - * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command - * @bt_primary_ci: primary channel inhibition bitmap - * @primary_ch_phy_id: primary channel PHY ID - * @bt_secondary_ci: secondary channel inhibition bitmap - * @secondary_ch_phy_id: secondary channel PHY ID - * - * Used for BT_COEX_CI command - */ -struct iwl_bt_coex_ci_cmd { - __le64 bt_primary_ci; - __le32 primary_ch_phy_id; - - __le64 bt_secondary_ci; - __le32 secondary_ch_phy_id; -} __packed; /* BT_CI_MSG_API_S_VER_2 */ - -#define BT_MBOX(n_dw, _msg, _pos, _nbits) \ - BT_MBOX##n_dw##_##_msg##_POS = (_pos), \ - BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS - -enum iwl_bt_mxbox_dw0 { - BT_MBOX(0, LE_SLAVE_LAT, 0, 3), - BT_MBOX(0, LE_PROF1, 3, 1), - BT_MBOX(0, LE_PROF2, 4, 1), - BT_MBOX(0, LE_PROF_OTHER, 5, 1), - BT_MBOX(0, CHL_SEQ_N, 8, 4), - BT_MBOX(0, INBAND_S, 13, 1), - BT_MBOX(0, LE_MIN_RSSI, 16, 4), - BT_MBOX(0, LE_SCAN, 20, 1), - BT_MBOX(0, LE_ADV, 21, 1), - BT_MBOX(0, LE_MAX_TX_POWER, 24, 4), - BT_MBOX(0, OPEN_CON_1, 28, 2), -}; - -enum iwl_bt_mxbox_dw1 { - BT_MBOX(1, BR_MAX_TX_POWER, 0, 4), - BT_MBOX(1, IP_SR, 4, 1), - BT_MBOX(1, LE_MSTR, 5, 1), - BT_MBOX(1, AGGR_TRFC_LD, 8, 6), - BT_MBOX(1, MSG_TYPE, 16, 3), - BT_MBOX(1, SSN, 19, 2), -}; - -enum iwl_bt_mxbox_dw2 { - BT_MBOX(2, SNIFF_ACT, 0, 3), - BT_MBOX(2, PAG, 3, 1), - BT_MBOX(2, INQUIRY, 4, 1), - BT_MBOX(2, CONN, 5, 1), - BT_MBOX(2, SNIFF_INTERVAL, 8, 5), - BT_MBOX(2, DISC, 13, 1), - BT_MBOX(2, SCO_TX_ACT, 16, 2), - BT_MBOX(2, SCO_RX_ACT, 18, 2), - BT_MBOX(2, ESCO_RE_TX, 20, 2), - BT_MBOX(2, SCO_DURATION, 24, 6), -}; - -enum iwl_bt_mxbox_dw3 { - BT_MBOX(3, SCO_STATE, 0, 1), - BT_MBOX(3, SNIFF_STATE, 1, 1), - BT_MBOX(3, A2DP_STATE, 2, 1), - BT_MBOX(3, ACL_STATE, 3, 1), - BT_MBOX(3, MSTR_STATE, 4, 1), - BT_MBOX(3, OBX_STATE, 5, 1), - BT_MBOX(3, OPEN_CON_2, 8, 2), - BT_MBOX(3, TRAFFIC_LOAD, 10, 2), - BT_MBOX(3, CHL_SEQN_LSB, 12, 1), - BT_MBOX(3, INBAND_P, 13, 1), - BT_MBOX(3, MSG_TYPE_2, 16, 3), - BT_MBOX(3, SSN_2, 19, 2), - BT_MBOX(3, UPDATE_REQUEST, 21, 1), -}; - -#define BT_MBOX_MSG(_notif, _num, _field) \ - ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ - >> BT_MBOX##_num##_##_field##_POS) - -enum iwl_bt_activity_grading { - BT_OFF = 0, - BT_ON_NO_CONNECTION = 1, - BT_LOW_TRAFFIC = 2, - BT_HIGH_TRAFFIC = 3, - - BT_MAX_AG, -}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */ - -enum iwl_bt_ci_compliance { - BT_CI_COMPLIANCE_NONE = 0, - BT_CI_COMPLIANCE_PRIMARY = 1, - BT_CI_COMPLIANCE_SECONDARY = 2, - BT_CI_COMPLIANCE_BOTH = 3, -}; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */ - -#define IWL_COEX_IS_TTC_ON(_ttc_rrc_status, _phy_id) \ - (_ttc_rrc_status & BIT(_phy_id)) - -#define IWL_COEX_IS_RRC_ON(_ttc_rrc_status, _phy_id) \ - ((_ttc_rrc_status >> 4) & BIT(_phy_id)) - -/** - * struct iwl_bt_coex_profile_notif - notification about BT coex - * @mbox_msg: message from BT to WiFi - * @msg_idx: the index of the message - * @bt_ci_compliance: enum %iwl_bt_ci_compliance - * @primary_ch_lut: LUT used for primary channel &enum 
iwl_bt_coex_lut_type - * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type - * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading - * @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY - * @reserved: reserved - */ -struct iwl_bt_coex_profile_notif { - __le32 mbox_msg[4]; - __le32 msg_idx; - __le32 bt_ci_compliance; - - __le32 primary_ch_lut; - __le32 secondary_ch_lut; - __le32 bt_activity_grading; - u8 ttc_rrc_status; - u8 reserved[3]; -} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */ - -#endif /* __fw_api_bt_coex_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h deleted file mode 100644 index d4a4c28b7192..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h +++ /dev/null @@ -1,471 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
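/*
 * Illustrative sketch, not part of the patch (which only relocates these
 * coex definitions to fw/api/coex.h): reading one mailbox field from a
 * BT-coex profile notification with the BT_MBOX_MSG() accessor defined
 * above. The function name is hypothetical.
 */
static u32 iwl_bt_notif_sco_duration(const struct iwl_bt_coex_profile_notif *notif)
{
	/* expands to a masked, shifted read of mbox_msg[2] */
	return BT_MBOX_MSG(notif, 2, SCO_DURATION);
}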
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __fw_api_d3_h__ -#define __fw_api_d3_h__ - -/** - * enum iwl_d3_wakeup_flags - D3 manager wakeup flags - * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert - */ -enum iwl_d3_wakeup_flags { - IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0), -}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */ - -/** - * struct iwl_d3_manager_config - D3 manager configuration command - * @min_sleep_time: minimum sleep time (in usec) - * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags - * @wakeup_host_timer: force wakeup after this many seconds - * - * The structure is used for the D3_CONFIG_CMD command. - */ -struct iwl_d3_manager_config { - __le32 min_sleep_time; - __le32 wakeup_flags; - __le32 wakeup_host_timer; -} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */ - - -/* TODO: OFFLOADS_QUERY_API_S_VER_1 */ - -/** - * enum iwl_d3_proto_offloads - enabled protocol offloads - * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled - * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled - * @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid - * @IWL_D3_PROTO_IPV6_VALID: IPv6 data is valid - */ -enum iwl_proto_offloads { - IWL_D3_PROTO_OFFLOAD_ARP = BIT(0), - IWL_D3_PROTO_OFFLOAD_NS = BIT(1), - IWL_D3_PROTO_IPV4_VALID = BIT(2), - IWL_D3_PROTO_IPV6_VALID = BIT(3), -}; - -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2 -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6 -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L 12 -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S 4 -#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 12 - -#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L 4 -#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S 2 - -/** - * struct iwl_proto_offload_cmd_common - ARP/NS offload common part - * @enabled: enable flags - * @remote_ipv4_addr: remote address to answer to (or zero if all) - * @host_ipv4_addr: our IPv4 address to respond to queries for - * @arp_mac_addr: our MAC address for ARP responses - * @reserved: unused - */ -struct iwl_proto_offload_cmd_common { - __le32 enabled; - __be32 remote_ipv4_addr; - __be32 host_ipv4_addr; - u8 arp_mac_addr[ETH_ALEN]; - __le16 reserved; -} __packed; - -/** - * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration - * @common: common/IPv4 configuration - * @remote_ipv6_addr: remote address to answer to (or zero if all) - * @solicited_node_ipv6_addr: broken -- solicited node address exists - * for each target address - * @target_ipv6_addr: our target addresses - * @ndp_mac_addr: neighbor solicitation response MAC address - * @reserved2: reserved - */ -struct iwl_proto_offload_cmd_v1 { - struct iwl_proto_offload_cmd_common common; - u8 remote_ipv6_addr[16]; - u8 solicited_node_ipv6_addr[16]; - u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16]; - u8 ndp_mac_addr[ETH_ALEN]; - __le16 reserved2; -} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */ - -/** - * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration - * @common: common/IPv4 
configuration - * @remote_ipv6_addr: remote address to answer to (or zero if all) - * @solicited_node_ipv6_addr: broken -- solicited node address exists - * for each target address - * @target_ipv6_addr: our target addresses - * @ndp_mac_addr: neighbor solicitation response MAC address - * @num_valid_ipv6_addrs: number of valid IPv6 addresses - * @reserved2: reserved - */ -struct iwl_proto_offload_cmd_v2 { - struct iwl_proto_offload_cmd_common common; - u8 remote_ipv6_addr[16]; - u8 solicited_node_ipv6_addr[16]; - u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16]; - u8 ndp_mac_addr[ETH_ALEN]; - u8 num_valid_ipv6_addrs; - u8 reserved2[3]; -} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */ - -struct iwl_ns_config { - struct in6_addr source_ipv6_addr; - struct in6_addr dest_ipv6_addr; - u8 target_mac_addr[ETH_ALEN]; - __le16 reserved; -} __packed; /* NS_OFFLOAD_CONFIG */ - -struct iwl_targ_addr { - struct in6_addr addr; - __le32 config_num; -} __packed; /* TARGET_IPV6_ADDRESS */ - -/** - * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration - * @common: common/IPv4 configuration - * @num_valid_ipv6_addrs: number of valid IPv6 addresses - * @targ_addrs: target IPv6 addresses - * @ns_config: NS offload configurations - */ -struct iwl_proto_offload_cmd_v3_small { - struct iwl_proto_offload_cmd_common common; - __le32 num_valid_ipv6_addrs; - struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S]; - struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S]; -} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ - -/** - * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration - * @common: common/IPv4 configuration - * @num_valid_ipv6_addrs: number of valid IPv6 addresses - * @targ_addrs: target IPv6 addresses - * @ns_config: NS offload configurations - */ -struct iwl_proto_offload_cmd_v3_large { - struct iwl_proto_offload_cmd_common common; - __le32 num_valid_ipv6_addrs; - struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L]; - struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L]; -} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */ - -/* - * WOWLAN_PATTERNS - */ -#define IWL_WOWLAN_MIN_PATTERN_LEN 16 -#define IWL_WOWLAN_MAX_PATTERN_LEN 128 - -struct iwl_wowlan_pattern { - u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; - u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN]; - u8 mask_size; - u8 pattern_size; - __le16 reserved; -} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */ - -#define IWL_WOWLAN_MAX_PATTERNS 20 - -struct iwl_wowlan_patterns_cmd { - __le32 n_patterns; - struct iwl_wowlan_pattern patterns[]; -} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */ - -enum iwl_wowlan_wakeup_filters { - IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0), - IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1), - IWL_WOWLAN_WAKEUP_BEACON_MISS = BIT(2), - IWL_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3), - IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4), - IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5), - IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6), - IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(7), - IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8), - IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9), - IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10), - IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11), - IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12), - IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13), - IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14), - IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15), - IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16), -}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */ - -enum 
iwl_wowlan_flags { - IS_11W_ASSOC = BIT(0), - ENABLE_L3_FILTERING = BIT(1), - ENABLE_NBNS_FILTERING = BIT(2), - ENABLE_DHCP_FILTERING = BIT(3), - ENABLE_STORE_BEACON = BIT(4), -}; - -/** - * struct iwl_wowlan_config_cmd - WoWLAN configuration - * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters - * @non_qos_seq: non-QoS sequence counter to use next - * @qos_seq: QoS sequence counters to use next - * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down - * @is_11n_connection: indicates HT connection - * @offloading_tid: TID reserved for firmware use - * @flags: extra flags, see &enum iwl_wowlan_flags - * @reserved: reserved - */ -struct iwl_wowlan_config_cmd { - __le32 wakeup_filter; - __le16 non_qos_seq; - __le16 qos_seq[8]; - u8 wowlan_ba_teardown_tids; - u8 is_11n_connection; - u8 offloading_tid; - u8 flags; - u8 reserved[2]; -} __packed; /* WOWLAN_CONFIG_API_S_VER_4 */ - -/* - * WOWLAN_TSC_RSC_PARAMS - */ -#define IWL_NUM_RSC 16 - -struct tkip_sc { - __le16 iv16; - __le16 pad; - __le32 iv32; -} __packed; /* TKIP_SC_API_U_VER_1 */ - -struct iwl_tkip_rsc_tsc { - struct tkip_sc unicast_rsc[IWL_NUM_RSC]; - struct tkip_sc multicast_rsc[IWL_NUM_RSC]; - struct tkip_sc tsc; -} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */ - -struct aes_sc { - __le64 pn; -} __packed; /* TKIP_AES_SC_API_U_VER_1 */ - -struct iwl_aes_rsc_tsc { - struct aes_sc unicast_rsc[IWL_NUM_RSC]; - struct aes_sc multicast_rsc[IWL_NUM_RSC]; - struct aes_sc tsc; -} __packed; /* AES_TSC_RSC_API_S_VER_1 */ - -union iwl_all_tsc_rsc { - struct iwl_tkip_rsc_tsc tkip; - struct iwl_aes_rsc_tsc aes; -}; /* ALL_TSC_RSC_API_S_VER_2 */ - -struct iwl_wowlan_rsc_tsc_params_cmd { - union iwl_all_tsc_rsc all_tsc_rsc; -} __packed; /* ALL_TSC_RSC_API_S_VER_2 */ - -#define IWL_MIC_KEY_SIZE 8 -struct iwl_mic_keys { - u8 tx[IWL_MIC_KEY_SIZE]; - u8 rx_unicast[IWL_MIC_KEY_SIZE]; - u8 rx_mcast[IWL_MIC_KEY_SIZE]; -} __packed; /* MIC_KEYS_API_S_VER_1 */ - -#define IWL_P1K_SIZE 5 -struct iwl_p1k_cache { - __le16 p1k[IWL_P1K_SIZE]; -} __packed; - -#define IWL_NUM_RX_P1K_CACHE 2 - -struct iwl_wowlan_tkip_params_cmd { - struct iwl_mic_keys mic_keys; - struct iwl_p1k_cache tx; - struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE]; - struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE]; -} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */ - -#define IWL_KCK_MAX_SIZE 32 -#define IWL_KEK_MAX_SIZE 32 - -struct iwl_wowlan_kek_kck_material_cmd { - u8 kck[IWL_KCK_MAX_SIZE]; - u8 kek[IWL_KEK_MAX_SIZE]; - __le16 kck_len; - __le16 kek_len; - __le64 replay_ctr; -} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */ - -#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87 - -enum iwl_wowlan_rekey_status { - IWL_WOWLAN_REKEY_POST_REKEY = 0, - IWL_WOWLAN_REKEY_WHILE_REKEY = 1, -}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */ - -enum iwl_wowlan_wakeup_reason { - IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS = 0, - IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET = BIT(0), - IWL_WOWLAN_WAKEUP_BY_PATTERN = BIT(1), - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON = BIT(2), - IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH = BIT(3), - IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE = BIT(4), - IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED = BIT(5), - IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR = BIT(6), - IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST = BIT(7), - IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8), - IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9), - IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10), - IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11), - IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12), - 
IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET = BIT(13), - IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14), - IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15), - IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16), - -}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */ - -struct iwl_wowlan_gtk_status { - u8 key_index; - u8 reserved[3]; - u8 decrypt_key[16]; - u8 tkip_mic_key[8]; - struct iwl_wowlan_rsc_tsc_params_cmd rsc; -} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */ - -/** - * struct iwl_wowlan_status - WoWLAN status - * @gtk: GTK data - * @replay_ctr: GTK rekey replay counter - * @pattern_number: number of the matched pattern - * @non_qos_seq_ctr: non-QoS sequence counter to use next - * @qos_seq_ctr: QoS sequence counters to use next - * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason - * @num_of_gtk_rekeys: number of GTK rekeys - * @transmitted_ndps: number of transmitted neighbor discovery packets - * @received_beacons: number of received beacons - * @wake_packet_length: wakeup packet length - * @wake_packet_bufsize: wakeup packet buffer size - * @wake_packet: wakeup packet - */ -struct iwl_wowlan_status { - struct iwl_wowlan_gtk_status gtk; - __le64 replay_ctr; - __le16 pattern_number; - __le16 non_qos_seq_ctr; - __le16 qos_seq_ctr[8]; - __le32 wakeup_reasons; - __le32 num_of_gtk_rekeys; - __le32 transmitted_ndps; - __le32 received_beacons; - __le32 wake_packet_length; - __le32 wake_packet_bufsize; - u8 wake_packet[]; /* can be truncated from _length to _bufsize */ -} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */ - -#define IWL_WOWLAN_TCP_MAX_PACKET_LEN 64 -#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN 128 -#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS 2048 - -struct iwl_tcp_packet_info { - __le16 tcp_pseudo_header_checksum; - __le16 tcp_payload_length; -} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */ - -struct iwl_tcp_packet { - struct iwl_tcp_packet_info info; - u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; - u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN]; -} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */ - -struct iwl_remote_wake_packet { - struct iwl_tcp_packet_info info; - u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8]; - u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN]; -} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */ - -struct iwl_wowlan_remote_wake_config { - __le32 connection_max_time; /* unused */ - /* TCP_PROTOCOL_CONFIG_API_S_VER_1 */ - u8 max_syn_retries; - u8 max_data_retries; - u8 tcp_syn_ack_timeout; - u8 tcp_ack_timeout; - - struct iwl_tcp_packet syn_tx; - struct iwl_tcp_packet synack_rx; - struct iwl_tcp_packet keepalive_ack_rx; - struct iwl_tcp_packet fin_tx; - - struct iwl_remote_wake_packet keepalive_tx; - struct iwl_remote_wake_packet wake_rx; - - /* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */ - u8 sequence_number_offset; - u8 sequence_number_length; - u8 token_offset; - u8 token_length; - /* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */ - __le32 initial_sequence_number; - __le16 keepalive_interval; - __le16 num_tokens; - u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS]; -} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */ - -/* TODO: NetDetect API */ - -#endif /* __fw_api_d3_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h deleted file mode 100644 index 0c3350ad2f2f..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h +++ /dev/null @@ -1,396 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. 
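/*
 * Illustrative sketch, not part of the patch: testing the wakeup_reasons
 * bitmap of the iwl_wowlan_status structure defined above. The helper
 * name is hypothetical.
 */
static bool iwl_wowlan_woke_on_magic_packet(const struct iwl_wowlan_status *status)
{
	return le32_to_cpu(status->wakeup_reasons) &
	       IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET;
}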
When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __fw_api_mac_h__ -#define __fw_api_mac_h__ - -/* - * The first MAC indices (starting from 0) are available to the driver, - * AUX indices follows - 1 for non-CDB, 2 for CDB. - */ -#define MAC_INDEX_AUX 4 -#define MAC_INDEX_MIN_DRIVER 0 -#define NUM_MAC_INDEX_DRIVER MAC_INDEX_AUX -#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1) -#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2) - -#define IWL_MVM_STATION_COUNT 16 -#define IWL_MVM_INVALID_STA 0xFF - -#define IWL_MVM_TDLS_STA_COUNT 4 - -enum iwl_ac { - AC_BK, - AC_BE, - AC_VI, - AC_VO, - AC_NUM, -}; - -/** - * enum iwl_mac_protection_flags - MAC context flags - * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames, - * this will require CCK RTS/CTS2self. 
- * RTS/CTS will protect full burst time. - * @MAC_PROT_FLG_HT_PROT: enable HT protection - * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions - * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self - */ -enum iwl_mac_protection_flags { - MAC_PROT_FLG_TGG_PROTECT = BIT(3), - MAC_PROT_FLG_HT_PROT = BIT(23), - MAC_PROT_FLG_FAT_PROT = BIT(24), - MAC_PROT_FLG_SELF_CTS_EN = BIT(30), -}; - -#define MAC_FLG_SHORT_SLOT BIT(4) -#define MAC_FLG_SHORT_PREAMBLE BIT(5) - -/** - * enum iwl_mac_types - Supported MAC types - * @FW_MAC_TYPE_FIRST: lowest supported MAC type - * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal) - * @FW_MAC_TYPE_LISTENER: monitor MAC type (?) - * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS - * @FW_MAC_TYPE_IBSS: IBSS - * @FW_MAC_TYPE_BSS_STA: BSS (managed) station - * @FW_MAC_TYPE_P2P_DEVICE: P2P Device - * @FW_MAC_TYPE_P2P_STA: P2P client - * @FW_MAC_TYPE_GO: P2P GO - * @FW_MAC_TYPE_TEST: ? - * @FW_MAC_TYPE_MAX: highest support MAC type - */ -enum iwl_mac_types { - FW_MAC_TYPE_FIRST = 1, - FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST, - FW_MAC_TYPE_LISTENER, - FW_MAC_TYPE_PIBSS, - FW_MAC_TYPE_IBSS, - FW_MAC_TYPE_BSS_STA, - FW_MAC_TYPE_P2P_DEVICE, - FW_MAC_TYPE_P2P_STA, - FW_MAC_TYPE_GO, - FW_MAC_TYPE_TEST, - FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST -}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */ - -/** - * enum iwl_tsf_id - TSF hw timer ID - * @TSF_ID_A: use TSF A - * @TSF_ID_B: use TSF B - * @TSF_ID_C: use TSF C - * @TSF_ID_D: use TSF D - * @NUM_TSF_IDS: number of TSF timers available - */ -enum iwl_tsf_id { - TSF_ID_A = 0, - TSF_ID_B = 1, - TSF_ID_C = 2, - TSF_ID_D = 3, - NUM_TSF_IDS = 4, -}; /* TSF_ID_API_E_VER_1 */ - -/** - * struct iwl_mac_data_ap - configuration data for AP MAC context - * @beacon_time: beacon transmit time in system time - * @beacon_tsf: beacon transmit time in TSF - * @bi: beacon interval in TU - * @bi_reciprocal: 2^32 / bi - * @dtim_interval: dtim transmit time in TU - * @dtim_reciprocal: 2^32 / dtim_interval - * @mcast_qid: queue ID for multicast traffic. 
- * NOTE: obsolete from VER2 and on - * @beacon_template: beacon template ID - */ -struct iwl_mac_data_ap { - __le32 beacon_time; - __le64 beacon_tsf; - __le32 bi; - __le32 bi_reciprocal; - __le32 dtim_interval; - __le32 dtim_reciprocal; - __le32 mcast_qid; - __le32 beacon_template; -} __packed; /* AP_MAC_DATA_API_S_VER_2 */ - -/** - * struct iwl_mac_data_ibss - configuration data for IBSS MAC context - * @beacon_time: beacon transmit time in system time - * @beacon_tsf: beacon transmit time in TSF - * @bi: beacon interval in TU - * @bi_reciprocal: 2^32 / bi - * @beacon_template: beacon template ID - */ -struct iwl_mac_data_ibss { - __le32 beacon_time; - __le64 beacon_tsf; - __le32 bi; - __le32 bi_reciprocal; - __le32 beacon_template; -} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */ - -/** - * struct iwl_mac_data_sta - configuration data for station MAC context - * @is_assoc: 1 for associated state, 0 otherwise - * @dtim_time: DTIM arrival time in system time - * @dtim_tsf: DTIM arrival time in TSF - * @bi: beacon interval in TU, applicable only when associated - * @bi_reciprocal: 2^32 / bi , applicable only when associated - * @dtim_interval: DTIM interval in TU, applicable only when associated - * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated - * @listen_interval: in beacon intervals, applicable only when associated - * @assoc_id: unique ID assigned by the AP during association - * @assoc_beacon_arrive_time: TSF of first beacon after association - */ -struct iwl_mac_data_sta { - __le32 is_assoc; - __le32 dtim_time; - __le64 dtim_tsf; - __le32 bi; - __le32 bi_reciprocal; - __le32 dtim_interval; - __le32 dtim_reciprocal; - __le32 listen_interval; - __le32 assoc_id; - __le32 assoc_beacon_arrive_time; -} __packed; /* STA_MAC_DATA_API_S_VER_1 */ - -/** - * struct iwl_mac_data_go - configuration data for P2P GO MAC context - * @ap: iwl_mac_data_ap struct with most config data - * @ctwin: client traffic window in TU (period after TBTT when GO is present). - * 0 indicates that there is no CT window. - * @opp_ps_enabled: indicate that opportunistic PS allowed - */ -struct iwl_mac_data_go { - struct iwl_mac_data_ap ap; - __le32 ctwin; - __le32 opp_ps_enabled; -} __packed; /* GO_MAC_DATA_API_S_VER_1 */ - -/** - * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context - * @sta: iwl_mac_data_sta struct with most config data - * @ctwin: client traffic window in TU (period after TBTT when GO is present). - * 0 indicates that there is no CT window. - */ -struct iwl_mac_data_p2p_sta { - struct iwl_mac_data_sta sta; - __le32 ctwin; -} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */ - -/** - * struct iwl_mac_data_pibss - Pseudo IBSS config data - * @stats_interval: interval in TU between statistics notifications to host. - */ -struct iwl_mac_data_pibss { - __le32 stats_interval; -} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */ - -/* - * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC - * context. - * @is_disc_extended: if set to true, P2P Device discoverability is enabled on - * other channels as well. This should be to true only in case that the - * device is discoverable and there is an active GO. Note that setting this - * field when not needed, will increase the number of interrupts and have - * effect on the platform power, as this setting opens the Rx filters on - * all macs. 
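/*
 * Illustrative sketch, not part of the patch: how the beacon-interval
 * fields of iwl_mac_data_sta (defined above) are typically filled, using
 * the iwl_mvm_reciprocal() helper found near the end of this header.
 * The function name and parameters are illustrative.
 */
static void iwl_fill_sta_timing(struct iwl_mac_data_sta *ctxt_sta,
				u32 beacon_int, u8 dtim_period)
{
	u32 dtim_interval = beacon_int * dtim_period;

	ctxt_sta->bi = cpu_to_le32(beacon_int);
	ctxt_sta->bi_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(beacon_int));
	ctxt_sta->dtim_interval = cpu_to_le32(dtim_interval);
	ctxt_sta->dtim_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(dtim_interval));
}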
- */ -struct iwl_mac_data_p2p_dev { - __le32 is_disc_extended; -} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */ - -/** - * enum iwl_mac_filter_flags - MAC context filter flags - * @MAC_FILTER_IN_PROMISC: accept all data frames - * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and - * control frames to the host - * @MAC_FILTER_ACCEPT_GRP: accept multicast frames - * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames - * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames - * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host - * (in station mode when associated) - * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames - * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames - * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host - */ -enum iwl_mac_filter_flags { - MAC_FILTER_IN_PROMISC = BIT(0), - MAC_FILTER_IN_CONTROL_AND_MGMT = BIT(1), - MAC_FILTER_ACCEPT_GRP = BIT(2), - MAC_FILTER_DIS_DECRYPT = BIT(3), - MAC_FILTER_DIS_GRP_DECRYPT = BIT(4), - MAC_FILTER_IN_BEACON = BIT(6), - MAC_FILTER_OUT_BCAST = BIT(8), - MAC_FILTER_IN_CRC32 = BIT(11), - MAC_FILTER_IN_PROBE_REQUEST = BIT(12), -}; - -/** - * enum iwl_mac_qos_flags - QoS flags - * @MAC_QOS_FLG_UPDATE_EDCA: ? - * @MAC_QOS_FLG_TGN: HT is enabled - * @MAC_QOS_FLG_TXOP_TYPE: ? - * - */ -enum iwl_mac_qos_flags { - MAC_QOS_FLG_UPDATE_EDCA = BIT(0), - MAC_QOS_FLG_TGN = BIT(1), - MAC_QOS_FLG_TXOP_TYPE = BIT(4), -}; - -/** - * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD - * @cw_min: Contention window, start value in numbers of slots. - * Should be a power-of-2, minus 1. Device's default is 0x0f. - * @cw_max: Contention window, max value in numbers of slots. - * Should be a power-of-2, minus 1. Device's default is 0x3f. - * @aifsn: Number of slots in Arbitration Interframe Space (before - * performing random backoff timing prior to Tx). Device default 1. - * @fifos_mask: FIFOs used by this MAC for this AC - * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0. - * - * One instance of this config struct for each of 4 EDCA access categories - * in struct iwl_qosparam_cmd. - * - * Device will automatically increase contention window by (2*CW) + 1 for each - * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW - * value, to cap the CW value. 
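/*
 * Illustrative sketch, not part of the patch: encoding mac80211 EDCA
 * parameters into the iwl_ac_qos entry defined just below. The mapping
 * mirrors what the mvm driver does when building MAC_CONTEXT_CMD; the
 * function name is hypothetical. mac80211's txop is in 32-usec units,
 * hence the multiplication.
 */
static void iwl_fill_ac_qos(struct iwl_ac_qos *ac,
			    const struct ieee80211_tx_queue_params *params,
			    u8 fifo)
{
	ac->cw_min = cpu_to_le16(params->cw_min);
	ac->cw_max = cpu_to_le16(params->cw_max);
	ac->aifsn = params->aifs;
	ac->fifos_mask = BIT(fifo);
	ac->edca_txop = cpu_to_le16(params->txop * 32);
}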
- */ -struct iwl_ac_qos { - __le16 cw_min; - __le16 cw_max; - u8 aifsn; - u8 fifos_mask; - __le16 edca_txop; -} __packed; /* AC_QOS_API_S_VER_2 */ - -/** - * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts - * ( MAC_CONTEXT_CMD = 0x28 ) - * @id_and_color: ID and color of the MAC - * @action: action to perform, one of FW_CTXT_ACTION_* - * @mac_type: one of &enum iwl_mac_types - * @tsf_id: TSF HW timer, one of &enum iwl_tsf_id - * @node_addr: MAC address - * @reserved_for_node_addr: reserved - * @bssid_addr: BSSID - * @reserved_for_bssid_addr: reserved - * @cck_rates: basic rates available for CCK - * @ofdm_rates: basic rates available for OFDM - * @protection_flags: combination of &enum iwl_mac_protection_flags - * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise - * @short_slot: 0x10 for enabling short slots, 0 otherwise - * @filter_flags: combination of &enum iwl_mac_filter_flags - * @qos_flags: from &enum iwl_mac_qos_flags - * @ac: one iwl_mac_qos configuration for each AC - */ -struct iwl_mac_ctx_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */ - __le32 mac_type; - __le32 tsf_id; - u8 node_addr[6]; - __le16 reserved_for_node_addr; - u8 bssid_addr[6]; - __le16 reserved_for_bssid_addr; - __le32 cck_rates; - __le32 ofdm_rates; - __le32 protection_flags; - __le32 cck_short_preamble; - __le32 short_slot; - __le32 filter_flags; - /* MAC_QOS_PARAM_API_S_VER_1 */ - __le32 qos_flags; - struct iwl_ac_qos ac[AC_NUM+1]; - /* MAC_CONTEXT_COMMON_DATA_API_S */ - union { - struct iwl_mac_data_ap ap; - struct iwl_mac_data_go go; - struct iwl_mac_data_sta sta; - struct iwl_mac_data_p2p_sta p2p_sta; - struct iwl_mac_data_p2p_dev p2p_dev; - struct iwl_mac_data_pibss pibss; - struct iwl_mac_data_ibss ibss; - }; -} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */ - -static inline u32 iwl_mvm_reciprocal(u32 v) -{ - if (!v) - return 0; - return 0xFFFFFFFF / v; -} - -#define IWL_NONQOS_SEQ_GET 0x1 -#define IWL_NONQOS_SEQ_SET 0x2 -struct iwl_nonqos_seq_query_cmd { - __le32 get_set_flag; - __le32 mac_id_n_color; - __le16 value; - __le16 reserved; -} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */ - -#endif /* __fw_api_mac_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h deleted file mode 100644 index 7da57ef2454e..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h +++ /dev/null @@ -1,531 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
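Returning briefly to the MAC context command that closes fw-api-mac.h above, here is a hedged sketch of how an ADD operation might be filled in. FW_CMD_ID_AND_COLOR(), FW_CTXT_ACTION_ADD and FW_MAC_TYPE_BSS_STA are assumed to come from the common firmware API definitions that are not part of this hunk, and the helper name is illustrative:

static void example_fill_mac_ctxt_add(struct iwl_mac_ctx_cmd *cmd,
                                      u32 mac_id, u32 color, const u8 *addr)
{
        memset(cmd, 0, sizeof(*cmd));
        cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, color));
        cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
        cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
        memcpy(cmd->node_addr, addr, ETH_ALEN);
        /* accept multicast and foreign beacons, per the filter flags above */
        cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
                                        MAC_FILTER_IN_BEACON);
}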
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - -#ifndef __fw_api_power_h__ -#define __fw_api_power_h__ - -/* Power Management Commands, Responses, Notifications */ - -/** - * enum iwl_ltr_config_flags - masks for LTR config command flags - * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status - * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow - * memory access - * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR - * reg change - * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from - * D0 to D3 - * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register - * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register - * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD - * @LTR_CFG_FLAG_UPDATE_VALUES: update config values and short - * idle timeout - */ -enum iwl_ltr_config_flags { - LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0), - LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1), - LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2), - LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3), - LTR_CFG_FLAG_SW_SET_SHORT = BIT(4), - LTR_CFG_FLAG_SW_SET_LONG = BIT(5), - LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6), - LTR_CFG_FLAG_UPDATE_VALUES = BIT(7), -}; - -/** - * struct iwl_ltr_config_cmd_v1 - configures the LTR - * @flags: See &enum iwl_ltr_config_flags - * @static_long: static LTR Long register value. - * @static_short: static LTR Short register value. - */ -struct iwl_ltr_config_cmd_v1 { - __le32 flags; - __le32 static_long; - __le32 static_short; -} __packed; /* LTR_CAPABLE_API_S_VER_1 */ - -#define LTR_VALID_STATES_NUM 4 - -/** - * struct iwl_ltr_config_cmd - configures the LTR - * @flags: See &enum iwl_ltr_config_flags - * @static_long: static LTR Long register value. - * @static_short: static LTR Short register value. - * @ltr_cfg_values: LTR parameters table values (in usec) in folowing order: - * TX, RX, Short Idle, Long Idle. Used only if %LTR_CFG_FLAG_UPDATE_VALUES - * is set. - * @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if - * %LTR_CFG_FLAG_UPDATE_VALUES is set. - */ -struct iwl_ltr_config_cmd { - __le32 flags; - __le32 static_long; - __le32 static_short; - __le32 ltr_cfg_values[LTR_VALID_STATES_NUM]; - __le32 ltr_short_idle_timeout; -} __packed; /* LTR_CAPABLE_API_S_VER_2 */ - -/* Radio LP RX Energy Threshold measured in dBm */ -#define POWER_LPRX_RSSI_THRESHOLD 75 -#define POWER_LPRX_RSSI_THRESHOLD_MAX 94 -#define POWER_LPRX_RSSI_THRESHOLD_MIN 30 - -/** - * enum iwl_power_flags - masks for power table command flags - * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off - * receiver and transmitter. '0' - does not allow. - * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, - * '1' Driver enables PM (use rest of parameters) - * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM, - * '1' PM could sleep over DTIM till listen Interval. - * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all - * access categories are both delivery and trigger enabled. - * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and - * PBW Snoozing enabled - * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask - * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable. 
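A sketch of an LTR configuration using the version 2 command above, with LTR_CFG_FLAG_UPDATE_VALUES set so the per-state table is taken into account. The numeric values are placeholders rather than recommended settings, and the helper name is illustrative:

static void example_fill_ltr(struct iwl_ltr_config_cmd *cmd)
{
        cmd->flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE |
                                 LTR_CFG_FLAG_UPDATE_VALUES);
        /* table order per the kernel-doc: TX, RX, Short Idle, Long Idle */
        cmd->ltr_cfg_values[0] = cpu_to_le32(100);
        cmd->ltr_cfg_values[1] = cpu_to_le32(100);
        cmd->ltr_cfg_values[2] = cpu_to_le32(500);
        cmd->ltr_cfg_values[3] = cpu_to_le32(4000);
        cmd->ltr_short_idle_timeout = cpu_to_le32(100);
}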
- * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving - * detection enablement -*/ -enum iwl_power_flags { - POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), - POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1), - POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2), - POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5), - POWER_FLAGS_BT_SCO_ENA = BIT(8), - POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9), - POWER_FLAGS_LPRX_ENA_MSK = BIT(11), - POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK = BIT(12), -}; - -#define IWL_POWER_VEC_SIZE 5 - -/** - * struct iwl_powertable_cmd - legacy power command. Beside old API support this - * is used also with a new power API for device wide power settings. - * POWER_TABLE_CMD = 0x77 (command, has simple generic response) - * - * @flags: Power table command flags from POWER_FLAGS_* - * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. - * Minimum allowed:- 3 * DTIM. Keep alive period must be - * set regardless of power scheme or current power state. - * FW use this value also when PM is disabled. - * @debug_flags: debug flags - * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to - * PSM transition - legacy PM - * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to - * PSM transition - legacy PM - * @sleep_interval: not in use - * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag - * is set. For example, if it is required to skip over - * one DTIM, this value need to be set to 2 (DTIM periods). - * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. - * Default: 80dbm - */ -struct iwl_powertable_cmd { - /* PM_POWER_TABLE_CMD_API_S_VER_6 */ - __le16 flags; - u8 keep_alive_seconds; - u8 debug_flags; - __le32 rx_data_timeout; - __le32 tx_data_timeout; - __le32 sleep_interval[IWL_POWER_VEC_SIZE]; - __le32 skip_dtim_periods; - __le32 lprx_rssi_threshold; -} __packed; - -/** - * enum iwl_device_power_flags - masks for device power command flags - * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK: - * '1' Allow to save power by turning off - * receiver and transmitter. '0' - does not allow. -*/ -enum iwl_device_power_flags { - DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0), -}; - -/** - * struct iwl_device_power_cmd - device wide power command. - * DEVICE_POWER_CMD = 0x77 (command, has simple generic response) - * - * @flags: Power table command flags from &enum iwl_device_power_flags - * @reserved: reserved (padding) - */ -struct iwl_device_power_cmd { - /* PM_POWER_TABLE_CMD_API_S_VER_6 */ - __le16 flags; - __le16 reserved; -} __packed; - -/** - * struct iwl_mac_power_cmd - New power command containing uAPSD support - * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response) - * @id_and_color: MAC contex identifier, &enum iwl_mvm_id_and_color - * @flags: Power table command flags from POWER_FLAGS_* - * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec. - * Minimum allowed:- 3 * DTIM. Keep alive period must be - * set regardless of power scheme or current power state. - * FW use this value also when PM is disabled. - * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to - * PSM transition - legacy PM - * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to - * PSM transition - legacy PM - * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag - * is set. For example, if it is required to skip over - * one DTIM, this value need to be set to 2 (DTIM periods). 
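Tying the legacy power table fields together, a minimal sketch of a command that enables power management and sleeps over one DTIM (hence skip_dtim_periods = 2, as explained above). Values are placeholders and the helper name is illustrative:

static void example_fill_powertable(struct iwl_powertable_cmd *cmd)
{
        cmd->flags = cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK |
                                 POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK |
                                 POWER_FLAGS_SKIP_OVER_DTIM_MSK);
        cmd->keep_alive_seconds = 25;   /* the documented default */
        cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
        cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
        /* skipping over one DTIM means two DTIM periods */
        cmd->skip_dtim_periods = cpu_to_le32(2);
}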
- * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to - * PSM transition - uAPSD - * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to - * PSM transition - uAPSD - * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled. - * Default: 80dbm - * @snooze_interval: Maximum time between attempts to retrieve buffered data - * from the AP [msec] - * @snooze_window: A window of time in which PBW snoozing insures that all - * packets received. It is also the minimum time from last - * received unicast RX packet, before client stops snoozing - * for data. [msec] - * @snooze_step: TBD - * @qndp_tid: TID client shall use for uAPSD QNDP triggers - * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for - * each corresponding AC. - * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values. - * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct - * values. - * @heavy_tx_thld_packets: TX threshold measured in number of packets - * @heavy_rx_thld_packets: RX threshold measured in number of packets - * @heavy_tx_thld_percentage: TX threshold measured in load's percentage - * @heavy_rx_thld_percentage: RX threshold measured in load's percentage - * @limited_ps_threshold: (unused) - * @reserved: reserved (padding) - */ -struct iwl_mac_power_cmd { - /* CONTEXT_DESC_API_T_VER_1 */ - __le32 id_and_color; - - /* CLIENT_PM_POWER_TABLE_S_VER_1 */ - __le16 flags; - __le16 keep_alive_seconds; - __le32 rx_data_timeout; - __le32 tx_data_timeout; - __le32 rx_data_timeout_uapsd; - __le32 tx_data_timeout_uapsd; - u8 lprx_rssi_threshold; - u8 skip_dtim_periods; - __le16 snooze_interval; - __le16 snooze_window; - u8 snooze_step; - u8 qndp_tid; - u8 uapsd_ac_flags; - u8 uapsd_max_sp; - u8 heavy_tx_thld_packets; - u8 heavy_rx_thld_packets; - u8 heavy_tx_thld_percentage; - u8 heavy_rx_thld_percentage; - u8 limited_ps_threshold; - u8 reserved; -} __packed; - -/* - * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when - * associated AP is identified as improperly implementing uAPSD protocol. - * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78 - * @sta_id: index of station in uCode's station table - associated AP ID in - * this context. - */ -struct iwl_uapsd_misbehaving_ap_notif { - __le32 sta_id; - u8 mac_id; - u8 reserved[3]; -} __packed; - -/** - * struct iwl_reduce_tx_power_cmd - TX power reduction command - * REDUCE_TX_POWER_CMD = 0x9f - * @flags: (reserved for future implementation) - * @mac_context_id: id of the mac ctx for which we are reducing TX power. - * @pwr_restriction: TX power restriction in dBms. - */ -struct iwl_reduce_tx_power_cmd { - u8 flags; - u8 mac_context_id; - __le16 pwr_restriction; -} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */ - -enum iwl_dev_tx_power_cmd_mode { - IWL_TX_POWER_MODE_SET_MAC = 0, - IWL_TX_POWER_MODE_SET_DEVICE = 1, - IWL_TX_POWER_MODE_SET_CHAINS = 2, - IWL_TX_POWER_MODE_SET_ACK = 3, -}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */; - -#define IWL_NUM_CHAIN_LIMITS 2 -#define IWL_NUM_SUB_BANDS 5 - -/** - * struct iwl_dev_tx_power_cmd - TX power reduction command - * @set_mode: see &enum iwl_dev_tx_power_cmd_mode - * @mac_context_id: id of the mac ctx for which we are reducing TX power. - * @pwr_restriction: TX power restriction in 1/8 dBms. 
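For the uAPSD side of the MAC power command above, a sketch of the fields involved, using the IEEE80211_WMM_IE_STA_QOSINFO_* values from mac80211 as the kernel-doc suggests. The helper name, TID choice and timeouts are illustrative:

static void example_enable_uapsd(struct iwl_mac_power_cmd *cmd)
{
        cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
        /* trigger- and delivery-enable all four ACs, no SP length limit */
        cmd->uapsd_ac_flags = IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |
                              IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |
                              IEEE80211_WMM_IE_STA_QOSINFO_AC_BE |
                              IEEE80211_WMM_IE_STA_QOSINFO_AC_BK;
        cmd->uapsd_max_sp = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
        cmd->qndp_tid = 6;      /* voice TID, as an example */
        cmd->rx_data_timeout_uapsd = cpu_to_le32(50 * USEC_PER_MSEC);
        cmd->tx_data_timeout_uapsd = cpu_to_le32(50 * USEC_PER_MSEC);
}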
- * @dev_24: device TX power restriction in 1/8 dBms - * @dev_52_low: device TX power restriction upper band - low - * @dev_52_high: device TX power restriction upper band - high - * @per_chain_restriction: per chain restrictions - */ -struct iwl_dev_tx_power_cmd_v3 { - __le32 set_mode; - __le32 mac_context_id; - __le16 pwr_restriction; - __le16 dev_24; - __le16 dev_52_low; - __le16 dev_52_high; - __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS]; -} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */ - -#define IWL_DEV_MAX_TX_POWER 0x7FFF - -/** - * struct iwl_dev_tx_power_cmd - TX power reduction command - * @v3: version 3 of the command, embedded here for easier software handling - * @enable_ack_reduction: enable or disable close range ack TX power - * reduction. - * @reserved: reserved (padding) - */ -struct iwl_dev_tx_power_cmd { - /* v4 is just an extension of v3 - keep this here */ - struct iwl_dev_tx_power_cmd_v3 v3; - u8 enable_ack_reduction; - u8 reserved[3]; -} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */ - -#define IWL_NUM_GEO_PROFILES 3 -#define IWL_GEO_PER_CHAIN_SIZE 3 - -/** - * enum iwl_geo_per_chain_offset_operation - type of operation - * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW. - * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table. - */ -enum iwl_geo_per_chain_offset_operation { - IWL_PER_CHAIN_OFFSET_SET_TABLES, - IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE, -}; /* GEO_TX_POWER_LIMIT FLAGS TYPE */ - -/** - * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT. - * @max_tx_power: maximum allowed tx power. - * @chain_a: tx power offset for chain a. - * @chain_b: tx power offset for chain b. - */ -struct iwl_per_chain_offset { - __le16 max_tx_power; - u8 chain_a; - u8 chain_b; -} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */ - -struct iwl_per_chain_offset_group { - struct iwl_per_chain_offset lb; - struct iwl_per_chain_offset hb; -} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */ - -/** - * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd. - * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation - * @table: offset profile per band. - */ -struct iwl_geo_tx_power_profiles_cmd { - __le32 ops; - struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES]; -} __packed; /* GEO_TX_POWER_LIMIT */ - -/** - * struct iwl_geo_tx_power_profiles_resp - response to GEO_TX_POWER_LIMIT cmd - * @profile_idx: current geo profile in use - */ -struct iwl_geo_tx_power_profiles_resp { - __le32 profile_idx; -} __packed; /* GEO_TX_POWER_LIMIT_RESP */ - -/** - * struct iwl_beacon_filter_cmd - * REPLY_BEACON_FILTERING_CMD = 0xd2 (command) - * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon - * to driver if delta in Energy values calculated for this and last - * passed beacon is greater than this threshold. Zero value means that - * the Energy change is ignored for beacon filtering, and beacon will - * not be forced to be sent to driver regardless of this delta. Typical - * energy delta 5dB. - * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state. - * Send beacon to driver if delta in Energy values calculated for this - * and last passed beacon is greater than this threshold. Zero value - * means that the Energy change is ignored for beacon filtering while in - * Roaming state, typical energy delta 1dB. - * @bf_roaming_state: Used for RSSI filtering. 
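Since pwr_restriction is expressed in 1/8 dBm units, limiting a single MAC to a whole-dBm value looks roughly like the sketch below (helper name illustrative); IWL_DEV_MAX_TX_POWER defined above is the "no restriction" value:

static void example_limit_mac_tx_power(struct iwl_dev_tx_power_cmd *cmd,
                                       u32 mac_id, unsigned int dbm)
{
        cmd->v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC);
        cmd->v3.mac_context_id = cpu_to_le32(mac_id);
        /* 1/8 dBm units; IWL_DEV_MAX_TX_POWER would mean no restriction */
        cmd->v3.pwr_restriction = cpu_to_le16(8 * dbm);
        cmd->enable_ack_reduction = 0;
}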
If absolute Energy values - * calculated for current beacon is less than the threshold, use - * Roaming Energy Delta Threshold, otherwise use normal Energy Delta - * Threshold. Typical energy threshold is -72dBm. - * @bf_temp_threshold: This threshold determines the type of temperature - * filtering (Slow or Fast) that is selected (Units are in Celsius): - * If the current temperature is above this threshold - Fast filter - * will be used; if the current temperature is below this threshold - - * Slow filter will be used. - * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values - * calculated for this and the last passed beacon is greater than this - * threshold. Zero value means that the temperature change is ignored for - * beacon filtering; beacons will not be forced to be sent to driver - * regardless of whether its temperature has been changed. - * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values - * calculated for this and the last passed beacon is greater than this - * threshold. Zero value means that the temperature change is ignored for - * beacon filtering; beacons will not be forced to be sent to driver - * regardless of whether its temperature has been changed. - * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled. - * @bf_debug_flag: beacon filtering debug configuration - * @bf_escape_timer: Send beacons to driver if no beacons were passed - * for a specific period of time. Units: Beacons. - * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed - * for a longer period of time than this escape-timeout. Units: Beacons. - * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled. - */ -struct iwl_beacon_filter_cmd { - __le32 bf_energy_delta; - __le32 bf_roaming_energy_delta; - __le32 bf_roaming_state; - __le32 bf_temp_threshold; - __le32 bf_temp_fast_filter; - __le32 bf_temp_slow_filter; - __le32 bf_enable_beacon_filter; - __le32 bf_debug_flag; - __le32 bf_escape_timer; - __le32 ba_escape_timer; - __le32 ba_enable_beacon_abort; -} __packed; - -/* Beacon filtering and beacon abort */ -#define IWL_BF_ENERGY_DELTA_DEFAULT 5 -#define IWL_BF_ENERGY_DELTA_D0I3 20 -#define IWL_BF_ENERGY_DELTA_MAX 255 -#define IWL_BF_ENERGY_DELTA_MIN 0 - -#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1 -#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20 -#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255 -#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0 - -#define IWL_BF_ROAMING_STATE_DEFAULT 72 -#define IWL_BF_ROAMING_STATE_D0I3 72 -#define IWL_BF_ROAMING_STATE_MAX 255 -#define IWL_BF_ROAMING_STATE_MIN 0 - -#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112 -#define IWL_BF_TEMP_THRESHOLD_D0I3 112 -#define IWL_BF_TEMP_THRESHOLD_MAX 255 -#define IWL_BF_TEMP_THRESHOLD_MIN 0 - -#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1 -#define IWL_BF_TEMP_FAST_FILTER_D0I3 1 -#define IWL_BF_TEMP_FAST_FILTER_MAX 255 -#define IWL_BF_TEMP_FAST_FILTER_MIN 0 - -#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5 -#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20 -#define IWL_BF_TEMP_SLOW_FILTER_MAX 255 -#define IWL_BF_TEMP_SLOW_FILTER_MIN 0 - -#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1 - -#define IWL_BF_DEBUG_FLAG_DEFAULT 0 -#define IWL_BF_DEBUG_FLAG_D0I3 0 - -#define IWL_BF_ESCAPE_TIMER_DEFAULT 0 -#define IWL_BF_ESCAPE_TIMER_D0I3 0 -#define IWL_BF_ESCAPE_TIMER_MAX 1024 -#define IWL_BF_ESCAPE_TIMER_MIN 0 - -#define IWL_BA_ESCAPE_TIMER_DEFAULT 6 -#define IWL_BA_ESCAPE_TIMER_D0I3 6 -#define IWL_BA_ESCAPE_TIMER_D3 9 -#define IWL_BA_ESCAPE_TIMER_MAX 1024 -#define 
IWL_BA_ESCAPE_TIMER_MIN 0 - -#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1 - -#define IWL_BF_CMD_CONFIG(mode) \ - .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \ - .bf_roaming_energy_delta = \ - cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \ - .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \ - .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \ - .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \ - .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \ - .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \ - .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \ - .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode) - -#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT) -#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3) -#endif diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h deleted file mode 100644 index bdf1228d050b..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h +++ /dev/null @@ -1,413 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
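The IWL_BF_CMD_CONFIG() macro above expands to designated initializers, so a default-tuned beacon filter command can be built in one declaration, with only the enable bits added on top (a sketch; the helper name is illustrative):

static void example_default_bf_cmd(struct iwl_beacon_filter_cmd *out)
{
        struct iwl_beacon_filter_cmd cmd = {
                IWL_BF_CMD_CONFIG_DEFAULTS,
                .bf_enable_beacon_filter =
                        cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
                .ba_enable_beacon_abort =
                        cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
        };

        *out = cmd;
}

Swapping IWL_BF_CMD_CONFIG_DEFAULTS for IWL_BF_CMD_CONFIG_D0I3 selects the D0i3 tuning instead.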
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - *****************************************************************************/ - -#ifndef __fw_api_rs_h__ -#define __fw_api_rs_h__ - -#include "fw-api-mac.h" - -/* - * These serve as indexes into - * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT]; - * TODO: avoid overlap between legacy and HT rates - */ -enum { - IWL_RATE_1M_INDEX = 0, - IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX, - IWL_RATE_2M_INDEX, - IWL_RATE_5M_INDEX, - IWL_RATE_11M_INDEX, - IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX, - IWL_RATE_6M_INDEX, - IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX, - IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX, - IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX, - IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX, - IWL_RATE_9M_INDEX, - IWL_RATE_12M_INDEX, - IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX, - IWL_RATE_18M_INDEX, - IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX, - IWL_RATE_24M_INDEX, - IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX, - IWL_RATE_36M_INDEX, - IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX, - IWL_RATE_48M_INDEX, - IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX, - IWL_RATE_54M_INDEX, - IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX, - IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX, - IWL_RATE_60M_INDEX, - IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX, - IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX, - IWL_RATE_MCS_8_INDEX, - IWL_RATE_MCS_9_INDEX, - IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX, - IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, - IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1, -}; - -#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) - -/* fw API values for legacy bit rates, both OFDM and CCK */ -enum { - IWL_RATE_6M_PLCP = 13, - IWL_RATE_9M_PLCP = 15, - IWL_RATE_12M_PLCP = 5, - IWL_RATE_18M_PLCP = 7, - IWL_RATE_24M_PLCP = 9, - IWL_RATE_36M_PLCP = 11, - IWL_RATE_48M_PLCP = 1, - IWL_RATE_54M_PLCP = 3, - IWL_RATE_1M_PLCP = 10, - IWL_RATE_2M_PLCP = 20, - IWL_RATE_5M_PLCP = 55, - IWL_RATE_11M_PLCP = 110, - IWL_RATE_INVM_PLCP = -1, -}; - -/* - * rate_n_flags bit fields - * - * The 32-bit value has different layouts in the low 8 bites depending on the - * format. There are three formats, HT, VHT and legacy (11abg, with subformats - * for CCK and OFDM). - * - * High-throughput (HT) rate format - * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM) - * Very High-throughput (VHT) rate format - * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM) - * Legacy OFDM rate format for bits 7:0 - * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM) - * Legacy CCK rate format for bits 7:0: - * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK) - */ - -/* Bit 8: (1) HT format, (0) legacy or VHT format */ -#define RATE_MCS_HT_POS 8 -#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS) - -/* Bit 9: (1) CCK, (0) OFDM. 
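A small sketch of distinguishing the rate formats described above (the CCK, VHT and legacy-rate masks are defined just below this point; the helper names are illustrative):

static bool example_rate_is_cck(u32 rate_n_flags)
{
        /* legacy CCK: neither HT nor VHT set, and the CCK bit set */
        return !(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)) &&
               (rate_n_flags & RATE_MCS_CCK_MSK);
}

static u32 example_legacy_rate(u32 rate_n_flags)
{
        /* PLCP value (e.g. IWL_RATE_11M_PLCP == 110) in the low byte */
        return rate_n_flags & RATE_LEGACY_RATE_MSK;
}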
HT (bit 8) must be "0" for this bit to be valid */ -#define RATE_MCS_CCK_POS 9 -#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS) - -/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */ -#define RATE_MCS_VHT_POS 26 -#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS) - - -/* - * High-throughput (HT) rate format for bits 7:0 - * - * 2-0: MCS rate base - * 0) 6 Mbps - * 1) 12 Mbps - * 2) 18 Mbps - * 3) 24 Mbps - * 4) 36 Mbps - * 5) 48 Mbps - * 6) 54 Mbps - * 7) 60 Mbps - * 4-3: 0) Single stream (SISO) - * 1) Dual stream (MIMO) - * 2) Triple stream (MIMO) - * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data - * (bits 7-6 are zero) - * - * Together the low 5 bits work out to the MCS index because we don't - * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two - * streams and 16-23 have three streams. We could also support MCS 32 - * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.) - */ -#define RATE_HT_MCS_RATE_CODE_MSK 0x7 -#define RATE_HT_MCS_NSS_POS 3 -#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS) - -/* Bit 10: (1) Use Green Field preamble */ -#define RATE_HT_MCS_GF_POS 10 -#define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS) - -#define RATE_HT_MCS_INDEX_MSK 0x3f - -/* - * Very High-throughput (VHT) rate format for bits 7:0 - * - * 3-0: VHT MCS (0-9) - * 5-4: number of streams - 1: - * 0) Single stream (SISO) - * 1) Dual stream (MIMO) - * 2) Triple stream (MIMO) - */ - -/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */ -#define RATE_VHT_MCS_RATE_CODE_MSK 0xf -#define RATE_VHT_MCS_NSS_POS 4 -#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS) - -/* - * Legacy OFDM rate format for bits 7:0 - * - * 3-0: 0xD) 6 Mbps - * 0xF) 9 Mbps - * 0x5) 12 Mbps - * 0x7) 18 Mbps - * 0x9) 24 Mbps - * 0xB) 36 Mbps - * 0x1) 48 Mbps - * 0x3) 54 Mbps - * (bits 7-4 are 0) - * - * Legacy CCK rate format for bits 7:0: - * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK): - * - * 6-0: 10) 1 Mbps - * 20) 2 Mbps - * 55) 5.5 Mbps - * 110) 11 Mbps - * (bit 7 is 0) - */ -#define RATE_LEGACY_RATE_MSK 0xff - -/* Bit 10 - OFDM HE */ -#define RATE_MCS_OFDM_HE_POS 10 -#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS) - -/* - * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz - * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT - */ -#define RATE_MCS_CHAN_WIDTH_POS 11 -#define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS) -#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS) - -/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */ -#define RATE_MCS_SGI_POS 13 -#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS) - -/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */ -#define RATE_MCS_ANT_POS 14 -#define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS) -#define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS) -#define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS) -#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \ - RATE_MCS_ANT_B_MSK) -#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \ - RATE_MCS_ANT_C_MSK) -#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK -#define RATE_MCS_ANT_NUM 3 - -/* Bit 17: (0) SS, (1) SS*2 */ -#define RATE_MCS_STBC_POS 17 -#define RATE_MCS_STBC_MSK BIT(RATE_MCS_STBC_POS) - -/* Bit 18: OFDM-HE dual carrier mode */ -#define RATE_HE_DUAL_CARRIER_MODE 18 -#define RATE_HE_DUAL_CARRIER_MODE_MSK 
BIT(RATE_HE_DUAL_CARRIER_MODE) - -/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */ -#define RATE_MCS_BF_POS 19 -#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) - -/* - * Bit 20-21: HE guard interval and LTF type. - * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us, - * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us - */ -#define RATE_MCS_HE_GI_LTF_POS 20 -#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS) - -/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ -#define RATE_MCS_HE_TYPE_POS 22 -#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS) - -/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */ -#define RATE_MCS_DUP_POS 24 -#define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS) - -/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */ -#define RATE_MCS_LDPC_POS 27 -#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) - - -/* Link Quality definitions */ - -/* # entries in rate scale table to support Tx retries */ -#define LQ_MAX_RETRY_NUM 16 - -/* Link quality command flags bit fields */ - -/* Bit 0: (0) Don't use RTS (1) Use RTS */ -#define LQ_FLAG_USE_RTS_POS 0 -#define LQ_FLAG_USE_RTS_MSK (1 << LQ_FLAG_USE_RTS_POS) - -/* Bit 1-3: LQ command color. Used to match responses to LQ commands */ -#define LQ_FLAG_COLOR_POS 1 -#define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) -#define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\ - LQ_FLAG_COLOR_POS) -#define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\ - LQ_FLAG_COLOR_MSK) -#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK)) - -/* Bit 4-5: Tx RTS BW Signalling - * (0) No RTS BW signalling - * (1) Static BW signalling - * (2) Dynamic BW signalling - */ -#define LQ_FLAG_RTS_BW_SIG_POS 4 -#define LQ_FLAG_RTS_BW_SIG_NONE (0 << LQ_FLAG_RTS_BW_SIG_POS) -#define LQ_FLAG_RTS_BW_SIG_STATIC (1 << LQ_FLAG_RTS_BW_SIG_POS) -#define LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << LQ_FLAG_RTS_BW_SIG_POS) - -/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection - * Dynamic BW selection allows Tx with narrower BW than requested in rates - */ -#define LQ_FLAG_DYNAMIC_BW_POS 6 -#define LQ_FLAG_DYNAMIC_BW_MSK (1 << LQ_FLAG_DYNAMIC_BW_POS) - -/* Single Stream Tx Parameters (lq_cmd->ss_params) - * Flags to control a smart FW decision about whether BFER/STBC/SISO will be - * used for single stream Tx. - */ - -/* Bit 0-1: Max STBC streams allowed. Can be 0-3. - * (0) - No STBC allowed - * (1) - 2x1 STBC allowed (HT/VHT) - * (2) - 4x2 STBC allowed (HT/VHT) - * (3) - 3x2 STBC allowed (HT only) - * All our chips are at most 2 antennas so only (1) is valid for now. - */ -#define LQ_SS_STBC_ALLOWED_POS 0 -#define LQ_SS_STBC_ALLOWED_MSK (3 << LQ_SS_STBC_ALLOWED_POS) - -/* 2x1 STBC is allowed */ -#define LQ_SS_STBC_1SS_ALLOWED (1 << LQ_SS_STBC_ALLOWED_POS) - -/* Bit 2: Beamformer (VHT only) is allowed */ -#define LQ_SS_BFER_ALLOWED_POS 2 -#define LQ_SS_BFER_ALLOWED (1 << LQ_SS_BFER_ALLOWED_POS) - -/* Bit 3: Force BFER or STBC for testing - * If this is set: - * If BFER is allowed then force the ucode to choose BFER else - * If STBC is allowed then force the ucode to choose STBC over SISO - */ -#define LQ_SS_FORCE_POS 3 -#define LQ_SS_FORCE (1 << LQ_SS_FORCE_POS) - -/* Bit 31: ss_params field is valid.
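As a usage sketch for the colour macros defined above (helper name illustrative): read the current colour from the flags byte, bump it, and merge it back, which is how responses can later be matched to the command that produced them:

static u8 example_next_lq_color(u8 flags)
{
        u8 color = LQ_FLAG_COLOR_GET(flags);

        return LQ_FLAG_COLOR_SET(flags, LQ_FLAGS_COLOR_INC(color));
}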
Used for FW backward compatibility - * with other drivers which don't support the ss_params API yet - */ -#define LQ_SS_PARAMS_VALID_POS 31 -#define LQ_SS_PARAMS_VALID (1 << LQ_SS_PARAMS_VALID_POS) - -/** - * struct iwl_lq_cmd - link quality command - * @sta_id: station to update - * @reduced_tpc: reduced transmit power control value - * @control: not used - * @flags: combination of LQ_FLAG_* - * @mimo_delim: the first SISO index in rs_table, which separates MIMO - * and SISO rates - * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD). - * Should be ANT_[ABC] - * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC] - * @initial_rate_index: first index from rs_table per AC category - * @agg_time_limit: aggregation max time threshold in usec/100, meaning - * value of 100 is one usec. Range is 100 to 8000 - * @agg_disable_start_th: try-count threshold for starting aggregation. - * If a frame has higher try-count, it should not be selected for - * starting an aggregation sequence. - * @agg_frame_cnt_limit: max frame count in an aggregation. - * 0: no limit - * 1: no aggregation (one frame per aggregation) - * 2 - 0x3f: maximal number of frames (up to 3f == 63) - * @reserved2: reserved - * @rs_table: array of rates for each TX try, each is rate_n_flags, - * meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP - * @ss_params: single stream features. declare whether STBC or BFER are allowed. - */ -struct iwl_lq_cmd { - u8 sta_id; - u8 reduced_tpc; - __le16 control; - /* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */ - u8 flags; - u8 mimo_delim; - u8 single_stream_ant_msk; - u8 dual_stream_ant_msk; - u8 initial_rate_index[AC_NUM]; - /* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */ - __le16 agg_time_limit; - u8 agg_disable_start_th; - u8 agg_frame_cnt_limit; - __le32 reserved2; - __le32 rs_table[LQ_MAX_RETRY_NUM]; - __le32 ss_params; -}; /* LINK_QUALITY_CMD_API_S_VER_1 */ - -#endif /* __fw_api_rs_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h deleted file mode 100644 index 59038ade08d8..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ /dev/null @@ -1,574 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __fw_api_rx_h__ -#define __fw_api_rx_h__ - -/* API for pre-9000 hardware */ - -#define IWL_RX_INFO_PHY_CNT 8 -#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1 -#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff -#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00 -#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000 -#define IWL_RX_INFO_ENERGY_ANT_A_POS 0 -#define IWL_RX_INFO_ENERGY_ANT_B_POS 8 -#define IWL_RX_INFO_ENERGY_ANT_C_POS 16 - -enum iwl_mac_context_info { - MAC_CONTEXT_INFO_NONE, - MAC_CONTEXT_INFO_GSCAN, -}; - -/** - * struct iwl_rx_phy_info - phy info - * (REPLY_RX_PHY_CMD = 0xc0) - * @non_cfg_phy_cnt: non configurable DSP phy data byte count - * @cfg_phy_cnt: configurable DSP phy data byte count - * @stat_id: configurable DSP phy data set ID - * @reserved1: reserved - * @system_timestamp: GP2 at on air rise - * @timestamp: TSF at on air rise - * @beacon_time_stamp: beacon at on-air rise - * @phy_flags: general phy flags: band, modulation, ... - * @channel: channel number - * @non_cfg_phy: for various implementations of non_cfg_phy - * @rate_n_flags: RATE_MCS_* - * @byte_count: frame's byte-count - * @frame_time: frame's time on the air, based on byte count and frame rate - * calculation - * @mac_active_msk: what MACs were active when the frame was received - * @mac_context_info: additional info on the context in which the frame was - * received as defined in &enum iwl_mac_context_info - * - * Before each Rx, the device sends this data. It contains PHY information - * about the reception of the packet. 
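On the pre-9000 API above, the per-antenna energy bytes are packed into the non_cfg_phy word selected by IWL_RX_INFO_ENERGY_ANT_ABC_IDX. A sketch of pulling them out (helper name illustrative; struct iwl_rx_phy_info itself follows just below):

static void example_get_rx_energy(const struct iwl_rx_phy_info *phy_info,
                                  u32 *energy_a, u32 *energy_b)
{
        u32 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);

        *energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
                    IWL_RX_INFO_ENERGY_ANT_A_POS;
        *energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
                    IWL_RX_INFO_ENERGY_ANT_B_POS;
}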
- */ -struct iwl_rx_phy_info { - u8 non_cfg_phy_cnt; - u8 cfg_phy_cnt; - u8 stat_id; - u8 reserved1; - __le32 system_timestamp; - __le64 timestamp; - __le32 beacon_time_stamp; - __le16 phy_flags; - __le16 channel; - __le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT]; - __le32 rate_n_flags; - __le32 byte_count; - u8 mac_active_msk; - u8 mac_context_info; - __le16 frame_time; -} __packed; - -/* - * TCP offload Rx assist info - * - * bits 0:3 - reserved - * bits 4:7 - MIC CRC length - * bits 8:12 - MAC header length - * bit 13 - Padding indication - * bit 14 - A-AMSDU indication - * bit 15 - Offload enabled - */ -enum iwl_csum_rx_assist_info { - CSUM_RXA_RESERVED_MASK = 0x000f, - CSUM_RXA_MICSIZE_MASK = 0x00f0, - CSUM_RXA_HEADERLEN_MASK = 0x1f00, - CSUM_RXA_PADD = BIT(13), - CSUM_RXA_AMSDU = BIT(14), - CSUM_RXA_ENA = BIT(15) -}; - -/** - * struct iwl_rx_mpdu_res_start - phy info - * @byte_count: byte count of the frame - * @assist: see &enum iwl_csum_rx_assist_info - */ -struct iwl_rx_mpdu_res_start { - __le16 byte_count; - __le16 assist; -} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */ - -/** - * enum iwl_rx_phy_flags - to parse %iwl_rx_phy_info phy_flags - * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band - * @RX_RES_PHY_FLAGS_MOD_CCK: modulation is CCK - * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short - * @RX_RES_PHY_FLAGS_NARROW_BAND: narrow band (<20 MHz) receive - * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received - * @RX_RES_PHY_FLAGS_ANTENNA_POS: antenna bit position - * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU - * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame - * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble - * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame - */ -enum iwl_rx_phy_flags { - RX_RES_PHY_FLAGS_BAND_24 = BIT(0), - RX_RES_PHY_FLAGS_MOD_CCK = BIT(1), - RX_RES_PHY_FLAGS_SHORT_PREAMBLE = BIT(2), - RX_RES_PHY_FLAGS_NARROW_BAND = BIT(3), - RX_RES_PHY_FLAGS_ANTENNA = (0x7 << 4), - RX_RES_PHY_FLAGS_ANTENNA_POS = 4, - RX_RES_PHY_FLAGS_AGG = BIT(7), - RX_RES_PHY_FLAGS_OFDM_HT = BIT(8), - RX_RES_PHY_FLAGS_OFDM_GF = BIT(9), - RX_RES_PHY_FLAGS_OFDM_VHT = BIT(10), -}; - -/** - * enum iwl_mvm_rx_status - written by fw for each Rx packet - * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine - * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow - * @RX_MPDU_RES_STATUS_SRC_STA_FOUND: station was found - * @RX_MPDU_RES_STATUS_KEY_VALID: key was valid - * @RX_MPDU_RES_STATUS_KEY_PARAM_OK: key parameters were usable - * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed - * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked - * in the driver. - * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine - * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or - * alg = CCM only. Checks replay attack for 11w frames. Relevant only if - * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set. 
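A sketch of decoding two of the phy_flags bits defined above, band and receive antenna (helper name illustrative):

static void example_parse_phy_flags(__le16 phy_flags_le, bool *band_2ghz,
                                    u32 *antenna)
{
        u16 phy_flags = le16_to_cpu(phy_flags_le);

        *band_2ghz = !!(phy_flags & RX_RES_PHY_FLAGS_BAND_24);
        *antenna = (phy_flags & RX_RES_PHY_FLAGS_ANTENNA) >>
                   RX_RES_PHY_FLAGS_ANTENNA_POS;
}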
- * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted - * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP - * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM - * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP - * @RX_MPDU_RES_STATUS_SEC_EXT_ENC: this frame is encrypted using extension - * algorithm - * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC - * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted - * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm - * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted - * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP: extended IV (set with TKIP) - * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT: key ID comparison done - * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame - * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw - * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors - * @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask - * @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift - * @RX_MPDU_RES_STATUS_FILTERING_MSK: filter status - * @RX_MPDU_RES_STATUS2_FILTERING_MSK: filter status 2 - */ -enum iwl_mvm_rx_status { - RX_MPDU_RES_STATUS_CRC_OK = BIT(0), - RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1), - RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2), - RX_MPDU_RES_STATUS_KEY_VALID = BIT(3), - RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4), - RX_MPDU_RES_STATUS_ICV_OK = BIT(5), - RX_MPDU_RES_STATUS_MIC_OK = BIT(6), - RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), - RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR = BIT(7), - RX_MPDU_RES_STATUS_SEC_NO_ENC = (0 << 8), - RX_MPDU_RES_STATUS_SEC_WEP_ENC = (1 << 8), - RX_MPDU_RES_STATUS_SEC_CCM_ENC = (2 << 8), - RX_MPDU_RES_STATUS_SEC_TKIP_ENC = (3 << 8), - RX_MPDU_RES_STATUS_SEC_EXT_ENC = (4 << 8), - RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC = (6 << 8), - RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8), - RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8), - RX_MPDU_RES_STATUS_DEC_DONE = BIT(11), - RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13), - RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14), - RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15), - RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16), - RX_MPDU_RES_STATUS_CSUM_OK = BIT(17), - RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24, - RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT, - RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000), - RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000), -}; - -/* 9000 series API */ -enum iwl_rx_mpdu_mac_flags1 { - IWL_RX_MDPU_MFLG1_ADDRTYPE_MASK = 0x03, - IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK = 0xf0, - /* shift should be 4, but the length is measured in 2-byte - * words, so shifting only by 3 gives a byte result - */ - IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT = 3, -}; - -enum iwl_rx_mpdu_mac_flags2 { - /* in 2-byte words */ - IWL_RX_MPDU_MFLG2_HDR_LEN_MASK = 0x1f, - IWL_RX_MPDU_MFLG2_PAD = 0x20, - IWL_RX_MPDU_MFLG2_AMSDU = 0x40, -}; - -enum iwl_rx_mpdu_amsdu_info { - IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x7f, - IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80, -}; - -enum iwl_rx_l3_proto_values { - IWL_RX_L3_TYPE_NONE, - IWL_RX_L3_TYPE_IPV4, - IWL_RX_L3_TYPE_IPV4_FRAG, - IWL_RX_L3_TYPE_IPV6_FRAG, - IWL_RX_L3_TYPE_IPV6, - IWL_RX_L3_TYPE_IPV6_IN_IPV4, - IWL_RX_L3_TYPE_ARP, - IWL_RX_L3_TYPE_EAPOL, -}; - -#define IWL_RX_L3_PROTO_POS 4 - -enum iwl_rx_l3l4_flags { - IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0), - IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1), - IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2), - IWL_RX_L3L4_TCP_ACK = BIT(3), - 
IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS, - IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8, - IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12, -}; - -enum iwl_rx_mpdu_status { - IWL_RX_MPDU_STATUS_CRC_OK = BIT(0), - IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1), - IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2), - IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3), - IWL_RX_MPDU_STATUS_KEY_PARAM_OK = BIT(4), - IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), - IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), - IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), - IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, - IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, - IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, - IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, - IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8, - IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8, - IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8, - IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11), - IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12), - IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13), - IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14), - IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15), -}; - -enum iwl_rx_mpdu_hash_filter { - IWL_RX_MPDU_HF_A1_HASH_MASK = 0x3f, - IWL_RX_MPDU_HF_FILTER_STATUS_MASK = 0xc0, -}; - -enum iwl_rx_mpdu_sta_id_flags { - IWL_RX_MPDU_SIF_STA_ID_MASK = 0x1f, - IWL_RX_MPDU_SIF_RRF_ABORT = 0x20, - IWL_RX_MPDU_SIF_FILTER_STATUS_MASK = 0xc0, -}; - -#define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f - -enum iwl_rx_mpdu_reorder_data { - IWL_RX_MPDU_REORDER_NSSN_MASK = 0x00000fff, - IWL_RX_MPDU_REORDER_SN_MASK = 0x00fff000, - IWL_RX_MPDU_REORDER_SN_SHIFT = 12, - IWL_RX_MPDU_REORDER_BAID_MASK = 0x7f000000, - IWL_RX_MPDU_REORDER_BAID_SHIFT = 24, - IWL_RX_MPDU_REORDER_BA_OLD_SN = 0x80000000, -}; - -enum iwl_rx_mpdu_phy_info { - IWL_RX_MPDU_PHY_AMPDU = BIT(5), - IWL_RX_MPDU_PHY_AMPDU_TOGGLE = BIT(6), - IWL_RX_MPDU_PHY_SHORT_PREAMBLE = BIT(7), - IWL_RX_MPDU_PHY_TSF_OVERLOAD = BIT(8), -}; - -enum iwl_rx_mpdu_mac_info { - IWL_RX_MPDU_PHY_MAC_INDEX_MASK = 0x0f, - IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0, -}; - -/** - * struct iwl_rx_mpdu_desc - RX MPDU descriptor - */ -struct iwl_rx_mpdu_desc { - /* DW2 */ - /** - * @mpdu_len: MPDU length - */ - __le16 mpdu_len; - /** - * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1 - */ - u8 mac_flags1; - /** - * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2 - */ - u8 mac_flags2; - /* DW3 */ - /** - * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info - */ - u8 amsdu_info; - /** - * @phy_info: &enum iwl_rx_mpdu_phy_info - */ - __le16 phy_info; - /** - * @mac_phy_idx: MAC/PHY index - */ - u8 mac_phy_idx; - /* DW4 - carries csum data only when rpa_en == 1 */ - /** - * @raw_csum: raw checksum (alledgedly unreliable) - */ - __le16 raw_csum; - /** - * @l3l4_flags: &enum iwl_rx_l3l4_flags - */ - __le16 l3l4_flags; - /* DW5 */ - /** - * @status: &enum iwl_rx_mpdu_status - */ - __le16 status; - /** - * @hash_filter: hash filter value - */ - u8 hash_filter; - /** - * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags - */ - u8 sta_id_flags; - /* DW6 */ - /** - * @reorder_data: &enum iwl_rx_mpdu_reorder_data - */ - __le32 reorder_data; - /* DW7 - carries rss_hash only when rpa_en == 1 */ - /** - * @rss_hash: RSS hash value - */ - __le32 rss_hash; - /* DW8 - carries filter_match only when rpa_en == 1 */ - /** - * @filter_match: filter match value - */ - __le32 filter_match; - /* DW9 */ - /** - * @rate_n_flags: RX rate/flags encoding - */ - __le32 rate_n_flags; - /* DW10 */ - /** - * @energy_a: energy chain A - */ - u8 energy_a; - /** - * @energy_b: energy chain B - */ - u8 energy_b; - /** - * @channel: channel number - */ - u8 channel; - /** - * @mac_context: MAC context 
mask - */ - u8 mac_context; - /* DW11 */ - /** - * @gp2_on_air_rise: GP2 timer value on air rise (INA) - */ - __le32 gp2_on_air_rise; - /* DW12 & DW13 */ - /** - * @tsf_on_air_rise: - * TSF value on air rise (INA), only valid if - * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set - */ - __le64 tsf_on_air_rise; -} __packed; - -struct iwl_frame_release { - u8 baid; - u8 reserved; - __le16 nssn; -}; - -enum iwl_rss_hash_func_en { - IWL_RSS_HASH_TYPE_IPV4_TCP, - IWL_RSS_HASH_TYPE_IPV4_UDP, - IWL_RSS_HASH_TYPE_IPV4_PAYLOAD, - IWL_RSS_HASH_TYPE_IPV6_TCP, - IWL_RSS_HASH_TYPE_IPV6_UDP, - IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, -}; - -#define IWL_RSS_HASH_KEY_CNT 10 -#define IWL_RSS_INDIRECTION_TABLE_SIZE 128 -#define IWL_RSS_ENABLE 1 - -/** - * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration - * - * @flags: 1 - enable, 0 - disable - * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en - * @reserved: reserved - * @secret_key: 320 bit input of random key configuration from driver - * @indirection_table: indirection table - */ -struct iwl_rss_config_cmd { - __le32 flags; - u8 hash_mask; - u8 reserved[3]; - __le32 secret_key[IWL_RSS_HASH_KEY_CNT]; - u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; -} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ - -#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128 -#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0 -#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf - -/** - * struct iwl_rxq_sync_cmd - RXQ notification trigger - * - * @flags: flags of the notification. bit 0:3 are the sender queue - * @rxq_mask: rx queues to send the notification on - * @count: number of bytes in payload, should be DWORD aligned - * @payload: data to send to rx queues - */ -struct iwl_rxq_sync_cmd { - __le32 flags; - __le32 rxq_mask; - __le32 count; - u8 payload[]; -} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ - -/** - * struct iwl_rxq_sync_notification - Notification triggered by RXQ - * sync command - * - * @count: number of bytes in payload - * @payload: data to send to rx queues - */ -struct iwl_rxq_sync_notification { - __le32 count; - u8 payload[]; -} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ - -/** - * enum iwl_mvm_rxq_notif_type - Internal message identifier - * - * @IWL_MVM_RXQ_EMPTY: empty sync notification - * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA - */ -enum iwl_mvm_rxq_notif_type { - IWL_MVM_RXQ_EMPTY, - IWL_MVM_RXQ_NOTIF_DEL_BA, -}; - -/** - * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent - * in &iwl_rxq_sync_cmd. Should be DWORD aligned. - * FW is agnostic to the payload, so there are no endianity requirements. 
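Going back to the reorder_data word of the 9000-series MPDU descriptor, a sketch of unpacking BAID, SN and NSSN with the masks defined above (helper name illustrative); a BAID equal to IWL_RX_REORDER_DATA_INVALID_BAID marks frames with no reorder buffer:

static void example_parse_reorder(__le32 reorder_le, u8 *baid, u16 *sn,
                                  u16 *nssn)
{
        u32 reorder = le32_to_cpu(reorder_le);

        *baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
                IWL_RX_MPDU_REORDER_BAID_SHIFT;
        *sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
              IWL_RX_MPDU_REORDER_SN_SHIFT;
        *nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
}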
- * - * @type: value from &iwl_mvm_rxq_notif_type - * @sync: ctrl path is waiting for all notifications to be received - * @cookie: internal cookie to identify old notifications - * @data: payload - */ -struct iwl_mvm_internal_rxq_notif { - u16 type; - u16 sync; - u32 cookie; - u8 data[]; -} __packed; - -/** - * enum iwl_mvm_pm_event - type of station PM event - * @IWL_MVM_PM_EVENT_AWAKE: station woke up - * @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep - * @IWL_MVM_PM_EVENT_UAPSD: station sent uAPSD trigger - * @IWL_MVM_PM_EVENT_PS_POLL: station sent PS-Poll - */ -enum iwl_mvm_pm_event { - IWL_MVM_PM_EVENT_AWAKE, - IWL_MVM_PM_EVENT_ASLEEP, - IWL_MVM_PM_EVENT_UAPSD, - IWL_MVM_PM_EVENT_PS_POLL, -}; /* PEER_PM_NTFY_API_E_VER_1 */ - -/** - * struct iwl_mvm_pm_state_notification - station PM state notification - * @sta_id: station ID of the station changing state - * @type: the new powersave state, see &enum iwl_mvm_pm_event - */ -struct iwl_mvm_pm_state_notification { - u8 sta_id; - u8 type; - /* private: */ - __le16 reserved; -} __packed; /* PEER_PM_NTFY_API_S_VER_1 */ - -#endif /* __fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h deleted file mode 100644 index 1cd7cc087936..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h +++ /dev/null @@ -1,792 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. 
- * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __fw_api_scan_h__ -#define __fw_api_scan_h__ - -/* Scan Commands, Responses, Notifications */ - -/* Max number of IEs for direct SSID scans in a command */ -#define PROBE_OPTION_MAX 20 - -/** - * struct iwl_ssid_ie - directed scan network information element - * - * Up to 20 of these may appear in REPLY_SCAN_CMD, - * selected by "type" bit field in struct iwl_scan_channel; - * each channel may select different ssids from among the 20 entries. - * SSID IEs get transmitted in reverse order of entry. - * - * @id: element ID - * @len: element length - * @ssid: element (SSID) data - */ -struct iwl_ssid_ie { - u8 id; - u8 len; - u8 ssid[IEEE80211_MAX_SSID_LEN]; -} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */ - -/* scan offload */ -#define IWL_SCAN_MAX_BLACKLIST_LEN 64 -#define IWL_SCAN_SHORT_BLACKLIST_LEN 16 -#define IWL_SCAN_MAX_PROFILES 11 -#define SCAN_OFFLOAD_PROBE_REQ_SIZE 512 - -/* Default watchdog (in MS) for scheduled scan iteration */ -#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000) - -#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1) -#define CAN_ABORT_STATUS 1 - -#define IWL_FULL_SCAN_MULTIPLIER 5 -#define IWL_FAST_SCHED_SCAN_ITERATIONS 3 -#define IWL_MAX_SCHED_SCAN_PLANS 2 - -enum scan_framework_client { - SCAN_CLIENT_SCHED_SCAN = BIT(0), - SCAN_CLIENT_NETDETECT = BIT(1), - SCAN_CLIENT_ASSET_TRACKING = BIT(2), -}; - -/** - * struct iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S - * @ssid: MAC address to filter out - * @reported_rssi: AP rssi reported to the host - * @client_bitmap: clients ignore this entry - enum scan_framework_client - */ -struct iwl_scan_offload_blacklist { - u8 ssid[ETH_ALEN]; - u8 reported_rssi; - u8 client_bitmap; -} __packed; - -enum iwl_scan_offload_network_type { - IWL_NETWORK_TYPE_BSS = 1, - IWL_NETWORK_TYPE_IBSS = 2, - IWL_NETWORK_TYPE_ANY = 3, -}; - -enum iwl_scan_offload_band_selection { - IWL_SCAN_OFFLOAD_SELECT_2_4 = 0x4, - IWL_SCAN_OFFLOAD_SELECT_5_2 = 0x8, - IWL_SCAN_OFFLOAD_SELECT_ANY = 0xc, -}; - -/** - * struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S - * @ssid_index: index to ssid list in fixed part - * @unicast_cipher: encryption algorithm to match - bitmap - * @auth_alg: authentication algorithm to match - bitmap - * @network_type: enum iwl_scan_offload_network_type - * @band_selection: enum iwl_scan_offload_band_selection - * @client_bitmap: clients waiting for match - enum scan_framework_client - * @reserved: reserved - */ -struct 
iwl_scan_offload_profile { - u8 ssid_index; - u8 unicast_cipher; - u8 auth_alg; - u8 network_type; - u8 band_selection; - u8 client_bitmap; - u8 reserved[2]; -} __packed; - -/** - * struct iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1 - * @profiles: profiles to search for match - * @blacklist_len: length of blacklist - * @num_profiles: num of profiles in the list - * @match_notify: clients waiting for match found notification - * @pass_match: clients waiting for the results - * @active_clients: active clients bitmap - enum scan_framework_client - * @any_beacon_notify: clients waiting for match notification without match - * @reserved: reserved - */ -struct iwl_scan_offload_profile_cfg { - struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES]; - u8 blacklist_len; - u8 num_profiles; - u8 match_notify; - u8 pass_match; - u8 active_clients; - u8 any_beacon_notify; - u8 reserved[2]; -} __packed; - -/** - * struct iwl_scan_schedule_lmac - schedule of scan offload - * @delay: delay between iterations, in seconds. - * @iterations: num of scan iterations - * @full_scan_mul: number of partial scans before each full scan - */ -struct iwl_scan_schedule_lmac { - __le16 delay; - u8 iterations; - u8 full_scan_mul; -} __packed; /* SCAN_SCHEDULE_API_S */ - -enum iwl_scan_offload_complete_status { - IWL_SCAN_OFFLOAD_COMPLETED = 1, - IWL_SCAN_OFFLOAD_ABORTED = 2, -}; - -enum iwl_scan_ebs_status { - IWL_SCAN_EBS_SUCCESS, - IWL_SCAN_EBS_FAILED, - IWL_SCAN_EBS_CHAN_NOT_FOUND, - IWL_SCAN_EBS_INACTIVE, -}; - -/** - * struct iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S - * @tx_flags: combination of TX_CMD_FLG_* - * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is - * cleared. Combination of RATE_MCS_* - * @sta_id: index of destination station in FW station table - * @reserved: for alignment and future use - */ -struct iwl_scan_req_tx_cmd { - __le32 tx_flags; - __le32 rate_n_flags; - u8 sta_id; - u8 reserved[3]; -} __packed; - -enum iwl_scan_channel_flags_lmac { - IWL_UNIFIED_SCAN_CHANNEL_FULL = BIT(27), - IWL_UNIFIED_SCAN_CHANNEL_PARTIAL = BIT(28), -}; - -/** - * struct iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2 - * @flags: bits 1-20: directed scan to i'th ssid - * other bits &enum iwl_scan_channel_flags_lmac - * @channel_num: channel number 1-13 etc - * @iter_count: scan iteration on this channel - * @iter_interval: interval in seconds between iterations on one channel - */ -struct iwl_scan_channel_cfg_lmac { - __le32 flags; - __le16 channel_num; - __le16 iter_count; - __le32 iter_interval; -} __packed; - -/* - * struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1 - * @offset: offset in the data block - * @len: length of the segment - */ -struct iwl_scan_probe_segment { - __le16 offset; - __le16 len; -} __packed; - -/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2 - * @mac_header: first (and common) part of the probe - * @band_data: band specific data - * @common_data: last (and common) part of the probe - * @buf: raw data block - */ -struct iwl_scan_probe_req { - struct iwl_scan_probe_segment mac_header; - struct iwl_scan_probe_segment band_data[2]; - struct iwl_scan_probe_segment common_data; - u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE]; -} __packed; - -enum iwl_scan_channel_flags { - IWL_SCAN_CHANNEL_FLAG_EBS = BIT(0), - IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE = BIT(1), - IWL_SCAN_CHANNEL_FLAG_CACHE_ADD = BIT(2), -}; - -/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S - * @flags: enum iwl_scan_channel_flags - * @non_ebs_ratio: defines 
the ratio of number of scan iterations where EBS is - * involved. - * 1 - EBS is disabled. - * 2 - every second scan will be full scan(and so on). - */ -struct iwl_scan_channel_opt { - __le16 flags; - __le16 non_ebs_ratio; -} __packed; - -/** - * enum iwl_mvm_lmac_scan_flags - LMAC scan flags - * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses - * without filtering. - * @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels - * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan - * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification - * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS: multiple SSID matching - * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented - * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report - * and DS parameter set IEs into probe requests. - * @IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL: use extended dwell time on channels - * 1, 6 and 11. - * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches - */ -enum iwl_mvm_lmac_scan_flags { - IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), - IWL_MVM_LMAC_SCAN_FLAG_PASSIVE = BIT(1), - IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = BIT(2), - IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = BIT(3), - IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), - IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), - IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), - IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL = BIT(7), - IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), -}; - -enum iwl_scan_priority { - IWL_SCAN_PRIORITY_LOW, - IWL_SCAN_PRIORITY_MEDIUM, - IWL_SCAN_PRIORITY_HIGH, -}; - -enum iwl_scan_priority_ext { - IWL_SCAN_PRIORITY_EXT_0_LOWEST, - IWL_SCAN_PRIORITY_EXT_1, - IWL_SCAN_PRIORITY_EXT_2, - IWL_SCAN_PRIORITY_EXT_3, - IWL_SCAN_PRIORITY_EXT_4, - IWL_SCAN_PRIORITY_EXT_5, - IWL_SCAN_PRIORITY_EXT_6, - IWL_SCAN_PRIORITY_EXT_7_HIGHEST, -}; - -/** - * struct iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1 - * @reserved1: for alignment and future use - * @n_channels: num of channels to scan - * @active_dwell: dwell time for active channels - * @passive_dwell: dwell time for passive channels - * @fragmented_dwell: dwell time for fragmented passive scan - * @extended_dwell: dwell time for channels 1, 6 and 11 (in certain cases) - * @reserved2: for alignment and future use - * @rx_chain_select: PHY_RX_CHAIN_* flags - * @scan_flags: &enum iwl_mvm_lmac_scan_flags - * @max_out_time: max time (in TU) to be out of associated channel - * @suspend_time: pause scan this long (TUs) when returning to service channel - * @flags: RXON flags - * @filter_flags: RXON filter - * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz - * @direct_scan: list of SSIDs for directed active scan - * @scan_prio: enum iwl_scan_priority - * @iter_num: number of scan iterations - * @delay: delay in seconds before first iteration - * @schedule: two scheduling plans. The first one is finite, the second one can - * be infinite. - * @channel_opt: channel optimization options, for full and partial scan - * @data: channel configuration and probe request packet. 
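For the @data area just described, the overall LMAC scan command length is the fixed part plus one channel-config entry per channel plus the probe-request template. A minimal sketch, with the sizes passed in as parameters rather than taken from the real structures:

#include <stddef.h>

static size_t lmac_scan_cmd_len(size_t fixed_len,    /* fixed command part */
				size_t chan_cfg_len, /* per-channel config entry */
				size_t probe_len,    /* probe request template */
				unsigned int n_channels)
{
	/* fixed part, then n_channels channel configs, then the probe req */
	return fixed_len + n_channels * chan_cfg_len + probe_len;
}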
- */ -struct iwl_scan_req_lmac { - /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */ - __le32 reserved1; - u8 n_channels; - u8 active_dwell; - u8 passive_dwell; - u8 fragmented_dwell; - u8 extended_dwell; - u8 reserved2; - __le16 rx_chain_select; - __le32 scan_flags; - __le32 max_out_time; - __le32 suspend_time; - /* RX_ON_FLAGS_API_S_VER_1 */ - __le32 flags; - __le32 filter_flags; - struct iwl_scan_req_tx_cmd tx_cmd[2]; - struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; - __le32 scan_prio; - /* SCAN_REQ_PERIODIC_PARAMS_API_S */ - __le32 iter_num; - __le32 delay; - struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS]; - struct iwl_scan_channel_opt channel_opt[2]; - u8 data[]; -} __packed; - -/** - * struct iwl_scan_results_notif - scan results for one channel - - * SCAN_RESULT_NTF_API_S_VER_3 - * @channel: which channel the results are from - * @band: 0 for 5.2 GHz, 1 for 2.4 GHz - * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request - * @num_probe_not_sent: # of request that weren't sent due to not enough time - * @duration: duration spent in channel, in usecs - */ -struct iwl_scan_results_notif { - u8 channel; - u8 band; - u8 probe_status; - u8 num_probe_not_sent; - __le32 duration; -} __packed; - -/** - * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels) - * SCAN_COMPLETE_NTF_API_S_VER_3 - * @scanned_channels: number of channels scanned (and number of valid results) - * @status: one of SCAN_COMP_STATUS_* - * @bt_status: BT on/off status - * @last_channel: last channel that was scanned - * @tsf_low: TSF timer (lower half) in usecs - * @tsf_high: TSF timer (higher half) in usecs - * @results: an array of scan results, only "scanned_channels" of them are valid - */ -struct iwl_lmac_scan_complete_notif { - u8 scanned_channels; - u8 status; - u8 bt_status; - u8 last_channel; - __le32 tsf_low; - __le32 tsf_high; - struct iwl_scan_results_notif results[]; -} __packed; - -/** - * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2 - * @last_schedule_line: last schedule line executed (fast or regular) - * @last_schedule_iteration: last scan iteration executed before scan abort - * @status: &enum iwl_scan_offload_complete_status - * @ebs_status: EBS success status &enum iwl_scan_ebs_status - * @time_after_last_iter: time in seconds elapsed after last iteration - * @reserved: reserved - */ -struct iwl_periodic_scan_complete { - u8 last_schedule_line; - u8 last_schedule_iteration; - u8 status; - u8 ebs_status; - __le32 time_after_last_iter; - __le32 reserved; -} __packed; - -/* UMAC Scan API */ - -/* The maximum of either of these cannot exceed 8, because we use an - * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). 
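Only the first @scanned_channels entries of the trailing results[] array in the scan-complete notification are valid. A small consumer sketch, using illustrative host-endian mirror types rather than the __le-typed originals:

#include <stdint.h>
#include <stdio.h>

/* Host-endian mirrors of the notification layouts, for illustration only. */
struct scan_result {
	uint8_t channel, band, probe_status, num_probe_not_sent;
	uint32_t duration;
};

struct scan_complete {
	uint8_t scanned_channels, status, bt_status, last_channel;
	uint32_t tsf_low, tsf_high;
	struct scan_result results[]; /* scanned_channels valid entries */
};

static void dump_scan_results(const struct scan_complete *n)
{
	for (unsigned int i = 0; i < n->scanned_channels; i++)
		printf("channel %u: %u us on air\n",
		       (unsigned int)n->results[i].channel,
		       (unsigned int)n->results[i].duration);
}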
- */ -#define IWL_MVM_MAX_UMAC_SCANS 8 -#define IWL_MVM_MAX_LMAC_SCANS 1 - -enum scan_config_flags { - SCAN_CONFIG_FLAG_ACTIVATE = BIT(0), - SCAN_CONFIG_FLAG_DEACTIVATE = BIT(1), - SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = BIT(2), - SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = BIT(3), - SCAN_CONFIG_FLAG_SET_TX_CHAINS = BIT(8), - SCAN_CONFIG_FLAG_SET_RX_CHAINS = BIT(9), - SCAN_CONFIG_FLAG_SET_AUX_STA_ID = BIT(10), - SCAN_CONFIG_FLAG_SET_ALL_TIMES = BIT(11), - SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = BIT(12), - SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = BIT(13), - SCAN_CONFIG_FLAG_SET_LEGACY_RATES = BIT(14), - SCAN_CONFIG_FLAG_SET_MAC_ADDR = BIT(15), - SCAN_CONFIG_FLAG_SET_FRAGMENTED = BIT(16), - SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = BIT(17), - SCAN_CONFIG_FLAG_SET_CAM_MODE = BIT(18), - SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = BIT(19), - SCAN_CONFIG_FLAG_SET_PROMISC_MODE = BIT(20), - SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = BIT(21), - SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED = BIT(22), - SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED = BIT(23), - - /* Bits 26-31 are for num of channels in channel_array */ -#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26) -}; - -enum scan_config_rates { - /* OFDM basic rates */ - SCAN_CONFIG_RATE_6M = BIT(0), - SCAN_CONFIG_RATE_9M = BIT(1), - SCAN_CONFIG_RATE_12M = BIT(2), - SCAN_CONFIG_RATE_18M = BIT(3), - SCAN_CONFIG_RATE_24M = BIT(4), - SCAN_CONFIG_RATE_36M = BIT(5), - SCAN_CONFIG_RATE_48M = BIT(6), - SCAN_CONFIG_RATE_54M = BIT(7), - /* CCK basic rates */ - SCAN_CONFIG_RATE_1M = BIT(8), - SCAN_CONFIG_RATE_2M = BIT(9), - SCAN_CONFIG_RATE_5M = BIT(10), - SCAN_CONFIG_RATE_11M = BIT(11), - - /* Bits 16-27 are for supported rates */ -#define SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16) -}; - -enum iwl_channel_flags { - IWL_CHANNEL_FLAG_EBS = BIT(0), - IWL_CHANNEL_FLAG_ACCURATE_EBS = BIT(1), - IWL_CHANNEL_FLAG_EBS_ADD = BIT(2), - IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = BIT(3), -}; - -/** - * struct iwl_scan_dwell - * @active: default dwell time for active scan - * @passive: default dwell time for passive scan - * @fragmented: default dwell time for fragmented scan - * @extended: default dwell time for channels 1, 6 and 11 - */ -struct iwl_scan_dwell { - u8 active; - u8 passive; - u8 fragmented; - u8 extended; -} __packed; - -/** - * struct iwl_scan_config - * @flags: enum scan_config_flags - * @tx_chains: valid_tx antenna - ANT_* definitions - * @rx_chains: valid_rx antenna - ANT_* definitions - * @legacy_rates: default legacy rates - enum scan_config_rates - * @out_of_channel_time: default max out of serving channel time - * @suspend_time: default max suspend time - * @dwell: dwells for the scan - * @mac_addr: default mac address to be used in probes - * @bcast_sta_id: the index of the station in the fw - * @channel_flags: default channel flags - enum iwl_channel_flags - * scan_config_channel_flag - * @channel_array: default supported channels - */ -struct iwl_scan_config_v1 { - __le32 flags; - __le32 tx_chains; - __le32 rx_chains; - __le32 legacy_rates; - __le32 out_of_channel_time; - __le32 suspend_time; - struct iwl_scan_dwell dwell; - u8 mac_addr[ETH_ALEN]; - u8 bcast_sta_id; - u8 channel_flags; - u8 channel_array[]; -} __packed; /* SCAN_CONFIG_DB_CMD_API_S */ - -#define SCAN_TWO_LMACS 2 - -struct iwl_scan_config { - __le32 flags; - __le32 tx_chains; - __le32 rx_chains; - __le32 legacy_rates; - __le32 out_of_channel_time[SCAN_TWO_LMACS]; - __le32 suspend_time[SCAN_TWO_LMACS]; - struct iwl_scan_dwell dwell; - u8 mac_addr[ETH_ALEN]; - u8 bcast_sta_id; - u8 channel_flags; - u8 channel_array[]; -} 
__packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */ - -/** - * enum iwl_umac_scan_flags - UMAC scan flags - * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request - * can be preempted by other scan requests with higher priority. - * The low priority scan will be resumed when the higher proirity scan is - * completed. - * @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver - * when scan starts. - */ -enum iwl_umac_scan_flags { - IWL_UMAC_SCAN_FLAG_PREEMPTIVE = BIT(0), - IWL_UMAC_SCAN_FLAG_START_NOTIF = BIT(1), -}; - -enum iwl_umac_scan_uid_offsets { - IWL_UMAC_SCAN_UID_TYPE_OFFSET = 0, - IWL_UMAC_SCAN_UID_SEQ_OFFSET = 8, -}; - -enum iwl_umac_scan_general_flags { - IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC = BIT(0), - IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT = BIT(1), - IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL = BIT(2), - IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE = BIT(3), - IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = BIT(4), - IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = BIT(5), - IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = BIT(6), - IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = BIT(7), - IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = BIT(8), - IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), - IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), - IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), -}; - -/** - * struct iwl_scan_channel_cfg_umac - * @flags: bitmap - 0-19: directed scan to i'th ssid. - * @channel_num: channel number 1-13 etc. - * @iter_count: repetition count for the channel. - * @iter_interval: interval between two scan iterations on one channel. - */ -struct iwl_scan_channel_cfg_umac { - __le32 flags; - u8 channel_num; - u8 iter_count; - __le16 iter_interval; -} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */ - -/** - * struct iwl_scan_umac_schedule - * @interval: interval in seconds between scan iterations - * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop - * @reserved: for alignment and future use - */ -struct iwl_scan_umac_schedule { - __le16 interval; - u8 iter_count; - u8 reserved; -} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */ - -/** - * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command - * parameters following channels configuration array. - * @schedule: two scheduling plans. 
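Per the UID offsets above, the 32-bit scan UID packs a type into bits 0-7 and a sequence number from bit 8 upward. A minimal sketch; the constants are copied from the enum, while the helper itself is hypothetical:

#include <stdint.h>

#define SCAN_UID_TYPE_OFFSET 0 /* IWL_UMAC_SCAN_UID_TYPE_OFFSET */
#define SCAN_UID_SEQ_OFFSET  8 /* IWL_UMAC_SCAN_UID_SEQ_OFFSET */

static inline uint32_t make_scan_uid(uint32_t type, uint32_t seq)
{
	return (type << SCAN_UID_TYPE_OFFSET) | (seq << SCAN_UID_SEQ_OFFSET);
}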
- * @delay: delay in TUs before starting the first scan iteration - * @reserved: for future use and alignment - * @preq: probe request with IEs blocks - * @direct_scan: list of SSIDs for directed active scan - */ -struct iwl_scan_req_umac_tail { - /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */ - struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS]; - __le16 delay; - __le16 reserved; - /* SCAN_PROBE_PARAMS_API_S_VER_1 */ - struct iwl_scan_probe_req preq; - struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX]; -} __packed; - -/** - * struct iwl_scan_req_umac - * @flags: &enum iwl_umac_scan_flags - * @uid: scan id, &enum iwl_umac_scan_uid_offsets - * @ooc_priority: out of channel priority - &enum iwl_scan_priority - * @general_flags: &enum iwl_umac_scan_general_flags - * @reserved2: for future use and alignment - * @scan_start_mac_id: report the scan start TSF time according to this mac TSF - * @extended_dwell: dwell time for channels 1, 6 and 11 - * @active_dwell: dwell time for active scan - * @passive_dwell: dwell time for passive scan - * @fragmented_dwell: dwell time for fragmented passive scan - * @max_out_time: max out of serving channel time, per LMAC - for CDB there - * are 2 LMACs - * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs - * @scan_priority: scan internal prioritization &enum iwl_scan_priority - * @channel_flags: &enum iwl_scan_channel_flags - * @n_channels: num of channels in scan request - * @reserved: for future use and alignment - * @data: &struct iwl_scan_channel_cfg_umac and - * &struct iwl_scan_req_umac_tail - */ -struct iwl_scan_req_umac { - __le32 flags; - __le32 uid; - __le32 ooc_priority; - /* SCAN_GENERAL_PARAMS_API_S_VER_4 */ - __le16 general_flags; - u8 reserved2; - u8 scan_start_mac_id; - u8 extended_dwell; - u8 active_dwell; - u8 passive_dwell; - u8 fragmented_dwell; - union { - struct { - __le32 max_out_time; - __le32 suspend_time; - __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ - u8 channel_flags; - u8 n_channels; - __le16 reserved; - u8 data[]; - } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ - struct { - __le32 max_out_time[SCAN_TWO_LMACS]; - __le32 suspend_time[SCAN_TWO_LMACS]; - __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ - u8 channel_flags; - u8 n_channels; - __le16 reserved; - u8 data[]; - } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ - }; -} __packed; - -#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac) -#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(__le32)) - -/** - * struct iwl_umac_scan_abort - * @uid: scan id, &enum iwl_umac_scan_uid_offsets - * @flags: reserved - */ -struct iwl_umac_scan_abort { - __le32 uid; - __le32 flags; -} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */ - -/** - * struct iwl_umac_scan_complete - * @uid: scan id, &enum iwl_umac_scan_uid_offsets - * @last_schedule: last scheduling line - * @last_iter: last scan iteration number - * @status: &enum iwl_scan_offload_complete_status - * @ebs_status: &enum iwl_scan_ebs_status - * @time_from_last_iter: time elapsed from last iteration - * @reserved: for future use - */ -struct iwl_umac_scan_complete { - __le32 uid; - u8 last_schedule; - u8 last_iter; - u8 status; - u8 ebs_status; - __le32 time_from_last_iter; - __le32 reserved; -} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */ - -#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5 -/** - * struct iwl_scan_offload_profile_match - match information - * @bssid: matched bssid - * @reserved: reserved - * @channel: channel where the 
match occurred - * @energy: energy - * @matching_feature: feature matches - * @matching_channels: bitmap of channels that matched, referencing - * the channels passed in tue scan offload request - */ -struct iwl_scan_offload_profile_match { - u8 bssid[ETH_ALEN]; - __le16 reserved; - u8 channel; - u8 energy; - u8 matching_feature; - u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN]; -} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */ - -/** - * struct iwl_scan_offload_profiles_query - match results query response - * @matched_profiles: bitmap of matched profiles, referencing the - * matches passed in the scan offload request - * @last_scan_age: age of the last offloaded scan - * @n_scans_done: number of offloaded scans done - * @gp2_d0u: GP2 when D0U occurred - * @gp2_invoked: GP2 when scan offload was invoked - * @resume_while_scanning: not used - * @self_recovery: obsolete - * @reserved: reserved - * @matches: array of match information, one for each match - */ -struct iwl_scan_offload_profiles_query { - __le32 matched_profiles; - __le32 last_scan_age; - __le32 n_scans_done; - __le32 gp2_d0u; - __le32 gp2_invoked; - u8 resume_while_scanning; - u8 self_recovery; - __le16 reserved; - struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES]; -} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */ - -/** - * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration - * @uid: scan id, &enum iwl_umac_scan_uid_offsets - * @scanned_channels: number of channels scanned and number of valid elements in - * results array - * @status: one of SCAN_COMP_STATUS_* - * @bt_status: BT on/off status - * @last_channel: last channel that was scanned - * @start_tsf: TSF timer in usecs of the scan start time for the mac specified - * in &struct iwl_scan_req_umac. - * @results: array of scan results, length in @scanned_channels - */ -struct iwl_umac_scan_iter_complete_notif { - __le32 uid; - u8 scanned_channels; - u8 status; - u8 bt_status; - u8 last_channel; - __le64 start_tsf; - struct iwl_scan_results_notif results[]; -} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */ - -#endif diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h deleted file mode 100644 index 81f0a3463bac..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ /dev/null @@ -1,578 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. 
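In the match-results query above, @matched_profiles is a bitmap indexed like the profiles of the original scan-offload request, so each set bit selects one matches[] entry. A sketch of how a consumer might walk it, for illustration only:

#include <stdint.h>
#include <stdio.h>

static void report_matched_profiles(uint32_t matched_profiles,
				    unsigned int max_profiles)
{
	for (unsigned int i = 0; i < max_profiles; i++)
		if (matched_profiles & (1U << i))
			printf("profile %u matched\n", i);
}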
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *****************************************************************************/ - -#ifndef __fw_api_sta_h__ -#define __fw_api_sta_h__ - -/** - * enum iwl_sta_flags - flags for the ADD_STA host command - * @STA_FLG_REDUCED_TX_PWR_CTRL: reduced TX power (control frames) - * @STA_FLG_REDUCED_TX_PWR_DATA: reduced TX power (data frames) - * @STA_FLG_DISABLE_TX: set if TX should be disabled - * @STA_FLG_PS: set if STA is in Power Save - * @STA_FLG_INVALID: set if STA is invalid - * @STA_FLG_DLP_EN: Direct Link Protocol is enabled - * @STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs - * @STA_FLG_DRAIN_FLOW: drain flow - * @STA_FLG_PAN: STA is for PAN interface - * @STA_FLG_CLASS_AUTH: station is authenticated - * @STA_FLG_CLASS_ASSOC: station is associated - * @STA_FLG_RTS_MIMO_PROT: station requires RTS MIMO protection (dynamic SMPS) - * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU (mask) - * @STA_FLG_MAX_AGG_SIZE_SHIFT: maximal size for A-MPDU (bit shift) - * @STA_FLG_MAX_AGG_SIZE_8K: maximal size for A-MPDU (8k supported) - * @STA_FLG_MAX_AGG_SIZE_16K: maximal size for A-MPDU (16k supported) - * @STA_FLG_MAX_AGG_SIZE_32K: maximal size for A-MPDU (32k supported) - * @STA_FLG_MAX_AGG_SIZE_64K: maximal size for A-MPDU (64k supported) - * @STA_FLG_MAX_AGG_SIZE_128K: maximal size for A-MPDU (128k supported) - * @STA_FLG_MAX_AGG_SIZE_256K: maximal size for A-MPDU (256k supported) - * @STA_FLG_MAX_AGG_SIZE_512K: maximal size for A-MPDU (512k supported) - * @STA_FLG_MAX_AGG_SIZE_1024K: maximal size for A-MPDU (1024k supported) - * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation - * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is - * initialised by driver and can be updated by fw upon reception of - * action frames that can change the channel width. When cleared the fw - * will send all the frames in 20MHz even when FAT channel is requested. - * @STA_FLG_FAT_EN_20MHZ: no wide channels are supported, only 20 MHz - * @STA_FLG_FAT_EN_40MHZ: wide channels up to 40 MHz supported - * @STA_FLG_FAT_EN_80MHZ: wide channels up to 80 MHz supported - * @STA_FLG_FAT_EN_160MHZ: wide channels up to 160 MHz supported - * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the - * driver and can be updated by fw upon reception of action frames. 
- * @STA_FLG_MIMO_EN_SISO: no support for MIMO - * @STA_FLG_MIMO_EN_MIMO2: 2 streams supported - * @STA_FLG_MIMO_EN_MIMO3: 3 streams supported - * @STA_FLG_MFP_EN: Management Frame Protection - * @STA_FLG_AGG_MPDU_DENS_MSK: A-MPDU density (mask) - * @STA_FLG_AGG_MPDU_DENS_SHIFT: A-MPDU density (bit shift) - * @STA_FLG_AGG_MPDU_DENS_2US: A-MPDU density (2 usec gap) - * @STA_FLG_AGG_MPDU_DENS_4US: A-MPDU density (4 usec gap) - * @STA_FLG_AGG_MPDU_DENS_8US: A-MPDU density (8 usec gap) - * @STA_FLG_AGG_MPDU_DENS_16US: A-MPDU density (16 usec gap) - */ -enum iwl_sta_flags { - STA_FLG_REDUCED_TX_PWR_CTRL = BIT(3), - STA_FLG_REDUCED_TX_PWR_DATA = BIT(6), - - STA_FLG_DISABLE_TX = BIT(4), - - STA_FLG_PS = BIT(8), - STA_FLG_DRAIN_FLOW = BIT(12), - STA_FLG_PAN = BIT(13), - STA_FLG_CLASS_AUTH = BIT(14), - STA_FLG_CLASS_ASSOC = BIT(15), - STA_FLG_RTS_MIMO_PROT = BIT(17), - - STA_FLG_MAX_AGG_SIZE_SHIFT = 19, - STA_FLG_MAX_AGG_SIZE_8K = (0 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_16K = (1 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_32K = (2 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_64K = (3 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_128K = (4 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_256K = (5 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_512K = (6 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_1024K = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), - STA_FLG_MAX_AGG_SIZE_MSK = (7 << STA_FLG_MAX_AGG_SIZE_SHIFT), - - STA_FLG_AGG_MPDU_DENS_SHIFT = 23, - STA_FLG_AGG_MPDU_DENS_2US = (4 << STA_FLG_AGG_MPDU_DENS_SHIFT), - STA_FLG_AGG_MPDU_DENS_4US = (5 << STA_FLG_AGG_MPDU_DENS_SHIFT), - STA_FLG_AGG_MPDU_DENS_8US = (6 << STA_FLG_AGG_MPDU_DENS_SHIFT), - STA_FLG_AGG_MPDU_DENS_16US = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), - STA_FLG_AGG_MPDU_DENS_MSK = (7 << STA_FLG_AGG_MPDU_DENS_SHIFT), - - STA_FLG_FAT_EN_20MHZ = (0 << 26), - STA_FLG_FAT_EN_40MHZ = (1 << 26), - STA_FLG_FAT_EN_80MHZ = (2 << 26), - STA_FLG_FAT_EN_160MHZ = (3 << 26), - STA_FLG_FAT_EN_MSK = (3 << 26), - - STA_FLG_MIMO_EN_SISO = (0 << 28), - STA_FLG_MIMO_EN_MIMO2 = (1 << 28), - STA_FLG_MIMO_EN_MIMO3 = (2 << 28), - STA_FLG_MIMO_EN_MSK = (3 << 28), -}; - -/** - * enum iwl_sta_key_flag - key flags for the ADD_STA host command - * @STA_KEY_FLG_NO_ENC: no encryption - * @STA_KEY_FLG_WEP: WEP encryption algorithm - * @STA_KEY_FLG_CCM: CCMP encryption algorithm - * @STA_KEY_FLG_TKIP: TKIP encryption algorithm - * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support) - * @STA_KEY_FLG_GCMP: GCMP encryption algorithm - * @STA_KEY_FLG_CMAC: CMAC encryption algorithm - * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm - * @STA_KEY_FLG_EN_MSK: mask for encryption algorithmi value - * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from - * station info array (1 - n 1X mode) - * @STA_KEY_FLG_KEYID_MSK: the index of the key - * @STA_KEY_FLG_KEYID_POS: key index bit position - * @STA_KEY_NOT_VALID: key is invalid - * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key - * @STA_KEY_FLG_KEY_32BYTES: for non-wep key set for 32 bytes key - * @STA_KEY_MULTICAST: set for multical key - * @STA_KEY_MFP: key is used for Management Frame Protection - */ -enum iwl_sta_key_flag { - STA_KEY_FLG_NO_ENC = (0 << 0), - STA_KEY_FLG_WEP = (1 << 0), - STA_KEY_FLG_CCM = (2 << 0), - STA_KEY_FLG_TKIP = (3 << 0), - STA_KEY_FLG_EXT = (4 << 0), - STA_KEY_FLG_GCMP = (5 << 0), - STA_KEY_FLG_CMAC = (6 << 0), - STA_KEY_FLG_ENC_UNKNOWN = (7 << 0), - STA_KEY_FLG_EN_MSK = (7 << 0), - - 
STA_KEY_FLG_WEP_KEY_MAP = BIT(3), - STA_KEY_FLG_KEYID_POS = 8, - STA_KEY_FLG_KEYID_MSK = (3 << STA_KEY_FLG_KEYID_POS), - STA_KEY_NOT_VALID = BIT(11), - STA_KEY_FLG_WEP_13BYTES = BIT(12), - STA_KEY_FLG_KEY_32BYTES = BIT(12), - STA_KEY_MULTICAST = BIT(14), - STA_KEY_MFP = BIT(15), -}; - -/** - * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed - * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue - * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx - * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs - * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid - * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid - * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count - * @STA_MODIFY_PROT_TH: modify RTS threshold - * @STA_MODIFY_QUEUES: modify the queues used by this station - */ -enum iwl_sta_modify_flag { - STA_MODIFY_QUEUE_REMOVAL = BIT(0), - STA_MODIFY_TID_DISABLE_TX = BIT(1), - STA_MODIFY_UAPSD_ACS = BIT(2), - STA_MODIFY_ADD_BA_TID = BIT(3), - STA_MODIFY_REMOVE_BA_TID = BIT(4), - STA_MODIFY_SLEEPING_STA_TX_COUNT = BIT(5), - STA_MODIFY_PROT_TH = BIT(6), - STA_MODIFY_QUEUES = BIT(7), -}; - -/** - * enum iwl_sta_mode - station command mode - * @STA_MODE_ADD: add new station - * @STA_MODE_MODIFY: modify the station - */ -enum iwl_sta_mode { - STA_MODE_ADD = 0, - STA_MODE_MODIFY = 1, -}; - -/** - * enum iwl_sta_sleep_flag - type of sleep of the station - * @STA_SLEEP_STATE_AWAKE: station is awake - * @STA_SLEEP_STATE_PS_POLL: station is PS-polling - * @STA_SLEEP_STATE_UAPSD: station uses U-APSD - * @STA_SLEEP_STATE_MOREDATA: set more-data bit on - * (last) released frame - */ -enum iwl_sta_sleep_flag { - STA_SLEEP_STATE_AWAKE = 0, - STA_SLEEP_STATE_PS_POLL = BIT(0), - STA_SLEEP_STATE_UAPSD = BIT(1), - STA_SLEEP_STATE_MOREDATA = BIT(2), -}; - -#define STA_KEY_MAX_NUM (16) -#define STA_KEY_IDX_INVALID (0xff) -#define STA_KEY_MAX_DATA_KEY_NUM (4) -#define IWL_MAX_GLOBAL_KEYS (4) -#define STA_KEY_LEN_WEP40 (5) -#define STA_KEY_LEN_WEP104 (13) - -/** - * struct iwl_mvm_keyinfo - key information - * @key_flags: type &enum iwl_sta_key_flag - * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection - * @reserved1: reserved - * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx - * @key_offset: key offset in the fw's key table - * @reserved2: reserved - * @key: 16-byte unicast decryption key - * @tx_secur_seq_cnt: initial RSC / PN needed for replay check - * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only - * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only - */ -struct iwl_mvm_keyinfo { - __le16 key_flags; - u8 tkip_rx_tsc_byte2; - u8 reserved1; - __le16 tkip_rx_ttak[5]; - u8 key_offset; - u8 reserved2; - u8 key[16]; - __le64 tx_secur_seq_cnt; - __le64 hw_tkip_mic_rx_key; - __le64 hw_tkip_mic_tx_key; -} __packed; - -#define IWL_ADD_STA_STATUS_MASK 0xFF -#define IWL_ADD_STA_BAID_VALID_MASK 0x8000 -#define IWL_ADD_STA_BAID_MASK 0x7F00 -#define IWL_ADD_STA_BAID_SHIFT 8 - -/** - * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table. - * ( REPLY_ADD_STA = 0x18 ) - * @add_modify: see &enum iwl_sta_mode - * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD) - * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable - * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. 
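The ADD_STA status masks defined above pack the status code into the low byte and, when the valid bit is set, the block-ack ID into bits 8-14. A minimal decoder sketch; the constants are copied from the header, the helper is hypothetical:

#include <stdbool.h>
#include <stdint.h>

#define ADD_STA_STATUS_MASK     0xFF   /* IWL_ADD_STA_STATUS_MASK */
#define ADD_STA_BAID_VALID_MASK 0x8000 /* IWL_ADD_STA_BAID_VALID_MASK */
#define ADD_STA_BAID_MASK       0x7F00 /* IWL_ADD_STA_BAID_MASK */
#define ADD_STA_BAID_SHIFT      8      /* IWL_ADD_STA_BAID_SHIFT */

/* Returns true and fills *baid if the response carries a valid BAID. */
static bool add_sta_get_baid(uint32_t status, uint8_t *baid)
{
	if (!(status & ADD_STA_BAID_VALID_MASK))
		return false;
	*baid = (status & ADD_STA_BAID_MASK) >> ADD_STA_BAID_SHIFT;
	return true;
}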
- * @mac_id_n_color: the Mac context this station belongs to, - * see &enum iwl_mvm_id_and_color - * @addr: station's MAC address - * @reserved2: reserved - * @sta_id: index of station in uCode's station table - * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave - * alone. 1 - modify, 0 - don't change. - * @reserved3: reserved - * @station_flags: look at &enum iwl_sta_flags - * @station_flags_msk: what of %station_flags have changed, - * also &enum iwl_sta_flags - * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) - * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set - * add_immediate_ba_ssn. - * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) - * Set %STA_MODIFY_REMOVE_BA_TID to use this field - * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with - * add_immediate_ba_tid. - * @sleep_tx_count: number of packets to transmit to station even though it is - * asleep. Used to synchronise PS-poll and u-APSD responses while ucode - * keeps track of STA sleep state. - * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag. - * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP - * mac-addr. - * @beamform_flags: beam forming controls - * @tfd_queue_msk: tfd queues used by this station - * - * The device contains an internal table of per-station information, with info - * on security keys, aggregation parameters, and Tx rates for initial Tx - * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). - * - * ADD_STA sets up the table entry for one station, either creating a new - * entry, or modifying a pre-existing one. - */ -struct iwl_mvm_add_sta_cmd_v7 { - u8 add_modify; - u8 awake_acs; - __le16 tid_disable_tx; - __le32 mac_id_n_color; - u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ - __le16 reserved2; - u8 sta_id; - u8 modify_mask; - __le16 reserved3; - __le32 station_flags; - __le32 station_flags_msk; - u8 add_immediate_ba_tid; - u8 remove_immediate_ba_tid; - __le16 add_immediate_ba_ssn; - __le16 sleep_tx_count; - __le16 sleep_state_flags; - __le16 assoc_id; - __le16 beamform_flags; - __le32 tfd_queue_msk; -} __packed; /* ADD_STA_CMD_API_S_VER_7 */ - -/** - * enum iwl_sta_type - FW station types - * ( REPLY_ADD_STA = 0x18 ) - * @IWL_STA_LINK: Link station - normal RX and TX traffic. - * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons - * and probe responses. - * @IWL_STA_MULTICAST: multicast traffic, - * @IWL_STA_TDLS_LINK: TDLS link station - * @IWL_STA_AUX_ACTIVITY: auxilary station (scan, ROC and so on). - */ -enum iwl_sta_type { - IWL_STA_LINK, - IWL_STA_GENERAL_PURPOSE, - IWL_STA_MULTICAST, - IWL_STA_TDLS_LINK, - IWL_STA_AUX_ACTIVITY, -}; - -/** - * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. - * ( REPLY_ADD_STA = 0x18 ) - * @add_modify: see &enum iwl_sta_mode - * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD) - * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable - * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. - * @mac_id_n_color: the Mac context this station belongs to, - * see &enum iwl_mvm_id_and_color - * @addr: station's MAC address - * @reserved2: reserved - * @sta_id: index of station in uCode's station table - * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave - * alone. 1 - modify, 0 - don't change. 
- * @reserved3: reserved - * @station_flags: look at &enum iwl_sta_flags - * @station_flags_msk: what of %station_flags have changed, - * also &enum iwl_sta_flags - * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) - * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set - * add_immediate_ba_ssn. - * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) - * Set %STA_MODIFY_REMOVE_BA_TID to use this field - * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with - * add_immediate_ba_tid. - * @sleep_tx_count: number of packets to transmit to station even though it is - * asleep. Used to synchronise PS-poll and u-APSD responses while ucode - * keeps track of STA sleep state. - * @station_type: type of this station. See &enum iwl_sta_type. - * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag. - * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP - * mac-addr. - * @beamform_flags: beam forming controls - * @tfd_queue_msk: tfd queues used by this station. - * Obselete for new TX API (9 and above). - * @rx_ba_window: aggregation window size - * @sp_length: the size of the SP as it appears in the WME IE - * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver - * enabled ACs. - * - * The device contains an internal table of per-station information, with info - * on security keys, aggregation parameters, and Tx rates for initial Tx - * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). - * - * ADD_STA sets up the table entry for one station, either creating a new - * entry, or modifying a pre-existing one. - */ -struct iwl_mvm_add_sta_cmd { - u8 add_modify; - u8 awake_acs; - __le16 tid_disable_tx; - __le32 mac_id_n_color; - u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ - __le16 reserved2; - u8 sta_id; - u8 modify_mask; - __le16 reserved3; - __le32 station_flags; - __le32 station_flags_msk; - u8 add_immediate_ba_tid; - u8 remove_immediate_ba_tid; - __le16 add_immediate_ba_ssn; - __le16 sleep_tx_count; - u8 sleep_state_flags; - u8 station_type; - __le16 assoc_id; - __le16 beamform_flags; - __le32 tfd_queue_msk; - __le16 rx_ba_window; - u8 sp_length; - u8 uapsd_acs; -} __packed; /* ADD_STA_CMD_API_S_VER_10 */ - -/** - * struct iwl_mvm_add_sta_key_common - add/modify sta key common part - * ( REPLY_ADD_STA_KEY = 0x17 ) - * @sta_id: index of station in uCode's station table - * @key_offset: key offset in key storage - * @key_flags: type &enum iwl_sta_key_flag - * @key: key material data - * @rx_secur_seq_cnt: RX security sequence counter for the key - */ -struct iwl_mvm_add_sta_key_common { - u8 sta_id; - u8 key_offset; - __le16 key_flags; - u8 key[32]; - u8 rx_secur_seq_cnt[16]; -} __packed; - -/** - * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key - * @common: see &struct iwl_mvm_add_sta_key_common - * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection - * @reserved: reserved - * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx - */ -struct iwl_mvm_add_sta_key_cmd_v1 { - struct iwl_mvm_add_sta_key_common common; - u8 tkip_rx_tsc_byte2; - u8 reserved; - __le16 tkip_rx_ttak[5]; -} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */ - -/** - * struct iwl_mvm_add_sta_key_cmd - add/modify sta key - * @common: see &struct iwl_mvm_add_sta_key_common - * @rx_mic_key: TKIP RX unicast or multicast key - * @tx_mic_key: TKIP TX key - * @transmit_seq_cnt: TSC, transmit packet number - */ -struct iwl_mvm_add_sta_key_cmd { - struct iwl_mvm_add_sta_key_common common; - __le64 
rx_mic_key; - __le64 tx_mic_key; - __le64 transmit_seq_cnt; -} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */ - -/** - * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command - * @ADD_STA_SUCCESS: operation was executed successfully - * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table - * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session - * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that - * doesn't exist. - */ -enum iwl_mvm_add_sta_rsp_status { - ADD_STA_SUCCESS = 0x1, - ADD_STA_STATIONS_OVERLOAD = 0x2, - ADD_STA_IMMEDIATE_BA_FAILURE = 0x4, - ADD_STA_MODIFY_NON_EXISTING_STA = 0x8, -}; - -/** - * struct iwl_mvm_rm_sta_cmd - Add / modify a station in the fw's station table - * ( REMOVE_STA = 0x19 ) - * @sta_id: the station id of the station to be removed - * @reserved: reserved - */ -struct iwl_mvm_rm_sta_cmd { - u8 sta_id; - u8 reserved[3]; -} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */ - -/** - * struct iwl_mvm_mgmt_mcast_key_cmd_v1 - * ( MGMT_MCAST_KEY = 0x1f ) - * @ctrl_flags: &enum iwl_sta_key_flag - * @igtk: IGTK key material - * @k1: unused - * @k2: unused - * @sta_id: station ID that support IGTK - * @key_id: key ID - * @receive_seq_cnt: initial RSC/PN needed for replay check - */ -struct iwl_mvm_mgmt_mcast_key_cmd_v1 { - __le32 ctrl_flags; - u8 igtk[16]; - u8 k1[16]; - u8 k2[16]; - __le32 key_id; - __le32 sta_id; - __le64 receive_seq_cnt; -} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */ - -/** - * struct iwl_mvm_mgmt_mcast_key_cmd - * ( MGMT_MCAST_KEY = 0x1f ) - * @ctrl_flags: &enum iwl_sta_key_flag - * @igtk: IGTK master key - * @sta_id: station ID that support IGTK - * @key_id: key ID - * @receive_seq_cnt: initial RSC/PN needed for replay check - */ -struct iwl_mvm_mgmt_mcast_key_cmd { - __le32 ctrl_flags; - u8 igtk[32]; - __le32 key_id; - __le32 sta_id; - __le64 receive_seq_cnt; -} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */ - -struct iwl_mvm_wep_key { - u8 key_index; - u8 key_offset; - __le16 reserved1; - u8 key_size; - u8 reserved2[3]; - u8 key[16]; -} __packed; - -struct iwl_mvm_wep_key_cmd { - __le32 mac_id_n_color; - u8 num_keys; - u8 decryption_type; - u8 flags; - u8 reserved; - struct iwl_mvm_wep_key wep_key[0]; -} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */ - -/** - * struct iwl_mvm_eosp_notification - EOSP notification from firmware - * @remain_frame_count: # of frames remaining, non-zero if SP was cut - * short by GO absence - * @sta_id: station ID - */ -struct iwl_mvm_eosp_notification { - __le32 remain_frame_count; - __le32 sta_id; -} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */ - -#endif /* __fw_api_sta_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h deleted file mode 100644 index c7531da508fd..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h +++ /dev/null @@ -1,479 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
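Since the WEP key command above ends in a flexible wep_key[] array, its buffer is sized from num_keys. A sketch with the entry sizes hard-coded for illustration; the driver would take them from sizeof() on the real structures:

#include <stddef.h>

#define WEP_KEY_CMD_HDR_LEN 8U  /* mac_id_n_color + num_keys/type/flags/resv */
#define WEP_KEY_ENTRY_LEN   24U /* 8-byte per-key header + 16-byte key */

static inline size_t wep_key_cmd_len(unsigned int num_keys)
{
	return WEP_KEY_CMD_HDR_LEN + num_keys * WEP_KEY_ENTRY_LEN;
}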
- * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - *****************************************************************************/ - -#ifndef __fw_api_stats_h__ -#define __fw_api_stats_h__ -#include "fw-api-mac.h" - -struct mvm_statistics_dbg { - __le32 burst_check; - __le32 burst_count; - __le32 wait_for_silence_timeout_cnt; - u8 reserved[12]; -} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */ - -struct mvm_statistics_div { - __le32 tx_on_a; - __le32 tx_on_b; - __le32 exec_time; - __le32 probe_time; - __le32 rssi_ant; - __le32 reserved2; -} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */ - -/** - * struct mvm_statistics_rx_non_phy - * @bogus_cts: CTS received when not expecting CTS - * @bogus_ack: ACK received when not expecting ACK - * @non_channel_beacons: beacons with our bss id but not on our serving channel - * @channel_beacons: beacons with our bss id and in our serving channel - * @num_missed_bcon: number of missed beacons - * @adc_rx_saturation_time: count in 0.8us units the time the ADC was in - * saturation - * @ina_detection_search_time: total time (in 0.8us) searched for INA - * @beacon_silence_rssi_a: RSSI silence after beacon frame - * @beacon_silence_rssi_b: RSSI silence after beacon frame - * @beacon_silence_rssi_c: RSSI silence after beacon frame - * @interference_data_flag: flag for interference data availability. 1 when data - * is available. - * @channel_load: counts RX Enable time in uSec - * @beacon_rssi_a: beacon RSSI on anntena A - * @beacon_rssi_b: beacon RSSI on antenna B - * @beacon_rssi_c: beacon RSSI on antenna C - * @beacon_energy_a: beacon energy on antenna A - * @beacon_energy_b: beacon energy on antenna B - * @beacon_energy_c: beacon energy on antenna C - * @num_bt_kills: number of BT "kills" (frame TX aborts) - * @mac_id: mac ID - */ -struct mvm_statistics_rx_non_phy { - __le32 bogus_cts; - __le32 bogus_ack; - __le32 non_channel_beacons; - __le32 channel_beacons; - __le32 num_missed_bcon; - __le32 adc_rx_saturation_time; - __le32 ina_detection_search_time; - __le32 beacon_silence_rssi_a; - __le32 beacon_silence_rssi_b; - __le32 beacon_silence_rssi_c; - __le32 interference_data_flag; - __le32 channel_load; - __le32 beacon_rssi_a; - __le32 beacon_rssi_b; - __le32 beacon_rssi_c; - __le32 beacon_energy_a; - __le32 beacon_energy_b; - __le32 beacon_energy_c; - __le32 num_bt_kills; - __le32 mac_id; -} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_4 */ - -struct mvm_statistics_rx_non_phy_v3 { - __le32 bogus_cts; /* CTS received when not expecting CTS */ - __le32 bogus_ack; /* ACK received when not expecting ACK */ - __le32 non_bssid_frames; /* number of frames with BSSID that - * doesn't belong to the STA BSSID */ - __le32 filtered_frames; /* count frames that were dumped in the - * filtering process */ - __le32 non_channel_beacons; /* beacons with our bss id but not on - * our serving channel */ - __le32 channel_beacons; /* beacons with our bss id and in our - * serving channel */ - __le32 num_missed_bcon; /* number of missed beacons */ - __le32 adc_rx_saturation_time; /* count in 0.8us units the time the - * ADC was in saturation */ - __le32 ina_detection_search_time;/* total time (in 0.8us) searched - * for INA */ - __le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */ - __le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */ - __le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */ - __le32 interference_data_flag; /* flag for interference data - * availability. 1 when data is - * available. 
*/ - __le32 channel_load; /* counts RX Enable time in uSec */ - __le32 dsp_false_alarms; /* DSP false alarm (both OFDM - * and CCK) counter */ - __le32 beacon_rssi_a; - __le32 beacon_rssi_b; - __le32 beacon_rssi_c; - __le32 beacon_energy_a; - __le32 beacon_energy_b; - __le32 beacon_energy_c; - __le32 num_bt_kills; - __le32 mac_id; - __le32 directed_data_mpdu; -} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */ - -struct mvm_statistics_rx_phy { - __le32 unresponded_rts; - __le32 rxe_frame_lmt_overrun; - __le32 sent_ba_rsp_cnt; - __le32 dsp_self_kill; - __le32 reserved; -} __packed; /* STATISTICS_RX_PHY_API_S_VER_3 */ - -struct mvm_statistics_rx_phy_v2 { - __le32 ina_cnt; - __le32 fina_cnt; - __le32 plcp_err; - __le32 crc32_err; - __le32 overrun_err; - __le32 early_overrun_err; - __le32 crc32_good; - __le32 false_alarm_cnt; - __le32 fina_sync_err_cnt; - __le32 sfd_timeout; - __le32 fina_timeout; - __le32 unresponded_rts; - __le32 rxe_frame_lmt_overrun; - __le32 sent_ack_cnt; - __le32 sent_cts_cnt; - __le32 sent_ba_rsp_cnt; - __le32 dsp_self_kill; - __le32 mh_format_err; - __le32 re_acq_main_rssi_sum; - __le32 reserved; -} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */ - -struct mvm_statistics_rx_ht_phy_v1 { - __le32 plcp_err; - __le32 overrun_err; - __le32 early_overrun_err; - __le32 crc32_good; - __le32 crc32_err; - __le32 mh_format_err; - __le32 agg_crc32_good; - __le32 agg_mpdu_cnt; - __le32 agg_cnt; - __le32 unsupport_mcs; -} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_1 */ - -struct mvm_statistics_rx_ht_phy { - __le32 mh_format_err; - __le32 agg_mpdu_cnt; - __le32 agg_cnt; - __le32 unsupport_mcs; -} __packed; /* STATISTICS_HT_RX_PHY_API_S_VER_2 */ - -struct mvm_statistics_tx_non_phy_v3 { - __le32 preamble_cnt; - __le32 rx_detected_cnt; - __le32 bt_prio_defer_cnt; - __le32 bt_prio_kill_cnt; - __le32 few_bytes_cnt; - __le32 cts_timeout; - __le32 ack_timeout; - __le32 expected_ack_cnt; - __le32 actual_ack_cnt; - __le32 dump_msdu_cnt; - __le32 burst_abort_next_frame_mismatch_cnt; - __le32 burst_abort_missing_next_frame_cnt; - __le32 cts_timeout_collision; - __le32 ack_or_ba_timeout_collision; -} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */ - -struct mvm_statistics_tx_non_phy { - __le32 bt_prio_defer_cnt; - __le32 bt_prio_kill_cnt; - __le32 few_bytes_cnt; - __le32 cts_timeout; - __le32 ack_timeout; - __le32 dump_msdu_cnt; - __le32 burst_abort_next_frame_mismatch_cnt; - __le32 burst_abort_missing_next_frame_cnt; - __le32 cts_timeout_collision; - __le32 ack_or_ba_timeout_collision; -} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_4 */ - -#define MAX_CHAINS 3 - -struct mvm_statistics_tx_non_phy_agg { - __le32 ba_timeout; - __le32 ba_reschedule_frames; - __le32 scd_query_agg_frame_cnt; - __le32 scd_query_no_agg; - __le32 scd_query_agg; - __le32 scd_query_mismatch; - __le32 frame_not_ready; - __le32 underrun; - __le32 bt_prio_kill; - __le32 rx_ba_rsp_cnt; - __s8 txpower[MAX_CHAINS]; - __s8 reserved; - __le32 reserved2; -} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */ - -struct mvm_statistics_tx_channel_width { - __le32 ext_cca_narrow_ch20[1]; - __le32 ext_cca_narrow_ch40[2]; - __le32 ext_cca_narrow_ch80[3]; - __le32 ext_cca_narrow_ch160[4]; - __le32 last_tx_ch_width_indx; - __le32 rx_detected_per_ch_width[4]; - __le32 success_per_ch_width[4]; - __le32 fail_per_ch_width[4]; -}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */ - -struct mvm_statistics_tx_v4 { - struct mvm_statistics_tx_non_phy_v3 general; - struct mvm_statistics_tx_non_phy_agg agg; - struct mvm_statistics_tx_channel_width 
channel_width; -} __packed; /* STATISTICS_TX_API_S_VER_4 */ - -struct mvm_statistics_tx { - struct mvm_statistics_tx_non_phy general; - struct mvm_statistics_tx_non_phy_agg agg; - struct mvm_statistics_tx_channel_width channel_width; -} __packed; /* STATISTICS_TX_API_S_VER_5 */ - - -struct mvm_statistics_bt_activity { - __le32 hi_priority_tx_req_cnt; - __le32 hi_priority_tx_denied_cnt; - __le32 lo_priority_tx_req_cnt; - __le32 lo_priority_tx_denied_cnt; - __le32 hi_priority_rx_req_cnt; - __le32 hi_priority_rx_denied_cnt; - __le32 lo_priority_rx_req_cnt; - __le32 lo_priority_rx_denied_cnt; -} __packed; /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */ - -struct mvm_statistics_general_common_v19 { - __le32 radio_temperature; - __le32 radio_voltage; - struct mvm_statistics_dbg dbg; - __le32 sleep_time; - __le32 slots_out; - __le32 slots_idle; - __le32 ttl_timestamp; - struct mvm_statistics_div slow_div; - __le32 rx_enable_counter; - /* - * num_of_sos_states: - * count the number of times we have to re-tune - * in order to get out of bad PHY status - */ - __le32 num_of_sos_states; - __le32 beacon_filtered; - __le32 missed_beacons; - u8 beacon_filter_average_energy; - u8 beacon_filter_reason; - u8 beacon_filter_current_energy; - u8 beacon_filter_reserved; - __le32 beacon_filter_delta_time; - struct mvm_statistics_bt_activity bt_activity; - __le64 rx_time; - __le64 on_time_rf; - __le64 on_time_scan; - __le64 tx_time; -} __packed; - -struct mvm_statistics_general_common { - __le32 radio_temperature; - struct mvm_statistics_dbg dbg; - __le32 sleep_time; - __le32 slots_out; - __le32 slots_idle; - __le32 ttl_timestamp; - struct mvm_statistics_div slow_div; - __le32 rx_enable_counter; - /* - * num_of_sos_states: - * count the number of times we have to re-tune - * in order to get out of bad PHY status - */ - __le32 num_of_sos_states; - __le32 beacon_filtered; - __le32 missed_beacons; - u8 beacon_filter_average_energy; - u8 beacon_filter_reason; - u8 beacon_filter_current_energy; - u8 beacon_filter_reserved; - __le32 beacon_filter_delta_time; - struct mvm_statistics_bt_activity bt_activity; - __le64 rx_time; - __le64 on_time_rf; - __le64 on_time_scan; - __le64 tx_time; -} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */ - -struct mvm_statistics_general_v8 { - struct mvm_statistics_general_common_v19 common; - __le32 beacon_counter[NUM_MAC_INDEX]; - u8 beacon_average_energy[NUM_MAC_INDEX]; - u8 reserved[4 - (NUM_MAC_INDEX % 4)]; -} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */ - -struct mvm_statistics_general_cdb_v9 { - struct mvm_statistics_general_common_v19 common; - __le32 beacon_counter[NUM_MAC_INDEX_CDB]; - u8 beacon_average_energy[NUM_MAC_INDEX_CDB]; - u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)]; -} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */ - -struct mvm_statistics_general_cdb { - struct mvm_statistics_general_common common; - __le32 beacon_counter[MAC_INDEX_AUX]; - u8 beacon_average_energy[MAC_INDEX_AUX]; - u8 reserved[8 - MAC_INDEX_AUX]; -} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */ - -/** - * struct mvm_statistics_load - RX statistics for multi-queue devices - * @air_time: accumulated air time, per mac - * @byte_count: accumulated byte count, per mac - * @pkt_count: accumulated packet count, per mac - * @avg_energy: average RSSI, per station - */ -struct mvm_statistics_load { - __le32 air_time[MAC_INDEX_AUX]; - __le32 byte_count[MAC_INDEX_AUX]; - __le32 pkt_count[MAC_INDEX_AUX]; - u8 avg_energy[IWL_MVM_STATION_COUNT]; -} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */ - -struct 
mvm_statistics_load_v1 { - __le32 air_time[NUM_MAC_INDEX]; - __le32 byte_count[NUM_MAC_INDEX]; - __le32 pkt_count[NUM_MAC_INDEX]; - u8 avg_energy[IWL_MVM_STATION_COUNT]; -} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */ - -struct mvm_statistics_rx { - struct mvm_statistics_rx_phy ofdm; - struct mvm_statistics_rx_phy cck; - struct mvm_statistics_rx_non_phy general; - struct mvm_statistics_rx_ht_phy ofdm_ht; -} __packed; /* STATISTICS_RX_API_S_VER_4 */ - -struct mvm_statistics_rx_v3 { - struct mvm_statistics_rx_phy_v2 ofdm; - struct mvm_statistics_rx_phy_v2 cck; - struct mvm_statistics_rx_non_phy_v3 general; - struct mvm_statistics_rx_ht_phy_v1 ofdm_ht; -} __packed; /* STATISTICS_RX_API_S_VER_3 */ - -/* - * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command) - * - * By default, uCode issues this notification after receiving a beacon - * while associated. To disable this behavior, set DISABLE_NOTIF flag in the - * STATISTICS_CMD (0x9c), below. - */ - -struct iwl_notif_statistics_v10 { - __le32 flag; - struct mvm_statistics_rx_v3 rx; - struct mvm_statistics_tx_v4 tx; - struct mvm_statistics_general_v8 general; -} __packed; /* STATISTICS_NTFY_API_S_VER_10 */ - -struct iwl_notif_statistics_v11 { - __le32 flag; - struct mvm_statistics_rx_v3 rx; - struct mvm_statistics_tx_v4 tx; - struct mvm_statistics_general_v8 general; - struct mvm_statistics_load_v1 load_stats; -} __packed; /* STATISTICS_NTFY_API_S_VER_11 */ - -struct iwl_notif_statistics_cdb { - __le32 flag; - struct mvm_statistics_rx rx; - struct mvm_statistics_tx tx; - struct mvm_statistics_general_cdb general; - struct mvm_statistics_load load_stats; -} __packed; /* STATISTICS_NTFY_API_S_VER_13 */ - -/** - * enum iwl_statistics_notif_flags - flags used in statistics notification - * @IWL_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report - */ -enum iwl_statistics_notif_flags { - IWL_STATISTICS_REPLY_FLG_CLEAR = 0x1, -}; - -/** - * enum iwl_statistics_cmd_flags - flags used in statistics command - * @IWL_STATISTICS_FLG_CLEAR: request to clear statistics after the report - * that's sent after this command - * @IWL_STATISTICS_FLG_DISABLE_NOTIF: disable unilateral statistics - * notifications - */ -enum iwl_statistics_cmd_flags { - IWL_STATISTICS_FLG_CLEAR = 0x1, - IWL_STATISTICS_FLG_DISABLE_NOTIF = 0x2, -}; - -/** - * struct iwl_statistics_cmd - statistics config command - * @flags: flags from &enum iwl_statistics_cmd_flags - */ -struct iwl_statistics_cmd { - __le32 flags; -} __packed; /* STATISTICS_CMD_API_S_VER_1 */ - -#endif /* __fw_api_stats_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h deleted file mode 100644 index 8658a983c463..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h +++ /dev/null @@ -1,398 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
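As a rough illustration of how the two statistics-command flag bits above combine into the single flags word, here is a minimal host-side sketch; the struct and helper names are hypothetical stand-ins, and plain uint32_t replaces __le32 (a real driver would convert with cpu_to_le32() before sending):

/* Sketch: compose a STATISTICS_CMD payload from the flag bits defined above. */
#include <stdint.h>

#define IWL_STATISTICS_FLG_CLEAR		0x1
#define IWL_STATISTICS_FLG_DISABLE_NOTIF	0x2

struct iwl_statistics_cmd_sketch {
	uint32_t flags;		/* STATISTICS_CMD_API_S_VER_1: only bits 0-1 used */
};

/* Request one report, optionally clearing counters afterwards and/or
 * keeping the unsolicited per-beacon STATISTICS_NOTIFICATION stream off.
 */
static struct iwl_statistics_cmd_sketch build_stats_cmd(int clear, int quiet)
{
	struct iwl_statistics_cmd_sketch cmd = { .flags = 0 };

	if (clear)
		cmd.flags |= IWL_STATISTICS_FLG_CLEAR;
	if (quiet)
		cmd.flags |= IWL_STATISTICS_FLG_DISABLE_NOTIF;
	return cmd;
}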
See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2015 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#ifndef __fw_api_tof_h__ -#define __fw_api_tof_h__ - -/* ToF sub-group command IDs */ -enum iwl_mvm_tof_sub_grp_ids { - TOF_RANGE_REQ_CMD = 0x1, - TOF_CONFIG_CMD = 0x2, - TOF_RANGE_ABORT_CMD = 0x3, - TOF_RANGE_REQ_EXT_CMD = 0x4, - TOF_RESPONDER_CONFIG_CMD = 0x5, - TOF_NW_INITIATED_RES_SEND_CMD = 0x6, - TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7, - TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC, - TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD, - TOF_RANGE_RESPONSE_NOTIF = 0xFE, - TOF_MCSI_DEBUG_NOTIF = 0xFB, -}; - -/** - * struct iwl_tof_config_cmd - ToF configuration - * @tof_disabled: 0 enabled, 1 - disabled - * @one_sided_disabled: 0 enabled, 1 - disabled - * @is_debug_mode: 1 debug mode, 0 - otherwise - * @is_buf_required: 1 channel estimation buffer required, 0 - otherwise - */ -struct iwl_tof_config_cmd { - __le32 sub_grp_cmd_id; - u8 tof_disabled; - u8 one_sided_disabled; - u8 is_debug_mode; - u8 is_buf_required; -} __packed; - -/** - * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug) - * @burst_period: future use: (currently hard coded in the LMAC) - * The interval between two sequential bursts. - * @min_delta_ftm: future use: (currently hard coded in the LMAC) - * The minimum delay between two sequential FTM Responses - * in the same burst. 
- * @burst_duration: future use: (currently hard coded in the LMAC) - * The total time for all FTMs handshake in the same burst. - * Affect the time events duration in the LMAC. - * @num_of_burst_exp: future use: (currently hard coded in the LMAC) - * The number of bursts for the current ToF request. Affect - * the number of events allocations in the current iteration. - * @get_ch_est: for xVT only, NA for driver - * @abort_responder: when set to '1' - Responder will terminate its activity - * (all other fields in the command are ignored) - * @recv_sta_req_params: 1 - Responder will ignore the other Responder's - * params and use the recomended Initiator params. - * 0 - otherwise - * @channel_num: current AP Channel - * @bandwidth: current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @rate: current AP rate - * @ctrl_ch_position: coding of the control channel position relative to - * the center frequency: - * - * 40 MHz - * 0 below center, 1 above center - * - * 80 MHz - * bits [0..1] - * * 0 the near 20MHz to the center, - * * 1 the far 20MHz to the center - * bit[2] - * as above 40MHz - * @ftm_per_burst: FTMs per Burst - * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response, - * '1' - we measure over the Initial FTM Response - * @asap_mode: ASAP / Non ASAP mode for the current WLS station - * @sta_id: index of the AP STA when in AP mode - * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF - * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug - * purposes, simulating station movement by adding various values - * to this field - * @bssid: Current AP BSSID - */ -struct iwl_tof_responder_config_cmd { - __le32 sub_grp_cmd_id; - __le16 burst_period; - u8 min_delta_ftm; - u8 burst_duration; - u8 num_of_burst_exp; - u8 get_ch_est; - u8 abort_responder; - u8 recv_sta_req_params; - u8 channel_num; - u8 bandwidth; - u8 rate; - u8 ctrl_ch_position; - u8 ftm_per_burst; - u8 ftm_resp_ts_avail; - u8 asap_mode; - u8 sta_id; - __le16 tsf_timer_offset_msecs; - __le16 toa_offset; - u8 bssid[ETH_ALEN]; -} __packed; - -/** - * struct iwl_tof_range_request_ext_cmd - extended range req for WLS - * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF - * @reserved: reserved - * @min_delta_ftm: Minimal time between two consecutive measurements, - * in units of 100us. 0 means no preference by station - * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended - * value be sent to the AP - * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended - * value to be sent to the AP - * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended - * value to be sent to the AP - */ -struct iwl_tof_range_req_ext_cmd { - __le32 sub_grp_cmd_id; - __le16 tsf_timer_offset_msec; - __le16 reserved; - u8 min_delta_ftm; - u8 ftm_format_and_bw20M; - u8 ftm_format_and_bw40M; - u8 ftm_format_and_bw80M; -} __packed; - -#define IWL_MVM_TOF_MAX_APS 21 - -/** - * struct iwl_tof_range_req_ap_entry - AP configuration parameters - * @channel_num: Current AP Channel - * @bandwidth: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @tsf_delta_direction: TSF relatively to the subject AP - * @ctrl_ch_position: Coding of the control channel position relative to the - * center frequency. 
- * 40MHz 0 below center, 1 above center - * 80MHz bits [0..1]: 0 the near 20MHz to the center, - * 1 the far 20MHz to the center - * bit[2] as above 40MHz - * @bssid: AP's bss id - * @measure_type: Measurement type: 0 - two sided, 1 - One sided - * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of the - * number of measurement iterations (min 2^0 = 1, max 2^14) - * @burst_period: Recommended value to be sent to the AP. Measurement - * periodicity In units of 100ms. ignored if num_of_bursts = 0 - * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31) - * 1-sided: how many rts/cts pairs should be used per burst. - * @retries_per_sample: Max number of retries that the LMAC should send - * in case of no replies by the AP. - * @tsf_delta: TSF Delta in units of microseconds. - * The difference between the AP TSF and the device local clock. - * @location_req: Location Request Bit[0] LCI should be sent in the FTMR - * Bit[1] Civic should be sent in the FTMR - * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided) - * @enable_dyn_ack: Enable Dynamic ACK BW. - * 0 Initiator interact with regular AP - * 1 Initiator interact with Responder machine: need to send the - * Initiator Acks with HT 40MHz / 80MHz, since the Responder should - * use it for its ch est measurement (this flag will be set when we - * configure the opposite machine to be Responder). - * @rssi: Last received value - * leagal values: -128-0 (0x7f). above 0x0 indicating an invalid value. - */ -struct iwl_tof_range_req_ap_entry { - u8 channel_num; - u8 bandwidth; - u8 tsf_delta_direction; - u8 ctrl_ch_position; - u8 bssid[ETH_ALEN]; - u8 measure_type; - u8 num_of_bursts; - __le16 burst_period; - u8 samples_per_burst; - u8 retries_per_sample; - __le32 tsf_delta; - u8 location_req; - u8 asap_mode; - u8 enable_dyn_ack; - s8 rssi; -} __packed; - -/** - * enum iwl_tof_response_mode - * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as - * possible (not supported for this release) - * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon - * timeout expiration - * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the - * earlier of: measurements completion / timeout - * expiration. - */ -enum iwl_tof_response_mode { - IWL_MVM_TOF_RESPOSE_ASAP = 1, - IWL_MVM_TOF_RESPOSE_TIMEOUT, - IWL_MVM_TOF_RESPOSE_COMPLETE, -}; - -/** - * struct iwl_tof_range_req_cmd - start measurement cmd - * @request_id: A Token incremented per request. The same Token will be - * sent back in the range response - * @initiator: 0- NW initiated, 1 - Client Initiated - * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided, - * '1' - run ML-Algo for ToF only - * @req_timeout: Requested timeout of the response in units of 100ms. - * This is equivalent to the session time configured to the - * LMAC in Initiator Request - * @report_policy: Supported partially for this release: For current release - - * the range report will be uploaded as a batch when ready or - * when the session is done (successfully / partially). - * one of iwl_tof_response_mode. - * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) - * @macaddr_random: '0' Use default source MAC address (i.e. p2_p), - * '1' Use MAC Address randomization according to the below - * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. 
- * Bits set to 1 shall be randomized by the UMAC - * @ap: per-AP request data - */ -struct iwl_tof_range_req_cmd { - __le32 sub_grp_cmd_id; - u8 request_id; - u8 initiator; - u8 one_sided_los_disable; - u8 req_timeout; - u8 report_policy; - u8 los_det_disable; - u8 num_of_ap; - u8 macaddr_random; - u8 macaddr_template[ETH_ALEN]; - u8 macaddr_mask[ETH_ALEN]; - struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS]; -} __packed; - -/** - * struct iwl_tof_gen_resp_cmd - generic ToF response - */ -struct iwl_tof_gen_resp_cmd { - __le32 sub_grp_cmd_id; - u8 data[]; -} __packed; - -/** - * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response) - * @bssid: BSSID of the AP - * @measure_status: current APs measurement status, one of - * &enum iwl_tof_entry_status. - * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz - * @rtt: The Round Trip Time that took for the last measurement for - * current AP [nSec] - * @rtt_variance: The Variance of the RTT values measured for current AP - * @rtt_spread: The Difference between the maximum and the minimum RTT - * values measured for current AP in the current session [nsec] - * @rssi: RSSI as uploaded in the Channel Estimation notification - * @rssi_spread: The Difference between the maximum and the minimum RSSI values - * measured for current AP in the current session - * @reserved: reserved - * @range: Measured range [cm] - * @range_variance: Measured range variance [cm] - * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was - * uploaded by the LMAC - */ -struct iwl_tof_range_rsp_ap_entry_ntfy { - u8 bssid[ETH_ALEN]; - u8 measure_status; - u8 measure_bw; - __le32 rtt; - __le32 rtt_variance; - __le32 rtt_spread; - s8 rssi; - u8 rssi_spread; - __le16 reserved; - __le32 range; - __le32 range_variance; - __le32 timestamp; -} __packed; - -/** - * struct iwl_tof_range_rsp_ntfy - - * @request_id: A Token ID of the corresponding Range request - * @request_status: status of current measurement session - * @last_in_batch: reprot policy (when not all responses are uploaded at once) - * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) - * @ap: per-AP data - */ -struct iwl_tof_range_rsp_ntfy { - u8 request_id; - u8 request_status; - u8 last_in_batch; - u8 num_of_aps; - struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS]; -} __packed; - -#define IWL_MVM_TOF_MCSI_BUF_SIZE (245) -/** - * struct iwl_tof_mcsi_notif - used for debug - * @token: token ID for the current session - * @role: '0' - initiator, '1' - responder - * @reserved: reserved - * @initiator_bssid: initiator machine - * @responder_bssid: responder machine - * @mcsi_buffer: debug data - */ -struct iwl_tof_mcsi_notif { - u8 token; - u8 role; - __le16 reserved; - u8 initiator_bssid[ETH_ALEN]; - u8 responder_bssid[ETH_ALEN]; - u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4]; -} __packed; - -/** - * struct iwl_tof_neighbor_report_notif - * @bssid: BSSID of the AP which sent the report - * @request_token: same token as the corresponding request - * @status: - * @report_ie_len: the length of the response frame starting from the Element ID - * @data: the IEs - */ -struct iwl_tof_neighbor_report { - u8 bssid[ETH_ALEN]; - u8 request_token; - u8 status; - __le16 report_ie_len; - u8 data[]; -} __packed; - -/** - * struct iwl_tof_range_abort_cmd - * @request_id: corresponds to a range request - * @reserved: reserved - */ -struct iwl_tof_range_abort_cmd { - __le32 sub_grp_cmd_id; - u8 request_id; - u8 reserved[3]; -} __packed; - -#endif diff 
--git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h deleted file mode 100644 index 97d7eed32622..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ /dev/null @@ -1,917 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- *****************************************************************************/ - -#ifndef __fw_api_tx_h__ -#define __fw_api_tx_h__ - -/** - * enum iwl_tx_flags - bitmasks for tx_flags in TX command - * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame - * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame - * @TX_CMD_FLG_ACK: expect ACK from receiving station - * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command. - * Otherwise, use rate_n_flags from the TX command - * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected - * Must set TX_CMD_FLG_ACK with this flag. - * @TX_CMD_FLG_TXOP_PROT: TXOP protection requested - * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence - * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence - * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC) - * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored - * on old firmwares). - * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame - * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control. - * Should be set for mgmt, non-QOS data, mcast, bcast and in scan command - * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU - * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame - * Should be set for beacons and probe responses - * @TX_CMD_FLG_CALIB: activate PA TX power calibrations - * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count - * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header. - * Should be set for 26/30 length MAC headers - * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW - * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation - * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id - * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped - * @TX_CMD_FLG_EXEC_PAPD: execute PAPD - * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power - * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk - */ -enum iwl_tx_flags { - TX_CMD_FLG_PROT_REQUIRE = BIT(0), - TX_CMD_FLG_WRITE_TX_POWER = BIT(1), - TX_CMD_FLG_ACK = BIT(3), - TX_CMD_FLG_STA_RATE = BIT(4), - TX_CMD_FLG_BAR = BIT(6), - TX_CMD_FLG_TXOP_PROT = BIT(7), - TX_CMD_FLG_VHT_NDPA = BIT(8), - TX_CMD_FLG_HT_NDPA = BIT(9), - TX_CMD_FLG_CSI_FDBK2HOST = BIT(10), - TX_CMD_FLG_BT_PRIO_POS = 11, - TX_CMD_FLG_BT_DIS = BIT(12), - TX_CMD_FLG_SEQ_CTL = BIT(13), - TX_CMD_FLG_MORE_FRAG = BIT(14), - TX_CMD_FLG_TSF = BIT(16), - TX_CMD_FLG_CALIB = BIT(17), - TX_CMD_FLG_KEEP_SEQ_CTL = BIT(18), - TX_CMD_FLG_MH_PAD = BIT(20), - TX_CMD_FLG_RESP_TO_DRV = BIT(21), - TX_CMD_FLG_TKIP_MIC_DONE = BIT(23), - TX_CMD_FLG_DUR = BIT(25), - TX_CMD_FLG_FW_DROP = BIT(26), - TX_CMD_FLG_EXEC_PAPD = BIT(27), - TX_CMD_FLG_PAPD_TYPE = BIT(28), - TX_CMD_FLG_HCCA_CHUNK = BIT(31) -}; /* TX_FLAGS_BITS_API_S_VER_1 */ - -/** - * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for a000 - * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command - * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs - * to a secured STA - * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate - * selection, retry limits and BT kill - */ -enum iwl_tx_cmd_flags { - IWL_TX_FLAGS_CMD_RATE = BIT(0), - IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1), - IWL_TX_FLAGS_HIGH_PRI = BIT(2), -}; /* TX_FLAGS_BITS_API_S_VER_3 */ - -/** - * enum iwl_tx_pm_timeouts - pm timeout values 
in TX command - * @PM_FRAME_NONE: no need to suspend sleep mode - * @PM_FRAME_MGMT: fw suspend sleep mode for 100TU - * @PM_FRAME_ASSOC: fw suspend sleep mode for 10sec - */ -enum iwl_tx_pm_timeouts { - PM_FRAME_NONE = 0, - PM_FRAME_MGMT = 2, - PM_FRAME_ASSOC = 3, -}; - -#define TX_CMD_SEC_MSK 0x07 -#define TX_CMD_SEC_WEP_KEY_IDX_POS 6 -#define TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0 - -/** - * enum iwl_tx_cmd_sec_ctrl - bitmasks for security control in TX command - * @TX_CMD_SEC_WEP: WEP encryption algorithm. - * @TX_CMD_SEC_CCM: CCM encryption algorithm. - * @TX_CMD_SEC_TKIP: TKIP encryption algorithm. - * @TX_CMD_SEC_EXT: extended cipher algorithm. - * @TX_CMD_SEC_GCMP: GCMP encryption algorithm. - * @TX_CMD_SEC_KEY128: set for 104 bits WEP key. - * @TX_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken - * from the table instead of from the TX command. - * If the key is taken from the key table its index should be given by the - * first byte of the TX command key field. - */ -enum iwl_tx_cmd_sec_ctrl { - TX_CMD_SEC_WEP = 0x01, - TX_CMD_SEC_CCM = 0x02, - TX_CMD_SEC_TKIP = 0x03, - TX_CMD_SEC_EXT = 0x04, - TX_CMD_SEC_GCMP = 0x05, - TX_CMD_SEC_KEY128 = 0x08, - TX_CMD_SEC_KEY_FROM_TABLE = 0x10, -}; - -/* - * TX command Frame life time in us - to be written in pm_frame_timeout - */ -#define TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF -#define TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms*/ -#define TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */ -#define TX_CMD_LIFE_TIME_EXPIRED_FRAME 0 - -/* - * TID for non QoS frames - to be written in tid_tspec - */ -#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT - -/* - * Limits on the retransmissions - to be written in {data,rts}_retry_limit - */ -#define IWL_DEFAULT_TX_RETRY 15 -#define IWL_MGMT_DFAULT_RETRY_LIMIT 3 -#define IWL_RTS_DFAULT_RETRY_LIMIT 60 -#define IWL_BAR_DFAULT_RETRY_LIMIT 60 -#define IWL_LOW_RETRY_LIMIT 7 - -/** - * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values - * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words) - * from mac header end. For normal case it is 4 words for SNAP. - * note: tx_cmd, mac header and pad are not counted in the offset. - * This is used to help the offload in case there is tunneling such as - * IPv6 in IPv4, in such case the ip header offset should point to the - * inner ip header and IPv4 checksum of the external header should be - * calculated by driver. - * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum - * @TX_CMD_OFFLD_L3_EN: enable IP header checksum - * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV - * field. Doesn't include the pad. - * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for - * alignment - * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU - */ -enum iwl_tx_offload_assist_flags_pos { - TX_CMD_OFFLD_IP_HDR = 0, - TX_CMD_OFFLD_L4_EN = 6, - TX_CMD_OFFLD_L3_EN = 7, - TX_CMD_OFFLD_MH_SIZE = 8, - TX_CMD_OFFLD_PAD = 13, - TX_CMD_OFFLD_AMSDU = 14, -}; - -#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f -#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f - -/* TODO: complete documentation for try_cnt and btkill_cnt */ -/** - * struct iwl_tx_cmd - TX command struct to FW - * ( TX_CMD = 0x1c ) - * @len: in bytes of the payload, see below for details - * @offload_assist: TX offload configuration - * @tx_flags: combination of TX_CMD_FLG_* - * @scratch: scratch buffer used by the device - * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is - * cleared. 
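A minimal sketch of packing the 16-bit offload_assist word from the bit positions and masks defined above; the helper name is hypothetical and a host-order uint16_t stands in for __le16:

/* Sketch: pack offload_assist from the TX_CMD_OFFLD_* positions and masks. */
#include <stdint.h>

enum {
	TX_CMD_OFFLD_IP_HDR	= 0,	/* offset to IP header, in words */
	TX_CMD_OFFLD_L4_EN	= 6,	/* enable TCP/UDP checksum */
	TX_CMD_OFFLD_L3_EN	= 7,	/* enable IP header checksum */
	TX_CMD_OFFLD_MH_SIZE	= 8,	/* MAC header size, in words */
	TX_CMD_OFFLD_PAD	= 13,	/* 2-byte pad follows the MAC header */
	TX_CMD_OFFLD_AMSDU	= 14,	/* frame is an A-MSDU */
};

#define IWL_TX_CMD_OFFLD_MH_MASK	0x1f
#define IWL_TX_CMD_OFFLD_IP_HDR_MASK	0x3f

static uint16_t pack_offload_assist(unsigned int ip_hdr_words,
				    unsigned int mh_words,
				    int pad, int amsdu, int csum)
{
	uint16_t oa = 0;

	oa |= (ip_hdr_words & IWL_TX_CMD_OFFLD_IP_HDR_MASK) << TX_CMD_OFFLD_IP_HDR;
	oa |= (mh_words & IWL_TX_CMD_OFFLD_MH_MASK) << TX_CMD_OFFLD_MH_SIZE;
	if (csum)
		oa |= (1 << TX_CMD_OFFLD_L3_EN) | (1 << TX_CMD_OFFLD_L4_EN);
	if (pad)
		oa |= 1 << TX_CMD_OFFLD_PAD;
	if (amsdu)
		oa |= 1 << TX_CMD_OFFLD_AMSDU;
	return oa;
}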
Combination of RATE_MCS_* - * @sta_id: index of destination station in FW station table - * @sec_ctl: security control, TX_CMD_SEC_* - * @initial_rate_index: index into the the rate table for initial TX attempt. - * Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames. - * @reserved2: reserved - * @key: security key - * @reserved3: reserved - * @life_time: frame life time (usecs??) - * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt + - * btkill_cnd + reserved), first 32 bits. "0" disables usage. - * @dram_msb_ptr: upper bits of the scratch physical address - * @rts_retry_limit: max attempts for RTS - * @data_retry_limit: max attempts to send the data packet - * @tid_tspec: TID/tspec - * @pm_frame_timeout: PM TX frame timeout - * @reserved4: reserved - * @payload: payload (same as @hdr) - * @hdr: 802.11 header (same as @payload) - * - * The byte count (both len and next_frame_len) includes MAC header - * (24/26/30/32 bytes) - * + 2 bytes pad if 26/30 header size - * + 8 byte IV for CCM or TKIP (not used for WEP) - * + Data payload - * + 8-byte MIC (not used for CCM/WEP) - * It does not include post-MAC padding, i.e., - * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes. - * Range of len: 14-2342 bytes. - * - * After the struct fields the MAC header is placed, plus any padding, - * and then the actial payload. - */ -struct iwl_tx_cmd { - __le16 len; - __le16 offload_assist; - __le32 tx_flags; - struct { - u8 try_cnt; - u8 btkill_cnt; - __le16 reserved; - } scratch; /* DRAM_SCRATCH_API_U_VER_1 */ - __le32 rate_n_flags; - u8 sta_id; - u8 sec_ctl; - u8 initial_rate_index; - u8 reserved2; - u8 key[16]; - __le32 reserved3; - __le32 life_time; - __le32 dram_lsb_ptr; - u8 dram_msb_ptr; - u8 rts_retry_limit; - u8 data_retry_limit; - u8 tid_tspec; - __le16 pm_frame_timeout; - __le16 reserved4; - u8 payload[0]; - struct ieee80211_hdr hdr[0]; -} __packed; /* TX_CMD_API_S_VER_6 */ - -struct iwl_dram_sec_info { - __le32 pn_low; - __le16 pn_high; - __le16 aux_info; -} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */ - -/** - * struct iwl_tx_cmd_gen2 - TX command struct to FW for a000 devices - * ( TX_CMD = 0x1c ) - * @len: in bytes of the payload, see below for details - * @offload_assist: TX offload configuration - * @flags: combination of &enum iwl_tx_cmd_flags - * @dram_info: FW internal DRAM storage - * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is - * cleared. 
Combination of RATE_MCS_* - * @hdr: 802.11 header - */ -struct iwl_tx_cmd_gen2 { - __le16 len; - __le16 offload_assist; - __le32 flags; - struct iwl_dram_sec_info dram_info; - __le32 rate_n_flags; - struct ieee80211_hdr hdr[0]; -} __packed; /* TX_CMD_API_S_VER_7 */ - -/* - * TX response related data - */ - -/* - * enum iwl_tx_status - status that is returned by the fw after attempts to Tx - * @TX_STATUS_SUCCESS: - * @TX_STATUS_DIRECT_DONE: - * @TX_STATUS_POSTPONE_DELAY: - * @TX_STATUS_POSTPONE_FEW_BYTES: - * @TX_STATUS_POSTPONE_BT_PRIO: - * @TX_STATUS_POSTPONE_QUIET_PERIOD: - * @TX_STATUS_POSTPONE_CALC_TTAK: - * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY: - * @TX_STATUS_FAIL_SHORT_LIMIT: - * @TX_STATUS_FAIL_LONG_LIMIT: - * @TX_STATUS_FAIL_UNDERRUN: - * @TX_STATUS_FAIL_DRAIN_FLOW: - * @TX_STATUS_FAIL_RFKILL_FLUSH: - * @TX_STATUS_FAIL_LIFE_EXPIRE: - * @TX_STATUS_FAIL_DEST_PS: - * @TX_STATUS_FAIL_HOST_ABORTED: - * @TX_STATUS_FAIL_BT_RETRY: - * @TX_STATUS_FAIL_STA_INVALID: - * @TX_TATUS_FAIL_FRAG_DROPPED: - * @TX_STATUS_FAIL_TID_DISABLE: - * @TX_STATUS_FAIL_FIFO_FLUSHED: - * @TX_STATUS_FAIL_SMALL_CF_POLL: - * @TX_STATUS_FAIL_FW_DROP: - * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and - * STA table - * @TX_FRAME_STATUS_INTERNAL_ABORT: - * @TX_MODE_MSK: - * @TX_MODE_NO_BURST: - * @TX_MODE_IN_BURST_SEQ: - * @TX_MODE_FIRST_IN_BURST: - * @TX_QUEUE_NUM_MSK: - * - * Valid only if frame_count =1 - * TODO: complete documentation - */ -enum iwl_tx_status { - TX_STATUS_MSK = 0x000000ff, - TX_STATUS_SUCCESS = 0x01, - TX_STATUS_DIRECT_DONE = 0x02, - /* postpone TX */ - TX_STATUS_POSTPONE_DELAY = 0x40, - TX_STATUS_POSTPONE_FEW_BYTES = 0x41, - TX_STATUS_POSTPONE_BT_PRIO = 0x42, - TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43, - TX_STATUS_POSTPONE_CALC_TTAK = 0x44, - /* abort TX */ - TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81, - TX_STATUS_FAIL_SHORT_LIMIT = 0x82, - TX_STATUS_FAIL_LONG_LIMIT = 0x83, - TX_STATUS_FAIL_UNDERRUN = 0x84, - TX_STATUS_FAIL_DRAIN_FLOW = 0x85, - TX_STATUS_FAIL_RFKILL_FLUSH = 0x86, - TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, - TX_STATUS_FAIL_DEST_PS = 0x88, - TX_STATUS_FAIL_HOST_ABORTED = 0x89, - TX_STATUS_FAIL_BT_RETRY = 0x8a, - TX_STATUS_FAIL_STA_INVALID = 0x8b, - TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, - TX_STATUS_FAIL_TID_DISABLE = 0x8d, - TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e, - TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f, - TX_STATUS_FAIL_FW_DROP = 0x90, - TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91, - TX_STATUS_INTERNAL_ABORT = 0x92, - TX_MODE_MSK = 0x00000f00, - TX_MODE_NO_BURST = 0x00000000, - TX_MODE_IN_BURST_SEQ = 0x00000100, - TX_MODE_FIRST_IN_BURST = 0x00000200, - TX_QUEUE_NUM_MSK = 0x0001f000, - TX_NARROW_BW_MSK = 0x00060000, - TX_NARROW_BW_1DIV2 = 0x00020000, - TX_NARROW_BW_1DIV4 = 0x00040000, - TX_NARROW_BW_1DIV8 = 0x00060000, -}; - -/* - * enum iwl_tx_agg_status - TX aggregation status - * @AGG_TX_STATE_STATUS_MSK: - * @AGG_TX_STATE_TRANSMITTED: - * @AGG_TX_STATE_UNDERRUN: - * @AGG_TX_STATE_BT_PRIO: - * @AGG_TX_STATE_FEW_BYTES: - * @AGG_TX_STATE_ABORT: - * @AGG_TX_STATE_LAST_SENT_TTL: - * @AGG_TX_STATE_LAST_SENT_TRY_CNT: - * @AGG_TX_STATE_LAST_SENT_BT_KILL: - * @AGG_TX_STATE_SCD_QUERY: - * @AGG_TX_STATE_TEST_BAD_CRC32: - * @AGG_TX_STATE_RESPONSE: - * @AGG_TX_STATE_DUMP_TX: - * @AGG_TX_STATE_DELAY_TX: - * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries - * occur if tx failed for this frame when it was a member of a previous - * aggregation block). 
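The status code, burst mode, queue number and narrow-bandwidth fields share one 32-bit status word; a small sketch of splitting it with the masks above (shift amounts derived from the masks: status bits 0-7, mode bits 8-11, queue bits 12-16, narrow-BW bits 17-18; helper name hypothetical):

/* Sketch: break a raw Tx status word into its sub-fields. */
#include <stdint.h>
#include <stdio.h>

#define TX_STATUS_MSK		0x000000ff
#define TX_MODE_MSK		0x00000f00
#define TX_QUEUE_NUM_MSK	0x0001f000
#define TX_NARROW_BW_MSK	0x00060000

#define TX_STATUS_SUCCESS	0x01

static void dump_tx_status(uint32_t s)
{
	uint32_t code  = s & TX_STATUS_MSK;
	uint32_t mode  = (s & TX_MODE_MSK) >> 8;
	uint32_t queue = (s & TX_QUEUE_NUM_MSK) >> 12;
	uint32_t nbw   = (s & TX_NARROW_BW_MSK) >> 17;

	printf("status 0x%02x (%s) mode %u queue %u narrow-bw %u\n",
	       code, code == TX_STATUS_SUCCESS ? "success" : "fail/postpone",
	       mode, queue, nbw);
}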
If rate scaling is used, retry count indicates the - * rate table entry used for all frames in the new agg. - *@ AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for - * this frame - * - * TODO: complete documentation - */ -enum iwl_tx_agg_status { - AGG_TX_STATE_STATUS_MSK = 0x00fff, - AGG_TX_STATE_TRANSMITTED = 0x000, - AGG_TX_STATE_UNDERRUN = 0x001, - AGG_TX_STATE_BT_PRIO = 0x002, - AGG_TX_STATE_FEW_BYTES = 0x004, - AGG_TX_STATE_ABORT = 0x008, - AGG_TX_STATE_LAST_SENT_TTL = 0x010, - AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020, - AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040, - AGG_TX_STATE_SCD_QUERY = 0x080, - AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100, - AGG_TX_STATE_RESPONSE = 0x1ff, - AGG_TX_STATE_DUMP_TX = 0x200, - AGG_TX_STATE_DELAY_TX = 0x400, - AGG_TX_STATE_TRY_CNT_POS = 12, - AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS, -}; - -#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \ - AGG_TX_STATE_LAST_SENT_TRY_CNT| \ - AGG_TX_STATE_LAST_SENT_BT_KILL) - -/* - * The mask below describes a status where we are absolutely sure that the MPDU - * wasn't sent. For BA/Underrun we cannot be that sure. All we know that we've - * written the bytes to the TXE, but we know nothing about what the DSP did. - */ -#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \ - AGG_TX_STATE_ABORT | \ - AGG_TX_STATE_SCD_QUERY) - -/* - * REPLY_TX = 0x1c (response) - * - * This response may be in one of two slightly different formats, indicated - * by the frame_count field: - * - * 1) No aggregation (frame_count == 1). This reports Tx results for a single - * frame. Multiple attempts, at various bit rates, may have been made for - * this frame. - * - * 2) Aggregation (frame_count > 1). This reports Tx results for two or more - * frames that used block-acknowledge. All frames were transmitted at - * same rate. Rate scaling may have been used if first frame in this new - * agg block failed in previous agg block(s). - * - * Note that, for aggregation, ACK (block-ack) status is not delivered - * here; block-ack has not been received by the time the device records - * this status. - * This status relates to reasons the tx might have been blocked or aborted - * within the device, rather than whether it was received successfully by - * the destination station. - */ - -/** - * struct agg_tx_status - per packet TX aggregation status - * @status: See &enum iwl_tx_agg_status - * @sequence: Sequence # for this frame's Tx cmd (not SSN!) - */ -struct agg_tx_status { - __le16 status; - __le16 sequence; -} __packed; - -/* - * definitions for initial rate index field - * bits [3:0] initial rate index - * bits [6:4] rate table color, used for the initial rate - * bit-7 invalid rate indication - */ -#define TX_RES_INIT_RATE_INDEX_MSK 0x0f -#define TX_RES_RATE_TABLE_COLOR_POS 4 -#define TX_RES_RATE_TABLE_COLOR_MSK 0x70 -#define TX_RES_INV_RATE_INDEX_MSK 0x80 -#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\ - TX_RES_RATE_TABLE_COLOR_POS) - -#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) -#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) - -/** - * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet - * ( REPLY_TX = 0x1c ) - * @frame_count: 1 no aggregation, >1 aggregation - * @bt_kill_count: num of times blocked by bluetooth (unused for agg) - * @failure_rts: num of failures due to unsuccessful RTS - * @failure_frame: num failures due to no ACK (unused for agg) - * @initial_rate: for non-agg: rate of the successful Tx. 
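A short sketch of interpreting one agg_tx_status entry with the masks above, e.g. to separate frames that certainly never left the device from ordinary failures; helper names are hypothetical and the status is assumed already converted to host order:

/* Sketch: classify an aggregation status word and extract its retry count. */
#include <stdint.h>

#define AGG_TX_STATE_FEW_BYTES		0x004
#define AGG_TX_STATE_ABORT		0x008
#define AGG_TX_STATE_SCD_QUERY		0x080
#define AGG_TX_STATE_TRY_CNT_POS	12
#define AGG_TX_STATE_TRY_CNT_MSK	(0xf << AGG_TX_STATE_TRY_CNT_POS)

/* States where the MPDU was certainly never sent over the air. */
#define AGG_TX_STAT_FRAME_NOT_SENT	(AGG_TX_STATE_FEW_BYTES | \
					 AGG_TX_STATE_ABORT | \
					 AGG_TX_STATE_SCD_QUERY)

static int agg_frame_not_sent(uint16_t status)
{
	return !!(status & AGG_TX_STAT_FRAME_NOT_SENT);
}

static unsigned int agg_try_count(uint16_t status)
{
	return (status & AGG_TX_STATE_TRY_CNT_MSK) >> AGG_TX_STATE_TRY_CNT_POS;
}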
For agg: rate of the - * Tx of all the batch. RATE_MCS_* - * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. - * for agg: RTS + CTS + aggregation tx time + block-ack time. - * in usec. - * @pa_status: tx power info - * @pa_integ_res_a: tx power info - * @pa_integ_res_b: tx power info - * @pa_integ_res_c: tx power info - * @measurement_req_id: tx power info - * @reduced_tpc: transmit power reduction used - * @reserved: reserved - * @tfd_info: TFD information set by the FH - * @seq_ctl: sequence control from the Tx cmd - * @byte_cnt: byte count from the Tx cmd - * @tlc_info: TLC rate info - * @ra_tid: bits [3:0] = ra, bits [7:4] = tid - * @frame_ctrl: frame control - * @status: for non-agg: frame status TX_STATUS_* - * for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields - * follow this one, up to frame_count. Length in @frame_count. - * - * After the array of statuses comes the SSN of the SCD. Look at - * %iwl_mvm_get_scd_ssn for more details. - */ -struct iwl_mvm_tx_resp_v3 { - u8 frame_count; - u8 bt_kill_count; - u8 failure_rts; - u8 failure_frame; - __le32 initial_rate; - __le16 wireless_media_time; - - u8 pa_status; - u8 pa_integ_res_a[3]; - u8 pa_integ_res_b[3]; - u8 pa_integ_res_c[3]; - __le16 measurement_req_id; - u8 reduced_tpc; - u8 reserved; - - __le32 tfd_info; - __le16 seq_ctl; - __le16 byte_cnt; - u8 tlc_info; - u8 ra_tid; - __le16 frame_ctrl; - struct agg_tx_status status[]; -} __packed; /* TX_RSP_API_S_VER_3 */ - -/** - * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet - * ( REPLY_TX = 0x1c ) - * @frame_count: 1 no aggregation, >1 aggregation - * @bt_kill_count: num of times blocked by bluetooth (unused for agg) - * @failure_rts: num of failures due to unsuccessful RTS - * @failure_frame: num failures due to no ACK (unused for agg) - * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the - * Tx of all the batch. RATE_MCS_* - * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. - * for agg: RTS + CTS + aggregation tx time + block-ack time. - * in usec. - * @pa_status: tx power info - * @pa_integ_res_a: tx power info - * @pa_integ_res_b: tx power info - * @pa_integ_res_c: tx power info - * @measurement_req_id: tx power info - * @reduced_tpc: transmit power reduction used - * @reserved: reserved - * @tfd_info: TFD information set by the FH - * @seq_ctl: sequence control from the Tx cmd - * @byte_cnt: byte count from the Tx cmd - * @tlc_info: TLC rate info - * @ra_tid: bits [3:0] = ra, bits [7:4] = tid - * @frame_ctrl: frame control - * @tx_queue: TX queue for this response - * @reserved2: reserved for padding/alignment - * @status: for non-agg: frame status TX_STATUS_* - * For version 6 TX response isn't received for aggregation at all. - * - * After the array of statuses comes the SSN of the SCD. Look at - * %iwl_mvm_get_scd_ssn for more details. 
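A sketch of decoding the packed ra_tid byte and an 8-bit initial-rate field with the helpers and masks defined above; which response field the rate-index/color masks apply to is an assumption here, and the struct and function names are hypothetical:

/* Sketch: unpack ra_tid and an initial-rate byte from a Tx response. */
#include <stdint.h>

#define TX_RES_INIT_RATE_INDEX_MSK	0x0f
#define TX_RES_RATE_TABLE_COLOR_POS	4
#define TX_RES_RATE_TABLE_COLOR_MSK	0x70
#define TX_RES_INV_RATE_INDEX_MSK	0x80

#define IWL_MVM_TX_RES_GET_TID(_ra_tid)	((_ra_tid) & 0x0f)
#define IWL_MVM_TX_RES_GET_RA(_ra_tid)	((_ra_tid) >> 4)

struct tx_resp_fields {
	uint8_t ra;		/* as returned by IWL_MVM_TX_RES_GET_RA() */
	uint8_t tid;		/* as returned by IWL_MVM_TX_RES_GET_TID() */
	uint8_t rate_idx;	/* initial rate table index */
	uint8_t rate_color;	/* rate table color for the initial rate */
	int rate_invalid;	/* set when the index field is flagged invalid */
};

static struct tx_resp_fields parse_ra_tid_rate(uint8_t ra_tid, uint8_t rate_byte)
{
	/* Note: the nibble assignment follows the GET_RA/GET_TID helpers as
	 * defined above; the kernel-doc text gives the opposite ordering.
	 */
	struct tx_resp_fields f = {
		.ra           = IWL_MVM_TX_RES_GET_RA(ra_tid),
		.tid          = IWL_MVM_TX_RES_GET_TID(ra_tid),
		.rate_idx     = rate_byte & TX_RES_INIT_RATE_INDEX_MSK,
		.rate_color   = (rate_byte & TX_RES_RATE_TABLE_COLOR_MSK) >>
				TX_RES_RATE_TABLE_COLOR_POS,
		.rate_invalid = !!(rate_byte & TX_RES_INV_RATE_INDEX_MSK),
	};
	return f;
}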
- */ -struct iwl_mvm_tx_resp { - u8 frame_count; - u8 bt_kill_count; - u8 failure_rts; - u8 failure_frame; - __le32 initial_rate; - __le16 wireless_media_time; - - u8 pa_status; - u8 pa_integ_res_a[3]; - u8 pa_integ_res_b[3]; - u8 pa_integ_res_c[3]; - __le16 measurement_req_id; - u8 reduced_tpc; - u8 reserved; - - __le32 tfd_info; - __le16 seq_ctl; - __le16 byte_cnt; - u8 tlc_info; - u8 ra_tid; - __le16 frame_ctrl; - __le16 tx_queue; - __le16 reserved2; - struct agg_tx_status status; -} __packed; /* TX_RSP_API_S_VER_6 */ - -/** - * struct iwl_mvm_ba_notif - notifies about reception of BA - * ( BA_NOTIF = 0xc5 ) - * @sta_addr: MAC address - * @reserved: reserved - * @sta_id: Index of recipient (BA-sending) station in fw's station table - * @tid: tid of the session - * @seq_ctl: sequence control field - * @bitmap: the bitmap of the BA notification as seen in the air - * @scd_flow: the tx queue this BA relates to - * @scd_ssn: the index of the last contiguously sent packet - * @txed: number of Txed frames in this batch - * @txed_2_done: number of Acked frames in this batch - * @reduced_txp: power reduced according to TPC. This is the actual value and - * not a copy from the LQ command. Thus, if not the first rate was used - * for Tx-ing then this value will be set to 0 by FW. - * @reserved1: reserved - */ -struct iwl_mvm_ba_notif { - u8 sta_addr[ETH_ALEN]; - __le16 reserved; - - u8 sta_id; - u8 tid; - __le16 seq_ctl; - __le64 bitmap; - __le16 scd_flow; - __le16 scd_ssn; - u8 txed; - u8 txed_2_done; - u8 reduced_txp; - u8 reserved1; -} __packed; - -/** - * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue - * @q_num: TFD queue number - * @tfd_index: Index of first un-acked frame in the TFD queue - * @scd_queue: For debug only - the physical queue the TFD queue is bound to - * @tid: TID of the queue (0-7) - * @reserved: reserved for alignment - */ -struct iwl_mvm_compressed_ba_tfd { - __le16 q_num; - __le16 tfd_index; - u8 scd_queue; - u8 tid; - u8 reserved[2]; -} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */ - -/** - * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue - * @q_num: RA TID queue number - * @tid: TID of the queue - * @ssn: BA window current SSN - */ -struct iwl_mvm_compressed_ba_ratid { - u8 q_num; - u8 tid; - __le16 ssn; -} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */ - -/* - * enum iwl_mvm_ba_resp_flags - TX aggregation status - * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA - * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR - * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA - * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun - * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill - * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the - * expected time - */ -enum iwl_mvm_ba_resp_flags { - IWL_MVM_BA_RESP_TX_AGG, - IWL_MVM_BA_RESP_TX_BAR, - IWL_MVM_BA_RESP_TX_AGG_FAIL, - IWL_MVM_BA_RESP_TX_UNDERRUN, - IWL_MVM_BA_RESP_TX_BT_KILL, - IWL_MVM_BA_RESP_TX_DSP_TIMEOUT -}; - -/** - * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA - * ( BA_NOTIF = 0xc5 ) - * @flags: status flag, see the &iwl_mvm_ba_resp_flags - * @sta_id: Index of recipient (BA-sending) station in fw's station table - * @reduced_txp: power reduced according to TPC. This is the actual value and - * not a copy from the LQ command. Thus, if not the first rate was used - * for Tx-ing then this value will be set to 0 by FW. 
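For the plain BA notification above, a small sketch of the bookkeeping a consumer might do, comparing txed, txed_2_done and the bits set in the over-the-air bitmap; the names are hypothetical and the fields are assumed already converted to host order:

/* Sketch: summarize a BA notification's per-batch accounting. */
#include <stdint.h>

struct ba_notif_summary {
	unsigned int sent;	/* frames transmitted in the batch (txed) */
	unsigned int acked;	/* frames acknowledged (txed_2_done) */
	unsigned int in_bitmap;	/* bits set in the over-the-air BA bitmap */
};

static unsigned int popcount64(uint64_t v)
{
	unsigned int n = 0;

	while (v) {
		v &= v - 1;	/* clear the lowest set bit */
		n++;
	}
	return n;
}

static struct ba_notif_summary summarize_ba(uint8_t txed, uint8_t txed_2_done,
					    uint64_t bitmap)
{
	struct ba_notif_summary s = {
		.sent      = txed,
		.acked     = txed_2_done,
		.in_bitmap = popcount64(bitmap),
	};
	return s;
}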
- * @initial_rate: TLC rate info, initial rate index, TLC table color - * @retry_cnt: retry count - * @query_byte_cnt: SCD query byte count - * @query_frame_cnt: SCD query frame count - * @txed: number of frames sent in the aggregation (all-TIDs) - * @done: number of frames that were Acked by the BA (all-TIDs) - * @reserved: reserved (for alignment) - * @wireless_time: Wireless-media time - * @tx_rate: the rate the aggregation was sent at - * @tfd_cnt: number of TFD-Q elements - * @ra_tid_cnt: number of RATID-Q elements - * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd - * for details. - * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See - * &iwl_mvm_compressed_ba_ratid for more details. - */ -struct iwl_mvm_compressed_ba_notif { - __le32 flags; - u8 sta_id; - u8 reduced_txp; - u8 initial_rate; - u8 retry_cnt; - __le32 query_byte_cnt; - __le16 query_frame_cnt; - __le16 txed; - __le16 done; - __le16 reserved; - __le32 wireless_time; - __le32 tx_rate; - __le16 tfd_cnt; - __le16 ra_tid_cnt; - struct iwl_mvm_compressed_ba_tfd tfd[1]; - struct iwl_mvm_compressed_ba_ratid ra_tid[0]; -} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */ - -/** - * struct iwl_mac_beacon_cmd_v6 - beacon template command - * @tx: the tx commands associated with the beacon frame - * @template_id: currently equal to the mac context id of the coresponding - * mac. - * @tim_idx: the offset of the tim IE in the beacon - * @tim_size: the length of the tim IE - * @frame: the template of the beacon frame - */ -struct iwl_mac_beacon_cmd_v6 { - struct iwl_tx_cmd tx; - __le32 template_id; - __le32 tim_idx; - __le32 tim_size; - struct ieee80211_hdr frame[0]; -} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */ - -/** - * struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA - * @template_id: currently equal to the mac context id of the coresponding - * mac. 
- * @tim_idx: the offset of the tim IE in the beacon - * @tim_size: the length of the tim IE - * @ecsa_offset: offset to the ECSA IE if present - * @csa_offset: offset to the CSA IE if present - * @frame: the template of the beacon frame - */ -struct iwl_mac_beacon_cmd_data { - __le32 template_id; - __le32 tim_idx; - __le32 tim_size; - __le32 ecsa_offset; - __le32 csa_offset; - struct ieee80211_hdr frame[0]; -}; - -/** - * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA - * @tx: the tx commands associated with the beacon frame - * @data: see &iwl_mac_beacon_cmd_data - */ -struct iwl_mac_beacon_cmd_v7 { - struct iwl_tx_cmd tx; - struct iwl_mac_beacon_cmd_data data; -} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ - -/** - * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA - * @byte_cnt: byte count of the beacon frame - * @flags: for future use - * @reserved: reserved - * @data: see &iwl_mac_beacon_cmd_data - */ -struct iwl_mac_beacon_cmd { - __le16 byte_cnt; - __le16 flags; - __le64 reserved; - struct iwl_mac_beacon_cmd_data data; -} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */ - -struct iwl_beacon_notif { - struct iwl_mvm_tx_resp beacon_notify_hdr; - __le64 tsf; - __le32 ibss_mgr_status; -} __packed; - -/** - * struct iwl_extended_beacon_notif - notifies about beacon transmission - * @beacon_notify_hdr: tx response command associated with the beacon - * @tsf: last beacon tsf - * @ibss_mgr_status: whether IBSS is manager - * @gp2: last beacon time in gp2 - */ -struct iwl_extended_beacon_notif { - struct iwl_mvm_tx_resp beacon_notify_hdr; - __le64 tsf; - __le32 ibss_mgr_status; - __le32 gp2; -} __packed; /* BEACON_NTFY_API_S_VER_5 */ - -/** - * enum iwl_dump_control - dump (flush) control flags - * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the the FIFO is empty - * and the TFD queues are empty. 
- */ -enum iwl_dump_control { - DUMP_TX_FIFO_FLUSH = BIT(1), -}; - -/** - * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command - * @queues_ctl: bitmap of queues to flush - * @flush_ctl: control flags - * @reserved: reserved - */ -struct iwl_tx_path_flush_cmd_v1 { - __le32 queues_ctl; - __le16 flush_ctl; - __le16 reserved; -} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */ - -/** - * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command - * @sta_id: station ID to flush - * @tid_mask: TID mask to flush - * @reserved: reserved - */ -struct iwl_tx_path_flush_cmd { - __le32 sta_id; - __le16 tid_mask; - __le16 reserved; -} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */ - -/* Available options for the SCD_QUEUE_CFG HCMD */ -enum iwl_scd_cfg_actions { - SCD_CFG_DISABLE_QUEUE = 0x0, - SCD_CFG_ENABLE_QUEUE = 0x1, - SCD_CFG_UPDATE_QUEUE_TID = 0x2, -}; - -/** - * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command - * @token: unused - * @sta_id: station id - * @tid: TID - * @scd_queue: scheduler queue to confiug - * @action: 1 queue enable, 0 queue disable, 2 change txq's tid owner - * Value is one of &enum iwl_scd_cfg_actions options - * @aggregate: 1 aggregated queue, 0 otherwise - * @tx_fifo: &enum iwl_mvm_tx_fifo - * @window: BA window size - * @ssn: SSN for the BA agreement - * @reserved: reserved - */ -struct iwl_scd_txq_cfg_cmd { - u8 token; - u8 sta_id; - u8 tid; - u8 scd_queue; - u8 action; - u8 aggregate; - u8 tx_fifo; - u8 window; - __le16 ssn; - __le16 reserved; -} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */ - -/** - * struct iwl_scd_txq_cfg_rsp - * @token: taken from the command - * @sta_id: station id from the command - * @tid: tid from the command - * @scd_queue: scd_queue from the command - */ -struct iwl_scd_txq_cfg_rsp { - u8 token; - u8 sta_id; - u8 tid; - u8 scd_queue; -} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */ - -#endif /* __fw_api_tx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index bfc865ad8904..69336f38ac58 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -68,2692 +68,32 @@ #ifndef __fw_api_h__ #define __fw_api_h__ -#include "fw-api-rs.h" -#include "fw-api-rx.h" -#include "fw-api-tx.h" -#include "fw-api-sta.h" -#include "fw-api-mac.h" -#include "fw-api-power.h" -#include "fw-api-d3.h" -#include "fw-api-coex.h" -#include "fw-api-scan.h" -#include "fw-api-stats.h" -#include "fw-api-tof.h" - -/* Tx queue numbers for non-DQA mode */ -enum { - IWL_MVM_OFFCHANNEL_QUEUE = 8, - IWL_MVM_CMD_QUEUE = 9, -}; - -/* - * DQA queue numbers - * - * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW - * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames - * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames - * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames - * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure - * that we are never left without the possibility to connect to an AP. - * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames. - * Each MGMT queue is mapped to a single STA - * MGMT frames are frames that return true on ieee80211_is_mgmt() - * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames - * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe - * responses - * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames. 
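A sketch contrasting the two flush command layouts shown above: version 1 addresses hardware queues by bitmap plus DUMP_TX_FIFO_FLUSH, while version 2 addresses a station and a TID mask; the struct and helper names are hypothetical stand-ins with host-order fields in place of __le32/__le16:

/* Sketch: build the two generations of the TX path flush command. */
#include <stdint.h>

#define DUMP_TX_FIFO_FLUSH	(1u << 1)	/* BIT(1) from enum iwl_dump_control */

struct tx_path_flush_v1 {	/* mirrors TX_PATH_FLUSH_CMD_API_S_VER_1 */
	uint32_t queues_ctl;	/* bitmap of TX queues to flush */
	uint16_t flush_ctl;	/* control flags, e.g. DUMP_TX_FIFO_FLUSH */
	uint16_t reserved;
};

struct tx_path_flush_v2 {	/* mirrors TX_PATH_FLUSH_CMD_API_S_VER_2 */
	uint32_t sta_id;	/* station to flush */
	uint16_t tid_mask;	/* bitmap of TIDs to flush for that station */
	uint16_t reserved;
};

static struct tx_path_flush_v1 flush_queues(uint32_t queue_bitmap)
{
	struct tx_path_flush_v1 cmd = {
		.queues_ctl = queue_bitmap,
		.flush_ctl  = DUMP_TX_FIFO_FLUSH,
	};
	return cmd;
}

static struct tx_path_flush_v2 flush_sta_tids(uint32_t sta_id, uint16_t tid_mask)
{
	struct tx_path_flush_v2 cmd = {
		.sta_id   = sta_id,
		.tid_mask = tid_mask,
	};
	return cmd;
}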
- * DATA frames are intended for !ieee80211_is_mgmt() frames, but if - * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues - * as well - * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames - */ -enum iwl_mvm_dqa_txq { - IWL_MVM_DQA_CMD_QUEUE = 0, - IWL_MVM_DQA_AUX_QUEUE = 1, - IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2, - IWL_MVM_DQA_GCAST_QUEUE = 3, - IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4, - IWL_MVM_DQA_MIN_MGMT_QUEUE = 5, - IWL_MVM_DQA_MAX_MGMT_QUEUE = 8, - IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9, - IWL_MVM_DQA_MIN_DATA_QUEUE = 10, - IWL_MVM_DQA_MAX_DATA_QUEUE = 31, -}; - -enum iwl_mvm_tx_fifo { - IWL_MVM_TX_FIFO_BK = 0, - IWL_MVM_TX_FIFO_BE, - IWL_MVM_TX_FIFO_VI, - IWL_MVM_TX_FIFO_VO, - IWL_MVM_TX_FIFO_MCAST = 5, - IWL_MVM_TX_FIFO_CMD = 7, -}; - - -/** - * enum iwl_legacy_cmds - legacy group command IDs - */ -enum iwl_legacy_cmds { - /** - * @MVM_ALIVE: - * Alive data from the firmware, as described in - * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp. - */ - MVM_ALIVE = 0x1, - - /** - * @REPLY_ERROR: Cause an error in the firmware, for testing purposes. - */ - REPLY_ERROR = 0x2, - - /** - * @ECHO_CMD: Send data to the device to have it returned immediately. - */ - ECHO_CMD = 0x3, - - /** - * @INIT_COMPLETE_NOTIF: Notification that initialization is complete. - */ - INIT_COMPLETE_NOTIF = 0x4, - - /** - * @PHY_CONTEXT_CMD: - * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd. - */ - PHY_CONTEXT_CMD = 0x8, - - /** - * @DBG_CFG: Debug configuration command. - */ - DBG_CFG = 0x9, - - /** - * @ANTENNA_COUPLING_NOTIFICATION: - * Antenna coupling data, &struct iwl_mvm_antenna_coupling_notif - */ - ANTENNA_COUPLING_NOTIFICATION = 0xa, - - /** - * @SCAN_ITERATION_COMPLETE_UMAC: - * Firmware indicates a scan iteration completed, using - * &struct iwl_umac_scan_iter_complete_notif. - */ - SCAN_ITERATION_COMPLETE_UMAC = 0xb5, - - /** - * @SCAN_CFG_CMD: - * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config - */ - SCAN_CFG_CMD = 0xc, - - /** - * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac - */ - SCAN_REQ_UMAC = 0xd, - - /** - * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort - */ - SCAN_ABORT_UMAC = 0xe, - - /** - * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete - */ - SCAN_COMPLETE_UMAC = 0xf, - - /** - * @BA_WINDOW_STATUS_NOTIFICATION_ID: - * uses &struct iwl_ba_window_status_notif - */ - BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, - - /** - * @ADD_STA_KEY: - * &struct iwl_mvm_add_sta_key_cmd_v1 or - * &struct iwl_mvm_add_sta_key_cmd. - */ - ADD_STA_KEY = 0x17, - - /** - * @ADD_STA: - * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7. - */ - ADD_STA = 0x18, - - /** - * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd - */ - REMOVE_STA = 0x19, - - /** - * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd - */ - FW_GET_ITEM_CMD = 0x1a, - - /** - * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2, - * response in &struct iwl_mvm_tx_resp or - * &struct iwl_mvm_tx_resp_v3 - */ - TX_CMD = 0x1c, - - /** - * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd - */ - TXPATH_FLUSH = 0x1e, - - /** - * @MGMT_MCAST_KEY: - * &struct iwl_mvm_mgmt_mcast_key_cmd or - * &struct iwl_mvm_mgmt_mcast_key_cmd_v1 - */ - MGMT_MCAST_KEY = 0x1f, - - /* scheduler config */ - /** - * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware, - * &struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp - * for newer (A000) hardware. 
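Given the DQA queue numbering above, a tiny sketch of classifying a queue index into the reserved, management-pool and data-pool ranges; the helper and enum are illustrative only:

/* Sketch: classify a DQA TX queue index using the ranges defined above. */
enum dqa_queue_kind {
	DQA_QUEUE_RESERVED,	/* cmd/aux/P2P/GCAST/BSS-client/probe-resp */
	DQA_QUEUE_MGMT_POOL,	/* 5..8: per-STA MGMT / non-QoS queues */
	DQA_QUEUE_DATA_POOL,	/* 10..31: DATA (and MGMT overflow) queues */
	DQA_QUEUE_INVALID,
};

static enum dqa_queue_kind classify_dqa_queue(unsigned int queue)
{
	if (queue >= 5 && queue <= 8)	/* IWL_MVM_DQA_MIN/MAX_MGMT_QUEUE */
		return DQA_QUEUE_MGMT_POOL;
	if (queue >= 10 && queue <= 31)	/* IWL_MVM_DQA_MIN/MAX_DATA_QUEUE */
		return DQA_QUEUE_DATA_POOL;
	if (queue <= 9)			/* 0-4 and 9 are dedicated queues */
		return DQA_QUEUE_RESERVED;
	return DQA_QUEUE_INVALID;
}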
- */ - SCD_QUEUE_CFG = 0x1d, - - /** - * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd - */ - WEP_KEY = 0x20, - - /** - * @SHARED_MEM_CFG: - * retrieve shared memory configuration - response in - * &struct iwl_shared_mem_cfg - */ - SHARED_MEM_CFG = 0x25, - - /** - * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd - */ - TDLS_CHANNEL_SWITCH_CMD = 0x27, - - /** - * @TDLS_CHANNEL_SWITCH_NOTIFICATION: - * uses &struct iwl_tdls_channel_switch_notif - */ - TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa, - - /** - * @TDLS_CONFIG_CMD: - * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res - */ - TDLS_CONFIG_CMD = 0xa7, - - /** - * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd - */ - MAC_CONTEXT_CMD = 0x28, - - /** - * @TIME_EVENT_CMD: - * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp - */ - TIME_EVENT_CMD = 0x29, /* both CMD and response */ - - /** - * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif - */ - TIME_EVENT_NOTIFICATION = 0x2a, - - /** - * @BINDING_CONTEXT_CMD: - * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1 - */ - BINDING_CONTEXT_CMD = 0x2b, - - /** - * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd - */ - TIME_QUOTA_CMD = 0x2c, - - /** - * @NON_QOS_TX_COUNTER_CMD: - * command is &struct iwl_nonqos_seq_query_cmd - */ - NON_QOS_TX_COUNTER_CMD = 0x2d, - - /** - * @LQ_CMD: using &struct iwl_lq_cmd - */ - LQ_CMD = 0x4e, - - /** - * @FW_PAGING_BLOCK_CMD: - * &struct iwl_fw_paging_cmd - */ - FW_PAGING_BLOCK_CMD = 0x4f, - - /** - * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac - */ - SCAN_OFFLOAD_REQUEST_CMD = 0x51, - - /** - * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents - */ - SCAN_OFFLOAD_ABORT_CMD = 0x52, - - /** - * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req - */ - HOT_SPOT_CMD = 0x53, - - /** - * @SCAN_OFFLOAD_COMPLETE: - * notification, &struct iwl_periodic_scan_complete - */ - SCAN_OFFLOAD_COMPLETE = 0x6D, - - /** - * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD: - * update scan offload (scheduled scan) profiles/blacklist/etc. 
- */ - SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E, - - /** - * @MATCH_FOUND_NOTIFICATION: scan match found - */ - MATCH_FOUND_NOTIFICATION = 0xd9, - - /** - * @SCAN_ITERATION_COMPLETE: - * uses &struct iwl_lmac_scan_complete_notif - */ - SCAN_ITERATION_COMPLETE = 0xe7, - - /* Phy */ - /** - * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd - */ - PHY_CONFIGURATION_CMD = 0x6a, - - /** - * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db - */ - CALIB_RES_NOTIF_PHY_DB = 0x6b, - - /** - * @PHY_DB_CMD: &struct iwl_phy_db_cmd - */ - PHY_DB_CMD = 0x6c, - - /** - * @TOF_CMD: &struct iwl_tof_config_cmd - */ - TOF_CMD = 0x10, - - /** - * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd - */ - TOF_NOTIFICATION = 0x11, - - /** - * @POWER_TABLE_CMD: &struct iwl_device_power_cmd - */ - POWER_TABLE_CMD = 0x77, - - /** - * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION: - * &struct iwl_uapsd_misbehaving_ap_notif - */ - PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, - - /** - * @LTR_CONFIG: &struct iwl_ltr_config_cmd - */ - LTR_CONFIG = 0xee, - - /** - * @REPLY_THERMAL_MNG_BACKOFF: - * Thermal throttling command - */ - REPLY_THERMAL_MNG_BACKOFF = 0x7e, - - /** - * @DC2DC_CONFIG_CMD: - * Set/Get DC2DC frequency tune - * Command is &struct iwl_dc2dc_config_cmd, - * response is &struct iwl_dc2dc_config_resp - */ - DC2DC_CONFIG_CMD = 0x83, - - /** - * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd - */ - NVM_ACCESS_CMD = 0x88, - - /** - * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif - */ - BEACON_NOTIFICATION = 0x90, - - /** - * @BEACON_TEMPLATE_CMD: - * Uses one of &struct iwl_mac_beacon_cmd_v6, - * &struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd - * depending on the device version. - */ - BEACON_TEMPLATE_CMD = 0x91, - /** - * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd - */ - TX_ANT_CONFIGURATION_CMD = 0x98, - - /** - * @STATISTICS_CMD: - * one of &struct iwl_statistics_cmd, - * &struct iwl_notif_statistics_v11, - * &struct iwl_notif_statistics_v10, - * &struct iwl_notif_statistics_cdb - */ - STATISTICS_CMD = 0x9c, - - /** - * @STATISTICS_NOTIFICATION: - * one of &struct iwl_notif_statistics_v10, - * &struct iwl_notif_statistics_v11, - * &struct iwl_notif_statistics_cdb - */ - STATISTICS_NOTIFICATION = 0x9d, - - /** - * @EOSP_NOTIFICATION: - * Notify that a service period ended, - * &struct iwl_mvm_eosp_notification - */ - EOSP_NOTIFICATION = 0x9e, - - /** - * @REDUCE_TX_POWER_CMD: - * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd - */ - REDUCE_TX_POWER_CMD = 0x9f, - - /** - * @CARD_STATE_NOTIFICATION: - * Card state (RF/CT kill) notification, - * uses &struct iwl_card_state_notif - */ - CARD_STATE_NOTIFICATION = 0xa1, - - /** - * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif - */ - MISSED_BEACONS_NOTIFICATION = 0xa2, - - /** - * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd - */ - MAC_PM_POWER_TABLE = 0xa9, - - /** - * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif - */ - MFUART_LOAD_NOTIFICATION = 0xb1, - - /** - * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd - */ - RSS_CONFIG_CMD = 0xb3, - - /** - * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info - */ - REPLY_RX_PHY_CMD = 0xc0, - - /** - * @REPLY_RX_MPDU_CMD: - * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc - */ - REPLY_RX_MPDU_CMD = 0xc1, - - /** - * @FRAME_RELEASE: - * Frame release (reorder helper) notification, uses - * &struct iwl_frame_release - */ - FRAME_RELEASE = 0xc3, - - /** - * @BA_NOTIF: - * BlockAck notification, uses &struct 
iwl_mvm_compressed_ba_notif - * or &struct iwl_mvm_ba_notif depending on the HW - */ - BA_NOTIF = 0xc5, - - /* Location Aware Regulatory */ - /** - * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd - */ - MCC_UPDATE_CMD = 0xc8, - - /** - * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif - */ - MCC_CHUB_UPDATE_CMD = 0xc9, - - /** - * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker - */ - MARKER_CMD = 0xcb, - - /** - * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif - */ - BT_PROFILE_NOTIFICATION = 0xce, - - /** - * @BT_CONFIG: &struct iwl_bt_coex_cmd - */ - BT_CONFIG = 0x9b, - - /** - * @BT_COEX_UPDATE_CORUN_LUT: - * &struct iwl_bt_coex_corun_lut_update_cmd - */ - BT_COEX_UPDATE_CORUN_LUT = 0x5b, - - /** - * @BT_COEX_UPDATE_REDUCED_TXP: - * &struct iwl_bt_coex_reduced_txp_update_cmd - */ - BT_COEX_UPDATE_REDUCED_TXP = 0x5c, - - /** - * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd - */ - BT_COEX_CI = 0x5d, - - /** - * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd - */ - REPLY_SF_CFG_CMD = 0xd1, - /** - * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd - */ - REPLY_BEACON_FILTERING_CMD = 0xd2, - - /** - * @DTS_MEASUREMENT_NOTIFICATION: - * &struct iwl_dts_measurement_notif_v1 or - * &struct iwl_dts_measurement_notif_v2 - */ - DTS_MEASUREMENT_NOTIFICATION = 0xdd, - - /** - * @LDBG_CONFIG_CMD: configure continuous trace recording - */ - LDBG_CONFIG_CMD = 0xf6, - - /** - * @DEBUG_LOG_MSG: Debugging log data from firmware - */ - DEBUG_LOG_MSG = 0xf7, - - /** - * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd - */ - BCAST_FILTER_CMD = 0xcf, - - /** - * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd - */ - MCAST_FILTER_CMD = 0xd0, - - /** - * @D3_CONFIG_CMD: &struct iwl_d3_manager_config - */ - D3_CONFIG_CMD = 0xd3, - - /** - * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of - * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2, - * &struct iwl_proto_offload_cmd_v3_small, - * &struct iwl_proto_offload_cmd_v3_large - */ - PROT_OFFLOAD_CONFIG_CMD = 0xd4, - - /** - * @OFFLOADS_QUERY_CMD: - * No data in command, response in &struct iwl_wowlan_status - */ - OFFLOADS_QUERY_CMD = 0xd5, - - /** - * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config - */ - REMOTE_WAKE_CONFIG_CMD = 0xd6, - - /** - * @D0I3_END_CMD: End D0i3/D3 state, no command data - */ - D0I3_END_CMD = 0xed, - - /** - * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd - */ - WOWLAN_PATTERNS = 0xe0, - - /** - * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd - */ - WOWLAN_CONFIGURATION = 0xe1, - - /** - * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd - */ - WOWLAN_TSC_RSC_PARAM = 0xe2, - - /** - * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd - */ - WOWLAN_TKIP_PARAM = 0xe3, - - /** - * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd - */ - WOWLAN_KEK_KCK_MATERIAL = 0xe4, - - /** - * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status - */ - WOWLAN_GET_STATUSES = 0xe5, - - /** - * @SCAN_OFFLOAD_PROFILES_QUERY_CMD: - * No command data, response is &struct iwl_scan_offload_profiles_query - */ - SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56, -}; - -/* Please keep this enum *SORTED* by hex value. - * Needed for binary search, otherwise a warning will be triggered. 
- */ -enum iwl_mac_conf_subcmd_ids { - LINK_QUALITY_MEASUREMENT_CMD = 0x1, - LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE, - CHANNEL_SWITCH_NOA_NOTIF = 0xFF, -}; - -/** - * enum iwl_phy_ops_subcmd_ids - PHY group commands - */ -enum iwl_phy_ops_subcmd_ids { - /** - * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE: - * Uses either &struct iwl_dts_measurement_cmd or - * &struct iwl_ext_dts_measurement_cmd - */ - CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, - - /** - * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd - */ - CTDP_CONFIG_CMD = 0x03, - - /** - * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd - */ - TEMP_REPORTING_THRESHOLDS_CMD = 0x04, - - /** - * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd - */ - GEO_TX_POWER_LIMIT = 0x05, - - /** - * @CT_KILL_NOTIFICATION: &struct ct_kill_notif - */ - CT_KILL_NOTIFICATION = 0xFE, - - /** - * @DTS_MEASUREMENT_NOTIF_WIDE: - * &struct iwl_dts_measurement_notif_v1 or - * &struct iwl_dts_measurement_notif_v2 - */ - DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, -}; - -/** - * enum iwl_system_subcmd_ids - system group command IDs - */ -enum iwl_system_subcmd_ids { - /** - * @SHARED_MEM_CFG_CMD: - * response in &struct iwl_shared_mem_cfg or - * &struct iwl_shared_mem_cfg_v2 - */ - SHARED_MEM_CFG_CMD = 0x0, - - /** - * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd - */ - INIT_EXTENDED_CFG_CMD = 0x03, -}; - -/** - * enum iwl_data_path_subcmd_ids - data path group commands - */ -enum iwl_data_path_subcmd_ids { - /** - * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd - */ - DQA_ENABLE_CMD = 0x0, - - /** - * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd - */ - UPDATE_MU_GROUPS_CMD = 0x1, - - /** - * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd - */ - TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, - - /** - * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification - */ - STA_PM_NOTIF = 0xFD, - - /** - * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif - */ - MU_GROUP_MGMT_NOTIF = 0xFE, - - /** - * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification - */ - RX_QUEUES_NOTIFICATION = 0xFF, -}; - -/** - * enum iwl_prot_offload_subcmd_ids - protocol offload commands - */ -enum iwl_prot_offload_subcmd_ids { - /** - * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif - */ - STORED_BEACON_NTF = 0xFF, -}; - -/** - * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands - */ -enum iwl_regulatory_and_nvm_subcmd_ids { - /** - * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd - */ - NVM_ACCESS_COMPLETE = 0x0, - - /** - * @NVM_GET_INFO: - * Command is &struct iwl_nvm_get_info, - * response is &struct iwl_nvm_get_info_rsp - */ - NVM_GET_INFO = 0x2, -}; - -/** - * enum iwl_debug_cmds - debug commands - */ -enum iwl_debug_cmds { - /** - * @LMAC_RD_WR: - * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and - * &struct iwl_dbg_mem_access_rsp - */ - LMAC_RD_WR = 0x0, - /** - * @UMAC_RD_WR: - * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and - * &struct iwl_dbg_mem_access_rsp - */ - UMAC_RD_WR = 0x1, - /** - * @MFU_ASSERT_DUMP_NTF: - * &struct iwl_mfu_assert_dump_notif - */ - MFU_ASSERT_DUMP_NTF = 0xFE, -}; - -/** - * enum iwl_mvm_command_groups - command groups for the firmware - * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds - * @LONG_GROUP: legacy group with long header, also uses command IDs - * from &enum iwl_legacy_cmds - * @SYSTEM_GROUP: system group, uses command IDs from - * &enum iwl_system_subcmd_ids - * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from - 
* &enum iwl_mac_conf_subcmd_ids - * @PHY_OPS_GROUP: PHY operations group, uses command IDs from - * &enum iwl_phy_ops_subcmd_ids - * @DATA_PATH_GROUP: data path group, uses command IDs from - * &enum iwl_data_path_subcmd_ids - * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids - * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids - * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from - * &enum iwl_prot_offload_subcmd_ids - * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from - * &enum iwl_regulatory_and_nvm_subcmd_ids - * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds - */ -enum iwl_mvm_command_groups { - LEGACY_GROUP = 0x0, - LONG_GROUP = 0x1, - SYSTEM_GROUP = 0x2, - MAC_CONF_GROUP = 0x3, - PHY_OPS_GROUP = 0x4, - DATA_PATH_GROUP = 0x5, - PROT_OFFLOAD_GROUP = 0xb, - REGULATORY_AND_NVM_GROUP = 0xc, - DEBUG_GROUP = 0xf, -}; - -/** - * struct iwl_cmd_response - generic response struct for most commands - * @status: status of the command asked, changes for each one - */ -struct iwl_cmd_response { - __le32 status; -}; - -/* - * struct iwl_dqa_enable_cmd - * @cmd_queue: the TXQ number of the command queue - */ -struct iwl_dqa_enable_cmd { - __le32 cmd_queue; -} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */ - -/* - * struct iwl_tx_ant_cfg_cmd - * @valid: valid antenna configuration - */ -struct iwl_tx_ant_cfg_cmd { - __le32 valid; -} __packed; - -/** - * struct iwl_calib_ctrl - Calibration control struct. - * Sent as part of the phy configuration command. - * @flow_trigger: bitmap for which calibrations to perform according to - * flow triggers, using &enum iwl_calib_cfg - * @event_trigger: bitmap for which calibrations to perform according to - * event triggers, using &enum iwl_calib_cfg - */ -struct iwl_calib_ctrl { - __le32 flow_trigger; - __le32 event_trigger; -} __packed; - -/* This enum defines the bitmap of various calibrations to enable in both - * init ucode and runtime ucode through CALIBRATION_CFG_CMD. 
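To make the grouped-ID scheme concrete: a sketch, assuming the usual (group << 8) | opcode packing, of how a wide command ID could be composed from the groups listed above. The helper name is invented for illustration.

static inline u16 example_wide_cmd_id(u8 group, u8 opcode)
{
	/* e.g. example_wide_cmd_id(PHY_OPS_GROUP, CTDP_CONFIG_CMD) */
	return ((u16)group << 8) | opcode;
}

Commands in LEGACY_GROUP and LONG_GROUP keep their one-byte IDs, so for those the group byte is simply 0x0 or 0x1.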
- */ -enum iwl_calib_cfg { - IWL_CALIB_CFG_XTAL_IDX = BIT(0), - IWL_CALIB_CFG_TEMPERATURE_IDX = BIT(1), - IWL_CALIB_CFG_VOLTAGE_READ_IDX = BIT(2), - IWL_CALIB_CFG_PAPD_IDX = BIT(3), - IWL_CALIB_CFG_TX_PWR_IDX = BIT(4), - IWL_CALIB_CFG_DC_IDX = BIT(5), - IWL_CALIB_CFG_BB_FILTER_IDX = BIT(6), - IWL_CALIB_CFG_LO_LEAKAGE_IDX = BIT(7), - IWL_CALIB_CFG_TX_IQ_IDX = BIT(8), - IWL_CALIB_CFG_TX_IQ_SKEW_IDX = BIT(9), - IWL_CALIB_CFG_RX_IQ_IDX = BIT(10), - IWL_CALIB_CFG_RX_IQ_SKEW_IDX = BIT(11), - IWL_CALIB_CFG_SENSITIVITY_IDX = BIT(12), - IWL_CALIB_CFG_CHAIN_NOISE_IDX = BIT(13), - IWL_CALIB_CFG_DISCONNECTED_ANT_IDX = BIT(14), - IWL_CALIB_CFG_ANT_COUPLING_IDX = BIT(15), - IWL_CALIB_CFG_DAC_IDX = BIT(16), - IWL_CALIB_CFG_ABS_IDX = BIT(17), - IWL_CALIB_CFG_AGC_IDX = BIT(18), -}; - -/** - * struct iwl_phy_cfg_cmd - Phy configuration command - * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg - * @calib_control: calibration control data - */ -struct iwl_phy_cfg_cmd { - __le32 phy_cfg; - struct iwl_calib_ctrl calib_control; -} __packed; - -#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1)) -#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3)) -#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5)) -#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7)) -#define PHY_CFG_TX_CHAIN_A BIT(8) -#define PHY_CFG_TX_CHAIN_B BIT(9) -#define PHY_CFG_TX_CHAIN_C BIT(10) -#define PHY_CFG_RX_CHAIN_A BIT(12) -#define PHY_CFG_RX_CHAIN_B BIT(13) -#define PHY_CFG_RX_CHAIN_C BIT(14) - - -/** - * enum iwl_nvm_access_op - NVM access opcode - * @IWL_NVM_READ: read NVM - * @IWL_NVM_WRITE: write NVM - */ -enum iwl_nvm_access_op { - IWL_NVM_READ = 0, - IWL_NVM_WRITE = 1, -}; - -/** - * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD - * @NVM_ACCESS_TARGET_CACHE: access the cache - * @NVM_ACCESS_TARGET_OTP: access the OTP - * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM - */ -enum iwl_nvm_access_target { - NVM_ACCESS_TARGET_CACHE = 0, - NVM_ACCESS_TARGET_OTP = 1, - NVM_ACCESS_TARGET_EEPROM = 2, -}; - -/** - * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD - * @NVM_SECTION_TYPE_SW: software section - * @NVM_SECTION_TYPE_REGULATORY: regulatory section - * @NVM_SECTION_TYPE_CALIBRATION: calibration section - * @NVM_SECTION_TYPE_PRODUCTION: production section - * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section - * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section - * @NVM_MAX_NUM_SECTIONS: number of sections - */ -enum iwl_nvm_section_type { - NVM_SECTION_TYPE_SW = 1, - NVM_SECTION_TYPE_REGULATORY = 3, - NVM_SECTION_TYPE_CALIBRATION = 4, - NVM_SECTION_TYPE_PRODUCTION = 5, - NVM_SECTION_TYPE_MAC_OVERRIDE = 11, - NVM_SECTION_TYPE_PHY_SKU = 12, - NVM_MAX_NUM_SECTIONS = 13, -}; - -/** - * struct iwl_nvm_access_cmd - Request the device to send an NVM section - * @op_code: &enum iwl_nvm_access_op - * @target: &enum iwl_nvm_access_target - * @type: &enum iwl_nvm_section_type - * @offset: offset in bytes into the section - * @length: in bytes, to read/write - * @data: if write operation, the data to write. 
On read its empty - */ -struct iwl_nvm_access_cmd { - u8 op_code; - u8 target; - __le16 type; - __le16 offset; - __le16 length; - u8 data[]; -} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */ - -#define CONT_REC_COMMAND_SIZE 80 -#define ENABLE_CONT_RECORDING 0x15 -#define DISABLE_CONT_RECORDING 0x16 - -/* - * struct iwl_continuous_record_mode - recording mode - */ -struct iwl_continuous_record_mode { - __le16 enable_recording; -} __packed; - -/* - * struct iwl_continuous_record_cmd - enable/disable continuous recording - */ -struct iwl_continuous_record_cmd { - struct iwl_continuous_record_mode record_mode; - u8 pad[CONT_REC_COMMAND_SIZE - - sizeof(struct iwl_continuous_record_mode)]; -} __packed; - -/** - * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD - * @offset: offset in bytes into the section - * @length: in bytes, either how much was written or read - * @type: NVM_SECTION_TYPE_* - * @status: 0 for success, fail otherwise - * @data: if read operation, the data returned. Empty on write. - */ -struct iwl_nvm_access_resp { - __le16 offset; - __le16 length; - __le16 type; - __le16 status; - u8 data[]; -} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */ - -/* MVM_ALIVE 0x1 */ - -/* alive response is_valid values */ -#define ALIVE_RESP_UCODE_OK BIT(0) -#define ALIVE_RESP_RFKILL BIT(1) - -/* alive response ver_type values */ -enum { - FW_TYPE_HW = 0, - FW_TYPE_PROT = 1, - FW_TYPE_AP = 2, - FW_TYPE_WOWLAN = 3, - FW_TYPE_TIMING = 4, - FW_TYPE_WIPAN = 5 -}; - -/* alive response ver_subtype values */ -enum { - FW_SUBTYPE_FULL_FEATURE = 0, - FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */ - FW_SUBTYPE_REDUCED = 2, - FW_SUBTYPE_ALIVE_ONLY = 3, - FW_SUBTYPE_WOWLAN = 4, - FW_SUBTYPE_AP_SUBTYPE = 5, - FW_SUBTYPE_WIPAN = 6, - FW_SUBTYPE_INITIALIZE = 9 -}; - -#define IWL_ALIVE_STATUS_ERR 0xDEAD -#define IWL_ALIVE_STATUS_OK 0xCAFE - -#define IWL_ALIVE_FLG_RFKILL BIT(0) - -struct iwl_lmac_alive { - __le32 ucode_minor; - __le32 ucode_major; - u8 ver_subtype; - u8 ver_type; - u8 mac; - u8 opt; - __le32 timestamp; - __le32 error_event_table_ptr; /* SRAM address for error log */ - __le32 log_event_table_ptr; /* SRAM address for LMAC event log */ - __le32 cpu_register_ptr; - __le32 dbgm_config_ptr; - __le32 alive_counter_ptr; - __le32 scd_base_ptr; /* SRAM address for SCD */ - __le32 st_fwrd_addr; /* pointer to Store and forward */ - __le32 st_fwrd_size; -} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */ - -struct iwl_umac_alive { - __le32 umac_minor; /* UMAC version: minor */ - __le32 umac_major; /* UMAC version: major */ - __le32 error_info_addr; /* SRAM address for UMAC error log */ - __le32 dbg_print_buff_addr; -} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */ - -struct mvm_alive_resp_v3 { - __le16 status; - __le16 flags; - struct iwl_lmac_alive lmac_data; - struct iwl_umac_alive umac_data; -} __packed; /* ALIVE_RES_API_S_VER_3 */ - -struct mvm_alive_resp { - __le16 status; - __le16 flags; - struct iwl_lmac_alive lmac_data[2]; - struct iwl_umac_alive umac_data; -} __packed; /* ALIVE_RES_API_S_VER_4 */ - -/* Error response/notification */ -enum { - FW_ERR_UNKNOWN_CMD = 0x0, - FW_ERR_INVALID_CMD_PARAM = 0x1, - FW_ERR_SERVICE = 0x2, - FW_ERR_ARC_MEMORY = 0x3, - FW_ERR_ARC_CODE = 0x4, - FW_ERR_WATCH_DOG = 0x5, - FW_ERR_WEP_GRP_KEY_INDX = 0x10, - FW_ERR_WEP_KEY_SIZE = 0x11, - FW_ERR_OBSOLETE_FUNC = 0x12, - FW_ERR_UNEXPECTED = 0xFE, - FW_ERR_FATAL = 0xFF -}; - -/** - * struct iwl_error_resp - FW error indication - * ( REPLY_ERROR = 0x2 ) - * @error_type: one of FW_ERR_* - * @cmd_id: the command ID for which the 
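A minimal sketch, assuming a hypothetical helper name, of how the NVM_ACCESS_CMD layout above could be filled for a read of part of the software section; offset and length are caller-supplied.

static void example_fill_nvm_read(struct iwl_nvm_access_cmd *cmd,
				  u16 offset, u16 length)
{
	cmd->op_code = IWL_NVM_READ;
	cmd->target = NVM_ACCESS_TARGET_CACHE;
	cmd->type = cpu_to_le16(NVM_SECTION_TYPE_SW);
	cmd->offset = cpu_to_le16(offset);
	cmd->length = cpu_to_le16(length);
	/* cmd->data[] stays empty for reads; it carries payload on writes */
}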
error occured - * @reserved1: reserved - * @bad_cmd_seq_num: sequence number of the erroneous command - * @error_service: which service created the error, applicable only if - * error_type = 2, otherwise 0 - * @timestamp: TSF in usecs. - */ -struct iwl_error_resp { - __le32 error_type; - u8 cmd_id; - u8 reserved1; - __le16 bad_cmd_seq_num; - __le32 error_service; - __le64 timestamp; -} __packed; - - -/* Common PHY, MAC and Bindings definitions */ -#define MAX_MACS_IN_BINDING (3) -#define MAX_BINDINGS (4) - -/** - * enum iwl_mvm_id_and_color - ID and color fields in context dword - * @FW_CTXT_ID_POS: position of the ID - * @FW_CTXT_ID_MSK: mask of the ID - * @FW_CTXT_COLOR_POS: position of the color - * @FW_CTXT_COLOR_MSK: mask of the color - * @FW_CTXT_INVALID: value used to indicate unused/invalid - */ -enum iwl_mvm_id_and_color { - FW_CTXT_ID_POS = 0, - FW_CTXT_ID_MSK = 0xff << FW_CTXT_ID_POS, - FW_CTXT_COLOR_POS = 8, - FW_CTXT_COLOR_MSK = 0xff << FW_CTXT_COLOR_POS, - FW_CTXT_INVALID = 0xffffffff, -}; - -#define FW_CMD_ID_AND_COLOR(_id, _color) ((_id << FW_CTXT_ID_POS) |\ - (_color << FW_CTXT_COLOR_POS)) - -/* Possible actions on PHYs, MACs and Bindings */ -enum iwl_phy_ctxt_action { - FW_CTXT_ACTION_STUB = 0, - FW_CTXT_ACTION_ADD, - FW_CTXT_ACTION_MODIFY, - FW_CTXT_ACTION_REMOVE, - FW_CTXT_ACTION_NUM -}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */ - -/* Time Events */ - -/* Time Event types, according to MAC type */ -enum iwl_time_event_type { - /* BSS Station Events */ - TE_BSS_STA_AGGRESSIVE_ASSOC, - TE_BSS_STA_ASSOC, - TE_BSS_EAP_DHCP_PROT, - TE_BSS_QUIET_PERIOD, - - /* P2P Device Events */ - TE_P2P_DEVICE_DISCOVERABLE, - TE_P2P_DEVICE_LISTEN, - TE_P2P_DEVICE_ACTION_SCAN, - TE_P2P_DEVICE_FULL_SCAN, - - /* P2P Client Events */ - TE_P2P_CLIENT_AGGRESSIVE_ASSOC, - TE_P2P_CLIENT_ASSOC, - TE_P2P_CLIENT_QUIET_PERIOD, - - /* P2P GO Events */ - TE_P2P_GO_ASSOC_PROT, - TE_P2P_GO_REPETITIVET_NOA, - TE_P2P_GO_CT_WINDOW, - - /* WiDi Sync Events */ - TE_WIDI_TX_SYNC, - - /* Channel Switch NoA */ - TE_CHANNEL_SWITCH_PERIOD, - - TE_MAX -}; /* MAC_EVENT_TYPE_API_E_VER_1 */ - - - -/* Time event - defines for command API v1 */ - -/* - * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed. - * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only - * the first fragment is scheduled. - * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only - * the first 2 fragments are scheduled. - * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any - * number of fragments are valid. - * - * Other than the constant defined above, specifying a fragmentation value 'x' - * means that the event can be fragmented but only the first 'x' will be - * scheduled. 
- */ -enum { - TE_V1_FRAG_NONE = 0, - TE_V1_FRAG_SINGLE = 1, - TE_V1_FRAG_DUAL = 2, - TE_V1_FRAG_ENDLESS = 0xffffffff -}; - -/* If a Time Event can be fragmented, this is the max number of fragments */ -#define TE_V1_FRAG_MAX_MSK 0x0fffffff -/* Repeat the time event endlessly (until removed) */ -#define TE_V1_REPEAT_ENDLESS 0xffffffff -/* If a Time Event has bounded repetitions, this is the maximal value */ -#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff - -/* Time Event dependencies: none, on another TE, or in a specific time */ -enum { - TE_V1_INDEPENDENT = 0, - TE_V1_DEP_OTHER = BIT(0), - TE_V1_DEP_TSF = BIT(1), - TE_V1_EVENT_SOCIOPATHIC = BIT(2), -}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */ - -/* - * @TE_V1_NOTIF_NONE: no notifications - * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start - * @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end - * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use - * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use. - * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start - * @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end - * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use. - * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use. - * - * Supported Time event notifications configuration. - * A notification (both event and fragment) includes a status indicating weather - * the FW was able to schedule the event or not. For fragment start/end - * notification the status is always success. There is no start/end fragment - * notification for monolithic events. - */ -enum { - TE_V1_NOTIF_NONE = 0, - TE_V1_NOTIF_HOST_EVENT_START = BIT(0), - TE_V1_NOTIF_HOST_EVENT_END = BIT(1), - TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2), - TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3), - TE_V1_NOTIF_HOST_FRAG_START = BIT(4), - TE_V1_NOTIF_HOST_FRAG_END = BIT(5), - TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6), - TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7), -}; /* MAC_EVENT_ACTION_API_E_VER_2 */ - -/* Time event - defines for command API */ - -/* - * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed. - * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only - * the first fragment is scheduled. - * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only - * the first 2 fragments are scheduled. - * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any - * number of fragments are valid. - * - * Other than the constant defined above, specifying a fragmentation value 'x' - * means that the event can be fragmented but only the first 'x' will be - * scheduled. - */ -enum { - TE_V2_FRAG_NONE = 0, - TE_V2_FRAG_SINGLE = 1, - TE_V2_FRAG_DUAL = 2, - TE_V2_FRAG_MAX = 0xfe, - TE_V2_FRAG_ENDLESS = 0xff -}; - -/* Repeat the time event endlessly (until removed) */ -#define TE_V2_REPEAT_ENDLESS 0xff -/* If a Time Event has bounded repetitions, this is the maximal value */ -#define TE_V2_REPEAT_MAX 0xfe - -#define TE_V2_PLACEMENT_POS 12 -#define TE_V2_ABSENCE_POS 15 - -/** - * enum iwl_time_event_policy - Time event policy values - * A notification (both event and fragment) includes a status indicating weather - * the FW was able to schedule the event or not. For fragment start/end - * notification the status is always success. There is no start/end fragment - * notification for monolithic events. 
- * - * @TE_V2_DEFAULT_POLICY: independent, social, present, unoticable - * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start - * @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end - * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use - * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use. - * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start - * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end - * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. - * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. - * @T2_V2_START_IMMEDIATELY: start time event immediately - * @TE_V2_DEP_OTHER: depends on another time event - * @TE_V2_DEP_TSF: depends on a specific time - * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC - * @TE_V2_ABSENCE: are we present or absent during the Time Event. - */ -enum iwl_time_event_policy { - TE_V2_DEFAULT_POLICY = 0x0, - - /* notifications (event start/stop, fragment start/stop) */ - TE_V2_NOTIF_HOST_EVENT_START = BIT(0), - TE_V2_NOTIF_HOST_EVENT_END = BIT(1), - TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2), - TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3), - - TE_V2_NOTIF_HOST_FRAG_START = BIT(4), - TE_V2_NOTIF_HOST_FRAG_END = BIT(5), - TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), - TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), - T2_V2_START_IMMEDIATELY = BIT(11), - - /* placement characteristics */ - TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), - TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1), - TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2), - - /* are we present or absent during the Time Event. */ - TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS), -}; - -/** - * struct iwl_time_event_cmd - configuring Time Events - * with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also - * with version 1. determined by IWL_UCODE_TLV_FLAGS) - * ( TIME_EVENT_CMD = 0x29 ) - * @id_and_color: ID and color of the relevant MAC, - * &enum iwl_mvm_id_and_color - * @action: action to perform, one of &enum iwl_phy_ctxt_action - * @id: this field has two meanings, depending on the action: - * If the action is ADD, then it means the type of event to add. - * For all other actions it is the unique event ID assigned when the - * event was added by the FW. 
- * @apply_time: When to start the Time Event (in GP2) - * @max_delay: maximum delay to event's start (apply time), in TU - * @depends_on: the unique ID of the event we depend on (if any) - * @interval: interval between repetitions, in TU - * @duration: duration of event in TU - * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS - * @max_frags: maximal number of fragments the Time Event can be divided to - * @policy: defines whether uCode shall notify the host or other uCode modules - * on event and/or fragment start and/or end - * using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF - * TE_EVENT_SOCIOPATHIC - * using TE_ABSENCE and using TE_NOTIF_*, - * &enum iwl_time_event_policy - */ -struct iwl_time_event_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - __le32 id; - /* MAC_TIME_EVENT_DATA_API_S_VER_2 */ - __le32 apply_time; - __le32 max_delay; - __le32 depends_on; - __le32 interval; - __le32 duration; - u8 repeat; - u8 max_frags; - __le16 policy; -} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */ - -/** - * struct iwl_time_event_resp - response structure to iwl_time_event_cmd - * @status: bit 0 indicates success, all others specify errors - * @id: the Time Event type - * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE - * @id_and_color: ID and color of the relevant MAC, - * &enum iwl_mvm_id_and_color - */ -struct iwl_time_event_resp { - __le32 status; - __le32 id; - __le32 unique_id; - __le32 id_and_color; -} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */ - -/** - * struct iwl_time_event_notif - notifications of time event start/stop - * ( TIME_EVENT_NOTIFICATION = 0x2a ) - * @timestamp: action timestamp in GP2 - * @session_id: session's unique id - * @unique_id: unique id of the Time Event itself - * @id_and_color: ID and color of the relevant MAC - * @action: &enum iwl_time_event_policy - * @status: true if scheduled, false otherwise (not executed) - */ -struct iwl_time_event_notif { - __le32 timestamp; - __le32 session_id; - __le32 unique_id; - __le32 id_and_color; - __le32 action; - __le32 status; -} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */ - - -/* Bindings and Time Quota */ - -/** - * struct iwl_binding_cmd_v1 - configuring bindings - * ( BINDING_CONTEXT_CMD = 0x2b ) - * @id_and_color: ID and color of the relevant Binding, - * &enum iwl_mvm_id_and_color - * @action: action to perform, one of FW_CTXT_ACTION_* - * @macs: array of MAC id and colors which belong to the binding, - * &enum iwl_mvm_id_and_color - * @phy: PHY id and color which belongs to the binding, - * &enum iwl_mvm_id_and_color - */ -struct iwl_binding_cmd_v1 { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* BINDING_DATA_API_S_VER_1 */ - __le32 macs[MAX_MACS_IN_BINDING]; - __le32 phy; -} __packed; /* BINDING_CMD_API_S_VER_1 */ - -/** - * struct iwl_binding_cmd - configuring bindings - * ( BINDING_CONTEXT_CMD = 0x2b ) - * @id_and_color: ID and color of the relevant Binding, - * &enum iwl_mvm_id_and_color - * @action: action to perform, one of FW_CTXT_ACTION_* - * @macs: array of MAC id and colors which belong to the binding - * &enum iwl_mvm_id_and_color - * @phy: PHY id and color which belongs to the binding - * &enum iwl_mvm_id_and_color - * @lmac_id: the lmac id the binding belongs to - */ -struct iwl_binding_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* BINDING_DATA_API_S_VER_1 */ - __le32 macs[MAX_MACS_IN_BINDING]; - __le32 phy; - __le32 lmac_id; -} __packed; 
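To illustrate the time-event API above, a sketch (helper name and duration are placeholders) of a one-shot, non-fragmented event that starts immediately and asks for host notifications on start and end:

static void example_fill_time_event(struct iwl_time_event_cmd *cmd,
				    u32 mac_id_and_color, u32 duration_tu)
{
	memset(cmd, 0, sizeof(*cmd));	/* apply_time, depends_on, ... = 0 */
	cmd->id_and_color = cpu_to_le32(mac_id_and_color);
	cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);
	cmd->id = cpu_to_le32(TE_BSS_STA_ASSOC);	/* event type on ADD */
	cmd->duration = cpu_to_le32(duration_tu);
	cmd->repeat = 1;
	cmd->max_frags = TE_V2_FRAG_NONE;
	cmd->policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				  TE_V2_NOTIF_HOST_EVENT_END |
				  T2_V2_START_IMMEDIATELY);
}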
/* BINDING_CMD_API_S_VER_2 */ - -#define IWL_BINDING_CMD_SIZE_V1 sizeof(struct iwl_binding_cmd_v1) -#define IWL_LMAC_24G_INDEX 0 -#define IWL_LMAC_5G_INDEX 1 - -/* The maximal number of fragments in the FW's schedule session */ -#define IWL_MVM_MAX_QUOTA 128 - -/** - * struct iwl_time_quota_data - configuration of time quota per binding - * @id_and_color: ID and color of the relevant Binding, - * &enum iwl_mvm_id_and_color - * @quota: absolute time quota in TU. The scheduler will try to divide the - * remainig quota (after Time Events) according to this quota. - * @max_duration: max uninterrupted context duration in TU - */ -struct iwl_time_quota_data { - __le32 id_and_color; - __le32 quota; - __le32 max_duration; -} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ - -/** - * struct iwl_time_quota_cmd - configuration of time quota between bindings - * ( TIME_QUOTA_CMD = 0x2c ) - * @quotas: allocations per binding - * Note: on non-CDB the fourth one is the auxilary mac and is - * essentially zero. - * On CDB the fourth one is a regular binding. - */ -struct iwl_time_quota_cmd { - struct iwl_time_quota_data quotas[MAX_BINDINGS]; -} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */ - - -/* PHY context */ - -/* Supported bands */ -#define PHY_BAND_5 (0) -#define PHY_BAND_24 (1) - -/* Supported channel width, vary if there is VHT support */ -#define PHY_VHT_CHANNEL_MODE20 (0x0) -#define PHY_VHT_CHANNEL_MODE40 (0x1) -#define PHY_VHT_CHANNEL_MODE80 (0x2) -#define PHY_VHT_CHANNEL_MODE160 (0x3) - -/* - * Control channel position: - * For legacy set bit means upper channel, otherwise lower. - * For VHT - bit-2 marks if the control is lower/upper relative to center-freq - * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. - * center_freq - * | - * 40Mhz |_______|_______| - * 80Mhz |_______|_______|_______|_______| - * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| - * code 011 010 001 000 | 100 101 110 111 - */ -#define PHY_VHT_CTRL_POS_1_BELOW (0x0) -#define PHY_VHT_CTRL_POS_2_BELOW (0x1) -#define PHY_VHT_CTRL_POS_3_BELOW (0x2) -#define PHY_VHT_CTRL_POS_4_BELOW (0x3) -#define PHY_VHT_CTRL_POS_1_ABOVE (0x4) -#define PHY_VHT_CTRL_POS_2_ABOVE (0x5) -#define PHY_VHT_CTRL_POS_3_ABOVE (0x6) -#define PHY_VHT_CTRL_POS_4_ABOVE (0x7) - -/* - * @band: PHY_BAND_* - * @channel: channel number - * @width: PHY_[VHT|LEGACY]_CHANNEL_* - * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* - */ -struct iwl_fw_channel_info { - u8 band; - u8 channel; - u8 width; - u8 ctrl_pos; -} __packed; - -#define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) -#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ - (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) -#define PHY_RX_CHAIN_VALID_POS (1) -#define PHY_RX_CHAIN_VALID_MSK \ - (0x7 << PHY_RX_CHAIN_VALID_POS) -#define PHY_RX_CHAIN_FORCE_SEL_POS (4) -#define PHY_RX_CHAIN_FORCE_SEL_MSK \ - (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) -#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) -#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ - (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) -#define PHY_RX_CHAIN_CNT_POS (10) -#define PHY_RX_CHAIN_CNT_MSK \ - (0x3 << PHY_RX_CHAIN_CNT_POS) -#define PHY_RX_CHAIN_MIMO_CNT_POS (12) -#define PHY_RX_CHAIN_MIMO_CNT_MSK \ - (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) -#define PHY_RX_CHAIN_MIMO_FORCE_POS (14) -#define PHY_RX_CHAIN_MIMO_FORCE_MSK \ - (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) - -/* TODO: fix the value, make it depend on firmware at runtime? 
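For the channel-info encoding above, a sketch (values are placeholders) describing a 40 MHz VHT channel on the 5 GHz band whose control channel sits just below the center frequency:

static void example_fill_channel_info(struct iwl_fw_channel_info *ci)
{
	ci->band = PHY_BAND_5;
	ci->channel = 36;			/* control channel number */
	ci->width = PHY_VHT_CHANNEL_MODE40;
	ci->ctrl_pos = PHY_VHT_CTRL_POS_1_BELOW;
}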
*/ -#define NUM_PHY_CTX 3 - -/* TODO: complete missing documentation */ -/** - * struct iwl_phy_context_cmd - config of the PHY context - * ( PHY_CONTEXT_CMD = 0x8 ) - * @id_and_color: ID and color of the relevant Binding - * @action: action to perform, one of FW_CTXT_ACTION_* - * @apply_time: 0 means immediate apply and context switch. - * other value means apply new params after X usecs - * @tx_param_color: ??? - * @ci: channel info - * @txchain_info: ??? - * @rxchain_info: ??? - * @acquisition_data: ??? - * @dsp_cfg_flags: set to 0 - */ -struct iwl_phy_context_cmd { - /* COMMON_INDEX_HDR_API_S_VER_1 */ - __le32 id_and_color; - __le32 action; - /* PHY_CONTEXT_DATA_API_S_VER_1 */ - __le32 apply_time; - __le32 tx_param_color; - struct iwl_fw_channel_info ci; - __le32 txchain_info; - __le32 rxchain_info; - __le32 acquisition_data; - __le32 dsp_cfg_flags; -} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ - -/* - * Aux ROC command - * - * Command requests the firmware to create a time event for a certain duration - * and remain on the given channel. This is done by using the Aux framework in - * the FW. - * The command was first used for Hot Spot issues - but can be used regardless - * to Hot Spot. - * - * ( HOT_SPOT_CMD 0x53 ) - * - * @id_and_color: ID and color of the MAC - * @action: action to perform, one of FW_CTXT_ACTION_* - * @event_unique_id: If the action FW_CTXT_ACTION_REMOVE then the - * event_unique_id should be the id of the time event assigned by ucode. - * Otherwise ignore the event_unique_id. - * @sta_id_and_color: station id and color, resumed during "Remain On Channel" - * activity. - * @channel_info: channel info - * @node_addr: Our MAC Address - * @reserved: reserved for alignment - * @apply_time: GP2 value to start (should always be the current GP2 value) - * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max - * time by which start of the event is allowed to be postponed. - * @duration: event duration in TU To calculate event duration: - * timeEventDuration = min(duration, remainingQuota) - */ -struct iwl_hs20_roc_req { - /* COMMON_INDEX_HDR_API_S_VER_1 hdr */ - __le32 id_and_color; - __le32 action; - __le32 event_unique_id; - __le32 sta_id_and_color; - struct iwl_fw_channel_info channel_info; - u8 node_addr[ETH_ALEN]; - __le16 reserved; - __le32 apply_time; - __le32 apply_time_max_delay; - __le32 duration; -} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */ - -/* - * values for AUX ROC result values - */ -enum iwl_mvm_hot_spot { - HOT_SPOT_RSP_STATUS_OK, - HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS, - HOT_SPOT_MAX_NUM_OF_SESSIONS, -}; - -/* - * Aux ROC command response - * - * In response to iwl_hs20_roc_req the FW sends this command to notify the - * driver the uid of the timevent. 
- * - * ( HOT_SPOT_CMD 0x53 ) - * - * @event_unique_id: Unique ID of time event assigned by ucode - * @status: Return status 0 is success, all the rest used for specific errors - */ -struct iwl_hs20_roc_res { - __le32 event_unique_id; - __le32 status; -} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */ - -/** - * struct iwl_radio_version_notif - information on the radio version - * ( RADIO_VERSION_NOTIFICATION = 0x68 ) - * @radio_flavor: radio flavor - * @radio_step: radio version step - * @radio_dash: radio version dash - */ -struct iwl_radio_version_notif { - __le32 radio_flavor; - __le32 radio_step; - __le32 radio_dash; -} __packed; /* RADIO_VERSION_NOTOFICATION_S_VER_1 */ - -enum iwl_card_state_flags { - CARD_ENABLED = 0x00, - HW_CARD_DISABLED = 0x01, - SW_CARD_DISABLED = 0x02, - CT_KILL_CARD_DISABLED = 0x04, - HALT_CARD_DISABLED = 0x08, - CARD_DISABLED_MSK = 0x0f, - CARD_IS_RX_ON = 0x10, -}; - -/** - * struct iwl_radio_version_notif - information on the radio version - * ( CARD_STATE_NOTIFICATION = 0xa1 ) - * @flags: %iwl_card_state_flags - */ -struct iwl_card_state_notif { - __le32 flags; -} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ - -/** - * struct iwl_missed_beacons_notif - information on missed beacons - * ( MISSED_BEACONS_NOTIFICATION = 0xa2 ) - * @mac_id: interface ID - * @consec_missed_beacons_since_last_rx: number of consecutive missed - * beacons since last RX. - * @consec_missed_beacons: number of consecutive missed beacons - * @num_expected_beacons: number of expected beacons - * @num_recvd_beacons: number of received beacons - */ -struct iwl_missed_beacons_notif { - __le32 mac_id; - __le32 consec_missed_beacons_since_last_rx; - __le32 consec_missed_beacons; - __le32 num_expected_beacons; - __le32 num_recvd_beacons; -} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ - -/** - * struct iwl_mfuart_load_notif - mfuart image version & status - * ( MFUART_LOAD_NOTIFICATION = 0xb1 ) - * @installed_ver: installed image version - * @external_ver: external image version - * @status: MFUART loading status - * @duration: MFUART loading time - * @image_size: MFUART image size in bytes -*/ -struct iwl_mfuart_load_notif { - __le32 installed_ver; - __le32 external_ver; - __le32 status; - __le32 duration; - /* image size valid only in v2 of the command */ - __le32 image_size; -} __packed; /*MFU_LOADER_NTFY_API_S_VER_2*/ - -/** - * struct iwl_mfu_assert_dump_notif - mfuart dump logs - * ( MFU_ASSERT_DUMP_NTF = 0xfe ) - * @assert_id: mfuart assert id that cause the notif - * @curr_reset_num: number of asserts since uptime - * @index_num: current chunk id - * @parts_num: total number of chunks - * @data_size: number of data bytes sent - * @data: data buffer - */ -struct iwl_mfu_assert_dump_notif { - __le32 assert_id; - __le32 curr_reset_num; - __le16 index_num; - __le16 parts_num; - __le32 data_size; - __le32 data[0]; -} __packed; /*MFU_DUMP_ASSERT_API_S_VER_1*/ - -#define MAX_PORT_ID_NUM 2 -#define MAX_MCAST_FILTERING_ADDRESSES 256 - -/** - * struct iwl_mcast_filter_cmd - configure multicast filter. - * @filter_own: Set 1 to filter out multicast packets sent by station itself - * @port_id: Multicast MAC addresses array specifier. This is a strange way - * to identify network interface adopted in host-device IF. - * It is used by FW as index in array of addresses. This array has - * MAX_PORT_ID_NUM members. - * @count: Number of MAC addresses in the array - * @pass_all: Set 1 to pass all multicast packets. - * @bssid: current association BSSID. 
- * @reserved: reserved - * @addr_list: Place holder for array of MAC addresses. - * IMPORTANT: add padding if necessary to ensure DWORD alignment. - */ -struct iwl_mcast_filter_cmd { - u8 filter_own; - u8 port_id; - u8 count; - u8 pass_all; - u8 bssid[6]; - u8 reserved[2]; - u8 addr_list[0]; -} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ - -#define MAX_BCAST_FILTERS 8 -#define MAX_BCAST_FILTER_ATTRS 2 - -/** - * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet - * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start. - * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e. - * start of ip payload). - */ -enum iwl_mvm_bcast_filter_attr_offset { - BCAST_FILTER_OFFSET_PAYLOAD_START = 0, - BCAST_FILTER_OFFSET_IP_END = 1, -}; - -/** - * struct iwl_fw_bcast_filter_attr - broadcast filter attribute - * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset. - * @offset: starting offset of this pattern. - * @reserved1: reserved - * @val: value to match - big endian (MSB is the first - * byte to match from offset pos). - * @mask: mask to match (big endian). - */ -struct iwl_fw_bcast_filter_attr { - u8 offset_type; - u8 offset; - __le16 reserved1; - __be32 val; - __be32 mask; -} __packed; /* BCAST_FILTER_ATT_S_VER_1 */ - -/** - * enum iwl_mvm_bcast_filter_frame_type - filter frame type - * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames. - * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames - */ -enum iwl_mvm_bcast_filter_frame_type { - BCAST_FILTER_FRAME_TYPE_ALL = 0, - BCAST_FILTER_FRAME_TYPE_IPV4 = 1, -}; - -/** - * struct iwl_fw_bcast_filter - broadcast filter - * @discard: discard frame (1) or let it pass (0). - * @frame_type: &enum iwl_mvm_bcast_filter_frame_type. - * @reserved1: reserved - * @num_attrs: number of valid attributes in this filter. - * @attrs: attributes of this filter. a filter is considered matched - * only when all its attributes are matched (i.e. AND relationship) - */ -struct iwl_fw_bcast_filter { - u8 discard; - u8 frame_type; - u8 num_attrs; - u8 reserved1; - struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; -} __packed; /* BCAST_FILTER_S_VER_1 */ - -#define BA_WINDOW_STREAMS_MAX 16 -#define BA_WINDOW_STATUS_TID_MSK 0x000F -#define BA_WINDOW_STATUS_STA_ID_POS 4 -#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0 -#define BA_WINDOW_STATUS_VALID_MSK BIT(9) - -/** - * struct iwl_ba_window_status_notif - reordering window's status notification - * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63] - * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid - * @start_seq_num: the start sequence number of the bitmap - * @mpdu_rx_count: the number of received MPDUs since entering D0i3 - */ -struct iwl_ba_window_status_notif { - __le64 bitmap[BA_WINDOW_STREAMS_MAX]; - __le16 ra_tid[BA_WINDOW_STREAMS_MAX]; - __le32 start_seq_num[BA_WINDOW_STREAMS_MAX]; - __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; -} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ - -/** - * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. - * @default_discard: default action for this mac (discard (1) / pass (0)). - * @reserved1: reserved - * @attached_filters: bitmap of relevant filters for this mac. 
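A sketch of one broadcast-filter attribute using the layout above; the pattern value is a placeholder and the helper name is invented for the example. Note that val/mask are big endian while the rest of the command is little endian.

static void example_fill_bcast_attr(struct iwl_fw_bcast_filter_attr *attr)
{
	attr->offset_type = BCAST_FILTER_OFFSET_IP_END;
	attr->offset = 0;			/* right after the IP header */
	attr->reserved1 = 0;
	attr->val = cpu_to_be32(0x11223344);	/* example pattern, MSB first */
	attr->mask = cpu_to_be32(0xffffffff);	/* compare all four bytes */
}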
- */ -struct iwl_fw_bcast_mac { - u8 default_discard; - u8 reserved1; - __le16 attached_filters; -} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */ - -/** - * struct iwl_bcast_filter_cmd - broadcast filtering configuration - * @disable: enable (0) / disable (1) - * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS) - * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER) - * @reserved1: reserved - * @filters: broadcast filters - * @macs: broadcast filtering configuration per-mac - */ -struct iwl_bcast_filter_cmd { - u8 disable; - u8 max_bcast_filters; - u8 max_macs; - u8 reserved1; - struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS]; - struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER]; -} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */ - -/* - * enum iwl_mvm_marker_id - maker ids - * - * The ids for different type of markers to insert into the usniffer logs - */ -enum iwl_mvm_marker_id { - MARKER_ID_TX_FRAME_LATENCY = 1, -}; /* MARKER_ID_API_E_VER_1 */ - -/** - * struct iwl_mvm_marker - mark info into the usniffer logs - * - * (MARKER_CMD = 0xcb) - * - * Mark the UTC time stamp into the usniffer logs together with additional - * metadata, so the usniffer output can be parsed. - * In the command response the ucode will return the GP2 time. - * - * @dw_len: The amount of dwords following this byte including this byte. - * @marker_id: A unique marker id (iwl_mvm_marker_id). - * @reserved: reserved. - * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC - * @metadata: additional meta data that will be written to the unsiffer log - */ -struct iwl_mvm_marker { - u8 dw_len; - u8 marker_id; - __le16 reserved; - __le64 timestamp; - __le32 metadata[0]; -} __packed; /* MARKER_API_S_VER_1 */ - -/* - * enum iwl_dc2dc_config_id - flag ids - * - * Ids of dc2dc configuration flags - */ -enum iwl_dc2dc_config_id { - DCDC_LOW_POWER_MODE_MSK_SET = 0x1, /* not used */ - DCDC_FREQ_TUNE_SET = 0x2, -}; /* MARKER_ID_API_E_VER_1 */ - -/** - * struct iwl_dc2dc_config_cmd - configure dc2dc values - * - * (DC2DC_CONFIG_CMD = 0x83) - * - * Set/Get & configure dc2dc values. - * The command always returns the current dc2dc values. - * - * @flags: set/get dc2dc - * @enable_low_power_mode: not used. - * @dc2dc_freq_tune0: frequency divider - digital domain - * @dc2dc_freq_tune1: frequency divider - analog domain - */ -struct iwl_dc2dc_config_cmd { - __le32 flags; - __le32 enable_low_power_mode; /* not used */ - __le32 dc2dc_freq_tune0; - __le32 dc2dc_freq_tune1; -} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */ - -/** - * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd - * - * Current dc2dc values returned by the FW. 
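A sketch of the simplest marker, with no metadata dwords appended; the helper name is hypothetical and dw_len is computed on the assumption that it counts the dwords of the marker itself.

static void example_fill_marker(struct iwl_mvm_marker *m, u64 utc_ms)
{
	m->dw_len = sizeof(*m) / sizeof(__le32);	/* 3 dwords, no metadata */
	m->marker_id = MARKER_ID_TX_FRAME_LATENCY;
	m->reserved = 0;
	m->timestamp = cpu_to_le64(utc_ms);
}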
- * - * @dc2dc_freq_tune0: frequency divider - digital domain - * @dc2dc_freq_tune1: frequency divider - analog domain - */ -struct iwl_dc2dc_config_resp { - __le32 dc2dc_freq_tune0; - __le32 dc2dc_freq_tune1; -} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ - -/*********************************** - * Smart Fifo API - ***********************************/ -/* Smart Fifo state */ -enum iwl_sf_state { - SF_LONG_DELAY_ON = 0, /* should never be called by driver */ - SF_FULL_ON, - SF_UNINIT, - SF_INIT_OFF, - SF_HW_NUM_STATES -}; - -/* Smart Fifo possible scenario */ -enum iwl_sf_scenario { - SF_SCENARIO_SINGLE_UNICAST, - SF_SCENARIO_AGG_UNICAST, - SF_SCENARIO_MULTICAST, - SF_SCENARIO_BA_RESP, - SF_SCENARIO_TX_RESP, - SF_NUM_SCENARIO -}; - -#define SF_TRANSIENT_STATES_NUMBER 2 /* SF_LONG_DELAY_ON and SF_FULL_ON */ -#define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ - -/* smart FIFO default values */ -#define SF_W_MARK_SISO 6144 -#define SF_W_MARK_MIMO2 8192 -#define SF_W_MARK_MIMO3 6144 -#define SF_W_MARK_LEGACY 4096 -#define SF_W_MARK_SCAN 4096 - -/* SF Scenarios timers for default configuration (aligned to 32 uSec) */ -#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */ -#define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */ -#define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */ -#define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */ - -/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */ -#define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ -#define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ -#define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ -#define SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */ -#define SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */ -#define SF_MCAST_AGING_TIMER 10016 /* 10 mSec */ -#define SF_BA_IDLE_TIMER 320 /* 300 uSec */ -#define SF_BA_AGING_TIMER 2016 /* 2 mSec */ -#define SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */ -#define SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */ - -#define SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */ - -#define SF_CFG_DUMMY_NOTIF_OFF BIT(16) - -/** - * struct iwl_sf_cfg_cmd - Smart Fifo configuration command. - * @state: smart fifo state, types listed in &enum iwl_sf_state. - * @watermark: Minimum allowed availabe free space in RXF for transient state. - * @long_delay_timeouts: aging and idle timer values for each scenario - * in long delay state. - * @full_on_timeouts: timer values for each scenario in full on state. - */ -struct iwl_sf_cfg_cmd { - __le32 state; - __le32 watermark[SF_TRANSIENT_STATES_NUMBER]; - __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; - __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; -} __packed; /* SF_CFG_API_S_VER_2 */ - -/*********************************** - * Location Aware Regulatory (LAR) API - MCC updates - ***********************************/ - -/** - * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. 
- * @mcc: given mobile country code - * @source_id: the source from where we got the MCC, see iwl_mcc_source - * @reserved: reserved for alignment - */ -struct iwl_mcc_update_cmd_v1 { - __le16 mcc; - u8 source_id; - u8 reserved; -} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */ - -/** - * struct iwl_mcc_update_cmd - Request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. - * @mcc: given mobile country code - * @source_id: the source from where we got the MCC, see iwl_mcc_source - * @reserved: reserved for alignment - * @key: integrity key for MCC API OEM testing - * @reserved2: reserved - */ -struct iwl_mcc_update_cmd { - __le16 mcc; - u8 source_id; - u8 reserved; - __le32 key; - u8 reserved2[20]; -} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */ - -/** - * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD. - * Contains the new channel control profile map, if changed, and the new MCC - * (mobile country code). - * The new MCC may be different than what was requested in MCC_UPDATE_CMD. - * @status: see &enum iwl_mcc_update_status - * @mcc: the new applied MCC - * @cap: capabilities for all channels which matches the MCC - * @source_id: the MCC source, see iwl_mcc_source - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) - * @channels: channel control data map, DWORD for each channel. Only the first - * 16bits are used. - */ -struct iwl_mcc_update_resp_v1 { - __le32 status; - __le16 mcc; - u8 cap; - u8 source_id; - __le32 n_channels; - __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */ - -/** - * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD. - * Contains the new channel control profile map, if changed, and the new MCC - * (mobile country code). - * The new MCC may be different than what was requested in MCC_UPDATE_CMD. - * @status: see &enum iwl_mcc_update_status - * @mcc: the new applied MCC - * @cap: capabilities for all channels which matches the MCC - * @source_id: the MCC source, see iwl_mcc_source - * @time: time elapsed from the MCC test start (in 30 seconds TU) - * @reserved: reserved. - * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 - * channels, depending on platform) - * @channels: channel control data map, DWORD for each channel. Only the first - * 16bits are used. - */ -struct iwl_mcc_update_resp { - __le32 status; - __le16 mcc; - u8 cap; - u8 source_id; - __le16 time; - __le16 reserved; - __le32 n_channels; - __le32 channels[0]; -} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */ - -/** - * struct iwl_mcc_chub_notif - chub notifies of mcc change - * (MCC_CHUB_UPDATE_CMD = 0xc9) - * The Chub (Communication Hub, CommsHUB) is a HW component that connects to - * the cellular and connectivity cores that gets updates of the mcc, and - * notifies the ucode directly of any mcc change. - * The ucode requests the driver to request the device to update geographic - * regulatory profile according to the given MCC (Mobile Country Code). - * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. - * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the - * MCC in the cmd response will be the relevant MCC in the NVM. 
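A sketch of an MCC update request using the v2 layout above, assuming the common convention of packing the two ASCII country-code letters into the 16-bit mcc field; the helper name is made up, and MCC_SOURCE_WIFI (listed just below) is only one possible source value.

static void example_fill_mcc_update(struct iwl_mcc_update_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->mcc = cpu_to_le16(('U' << 8) | 'S');	/* country code "US" */
	cmd->source_id = MCC_SOURCE_WIFI;
}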
- * @mcc: given mobile country code - * @source_id: identity of the change originator, see iwl_mcc_source - * @reserved1: reserved for alignment - */ -struct iwl_mcc_chub_notif { - __le16 mcc; - u8 source_id; - u8 reserved1; -} __packed; /* LAR_MCC_NOTIFY_S */ - -enum iwl_mcc_update_status { - MCC_RESP_NEW_CHAN_PROFILE, - MCC_RESP_SAME_CHAN_PROFILE, - MCC_RESP_INVALID, - MCC_RESP_NVM_DISABLED, - MCC_RESP_ILLEGAL, - MCC_RESP_LOW_PRIORITY, - MCC_RESP_TEST_MODE_ACTIVE, - MCC_RESP_TEST_MODE_NOT_ACTIVE, - MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE, -}; - -enum iwl_mcc_source { - MCC_SOURCE_OLD_FW = 0, - MCC_SOURCE_ME = 1, - MCC_SOURCE_BIOS = 2, - MCC_SOURCE_3G_LTE_HOST = 3, - MCC_SOURCE_3G_LTE_DEVICE = 4, - MCC_SOURCE_WIFI = 5, - MCC_SOURCE_RESERVED = 6, - MCC_SOURCE_DEFAULT = 7, - MCC_SOURCE_UNINITIALIZED = 8, - MCC_SOURCE_MCC_API = 9, - MCC_SOURCE_GET_CURRENT = 0x10, - MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11, -}; - -/* DTS measurements */ - -enum iwl_dts_measurement_flags { - DTS_TRIGGER_CMD_FLAGS_TEMP = BIT(0), - DTS_TRIGGER_CMD_FLAGS_VOLT = BIT(1), -}; - -/** - * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements - * - * @flags: indicates which measurements we want as specified in - * &enum iwl_dts_measurement_flags - */ -struct iwl_dts_measurement_cmd { - __le32 flags; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */ - -/** -* enum iwl_dts_control_measurement_mode - DTS measurement type -* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read -* back (latest value. Not waiting for new value). Use automatic -* SW DTS configuration. -* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings, -* trigger DTS reading and provide read back temperature read -* when available. -* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read -* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result, -* without measurement trigger. -*/ -enum iwl_dts_control_measurement_mode { - DTS_AUTOMATIC = 0, - DTS_REQUEST_READ = 1, - DTS_OVER_WRITE = 2, - DTS_DIRECT_WITHOUT_MEASURE = 3, -}; - -/** -* enum iwl_dts_used - DTS to use or used for measurement in the DTS request -* @DTS_USE_TOP: Top -* @DTS_USE_CHAIN_A: chain A -* @DTS_USE_CHAIN_B: chain B -* @DTS_USE_CHAIN_C: chain C -* @XTAL_TEMPERATURE: read temperature from xtal -*/ -enum iwl_dts_used { - DTS_USE_TOP = 0, - DTS_USE_CHAIN_A = 1, - DTS_USE_CHAIN_B = 2, - DTS_USE_CHAIN_C = 3, - XTAL_TEMPERATURE = 4, -}; - -/** -* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode -* @DTS_BIT6_MODE: bit 6 mode -* @DTS_BIT8_MODE: bit 8 mode -*/ -enum iwl_dts_bit_mode { - DTS_BIT6_MODE = 0, - DTS_BIT8_MODE = 1, -}; - -/** - * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements - * @control_mode: see &enum iwl_dts_control_measurement_mode - * @temperature: used when over write DTS mode is selected - * @sensor: set temperature sensor to use. See &enum iwl_dts_used - * @avg_factor: average factor to DTS in request DTS read mode - * @bit_mode: value defines the DTS bit mode to use. 
See &enum iwl_dts_bit_mode - * @step_duration: step duration for the DTS - */ -struct iwl_ext_dts_measurement_cmd { - __le32 control_mode; - __le32 temperature; - __le32 sensor; - __le32 avg_factor; - __le32 bit_mode; - __le32 step_duration; -} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ - -/** - * struct iwl_dts_measurement_notif_v1 - measurements notification - * - * @temp: the measured temperature - * @voltage: the measured voltage - */ -struct iwl_dts_measurement_notif_v1 { - __le32 temp; - __le32 voltage; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/ - -/** - * struct iwl_dts_measurement_notif_v2 - measurements notification - * - * @temp: the measured temperature - * @voltage: the measured voltage - * @threshold_idx: the trip index that was crossed - */ -struct iwl_dts_measurement_notif_v2 { - __le32 temp; - __le32 voltage; - __le32 threshold_idx; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */ - -/** - * struct ct_kill_notif - CT-kill entry notification - * - * @temperature: the current temperature in celsius - * @reserved: reserved - */ -struct ct_kill_notif { - __le16 temperature; - __le16 reserved; -} __packed; /* GRP_PHY_CT_KILL_NTF */ - -/** -* enum ctdp_cmd_operation - CTDP command operations -* @CTDP_CMD_OPERATION_START: update the current budget -* @CTDP_CMD_OPERATION_STOP: stop ctdp -* @CTDP_CMD_OPERATION_REPORT: get the average budget -*/ -enum iwl_mvm_ctdp_cmd_operation { - CTDP_CMD_OPERATION_START = 0x1, - CTDP_CMD_OPERATION_STOP = 0x2, - CTDP_CMD_OPERATION_REPORT = 0x4, -};/* CTDP_CMD_OPERATION_TYPE_E */ - -/** - * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget - * - * @operation: see &enum iwl_mvm_ctdp_cmd_operation - * @budget: the budget in milliwatt - * @window_size: defined in API but not used - */ -struct iwl_mvm_ctdp_cmd { - __le32 operation; - __le32 budget; - __le32 window_size; -} __packed; - -#define IWL_MAX_DTS_TRIPS 8 - -/** - * struct temp_report_ths_cmd - set temperature thresholds - * - * @num_temps: number of temperature thresholds passed - * @thresholds: array with the thresholds to be configured - */ -struct temp_report_ths_cmd { - __le32 num_temps; - __le16 thresholds[IWL_MAX_DTS_TRIPS]; -} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */ - -/*********************************** - * TDLS API - ***********************************/ - -/* Type of TDLS request */ -enum iwl_tdls_channel_switch_type { - TDLS_SEND_CHAN_SW_REQ = 0, - TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH, - TDLS_MOVE_CH, -}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */ - -/** - * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch - * @frame_timestamp: GP2 timestamp of channel-switch request/response packet - * received from peer - * @max_offchan_duration: What amount of microseconds out of a DTIM is given - * to the TDLS off-channel communication. For instance if the DTIM is - * 200TU and the TDLS peer is to be given 25% of the time, the value - * given will be 50TU, or 50 * 1024 if translated into microseconds. 
- * @switch_time: switch time the peer sent in its channel switch timing IE - * @switch_timeout: switch timeout the peer sent in its channel switch timing IE - */ -struct iwl_tdls_channel_switch_timing { - __le32 frame_timestamp; /* GP2 time of peer packet Rx */ - __le32 max_offchan_duration; /* given in micro-seconds */ - __le32 switch_time; /* given in micro-seconds */ - __le32 switch_timeout; /* given in micro-seconds */ -} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */ - -#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200 - -/** - * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template - * - * A template representing a TDLS channel-switch request or response frame - * - * @switch_time_offset: offset to the channel switch timing IE in the template - * @tx_cmd: Tx parameters for the frame - * @data: frame data - */ -struct iwl_tdls_channel_switch_frame { - __le32 switch_time_offset; - struct iwl_tx_cmd tx_cmd; - u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE]; -} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */ - -/** - * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command - * - * The command is sent to initiate a channel switch and also in response to - * incoming TDLS channel-switch request/response packets from remote peers. - * - * @switch_type: see &enum iwl_tdls_channel_switch_type - * @peer_sta_id: station id of TDLS peer - * @ci: channel we switch to - * @timing: timing related data for command - * @frame: channel-switch request/response template, depending to switch_type - */ -struct iwl_tdls_channel_switch_cmd { - u8 switch_type; - __le32 peer_sta_id; - struct iwl_fw_channel_info ci; - struct iwl_tdls_channel_switch_timing timing; - struct iwl_tdls_channel_switch_frame frame; -} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */ - -/** - * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification - * - * @status: non-zero on success - * @offchannel_duration: duration given in microseconds - * @sta_id: peer currently performing the channel-switch with - */ -struct iwl_tdls_channel_switch_notif { - __le32 status; - __le32 offchannel_duration; - __le32 sta_id; -} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */ - -/** - * struct iwl_tdls_sta_info - TDLS station info - * - * @sta_id: station id of the TDLS peer - * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx - * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer - * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise - */ -struct iwl_tdls_sta_info { - u8 sta_id; - u8 tx_to_peer_tid; - __le16 tx_to_peer_ssn; - __le32 is_initiator; -} __packed; /* TDLS_STA_INFO_VER_1 */ - -/** - * struct iwl_tdls_config_cmd - TDLS basic config command - * - * @id_and_color: MAC id and color being configured - * @tdls_peer_count: amount of currently connected TDLS peers - * @tx_to_ap_tid: TID reverved vs. the AP for FW based Tx - * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP - * @sta_info: per-station info. 
Only the first tdls_peer_count entries are set - * @pti_req_data_offset: offset of network-level data for the PTI template - * @pti_req_tx_cmd: Tx parameters for PTI request template - * @pti_req_template: PTI request template data - */ -struct iwl_tdls_config_cmd { - __le32 id_and_color; /* mac id and color */ - u8 tdls_peer_count; - u8 tx_to_ap_tid; - __le16 tx_to_ap_ssn; - struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT]; - - __le32 pti_req_data_offset; - struct iwl_tx_cmd pti_req_tx_cmd; - u8 pti_req_template[0]; -} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */ - -/** - * struct iwl_tdls_config_sta_info_res - TDLS per-station config information - * - * @sta_id: station id of the TDLS peer - * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to - * the peer - */ -struct iwl_tdls_config_sta_info_res { - __le16 sta_id; - __le16 tx_to_peer_last_seq; -} __packed; /* TDLS_STA_INFO_RSP_VER_1 */ - -/** - * struct iwl_tdls_config_res - TDLS config information from FW - * - * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP - * @sta_info: per-station TDLS config information - */ -struct iwl_tdls_config_res { - __le32 tx_to_ap_last_seq; - struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT]; -} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */ - -/** - * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration - * - * @reserved: reserved - * @membership_status: a bitmap of MU groups - * @user_position:the position of station in a group. If the station is in the - * group then bits (group * 2) is the position -1 - */ -struct iwl_mu_group_mgmt_cmd { - __le32 reserved; - __le32 membership_status[2]; - __le32 user_position[4]; -} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */ - -/** - * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification - * - * @membership_status: a bitmap of MU groups - * @user_position: the position of station in a group. If the station is in the - * group then bits (group * 2) is the position -1 - */ -struct iwl_mu_group_mgmt_notif { - __le32 membership_status[2]; - __le32 user_position[4]; -} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ - -#define MAX_STORED_BEACON_SIZE 600 - -/** - * struct iwl_stored_beacon_notif - Stored beacon notification - * - * @system_time: system time on air rise - * @tsf: TSF on air rise - * @beacon_timestamp: beacon on air rise - * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition - * @channel: channel this beacon was received on - * @rates: rate in ucode internal format - * @byte_count: frame's byte count - * @data: beacon data, length in @byte_count - */ -struct iwl_stored_beacon_notif { - __le32 system_time; - __le64 tsf; - __le32 beacon_timestamp; - __le16 band; - __le16 channel; - __le32 rates; - __le32 byte_count; - u8 data[MAX_STORED_BEACON_SIZE]; -} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_2 */ - -#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16 - -enum iwl_lqm_cmd_operatrions { - LQM_CMD_OPERATION_START_MEASUREMENT = 0x01, - LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02, -}; - -enum iwl_lqm_status { - LQM_STATUS_SUCCESS = 0, - LQM_STATUS_TIMEOUT = 1, - LQM_STATUS_ABORT = 2, -}; - -/** - * struct iwl_link_qual_msrmnt_cmd - Link Quality Measurement command - * @cmd_operation: command operation to be performed (start or stop) - * as defined above. - * @mac_id: MAC ID the measurement applies to. - * @measurement_time: time of the total measurement to be performed, in uSec. - * @timeout: maximum time allowed until a response is sent, in uSec. 
- */ -struct iwl_link_qual_msrmnt_cmd { - __le32 cmd_operation; - __le32 mac_id; - __le32 measurement_time; - __le32 timeout; -} __packed /* LQM_CMD_API_S_VER_1 */; - -/** - * struct iwl_link_qual_msrmnt_notif - Link Quality Measurement notification - * - * @frequent_stations_air_time: an array containing the total air time - * (in uSec) used by the most frequently transmitting stations. - * @number_of_stations: the number of uniqe stations included in the array - * (a number between 0 to 16) - * @total_air_time_other_stations: the total air time (uSec) used by all the - * stations which are not included in the above report. - * @time_in_measurement_window: the total time in uSec in which a measurement - * took place. - * @tx_frame_dropped: the number of TX frames dropped due to retry limit during - * measurement - * @mac_id: MAC ID the measurement applies to. - * @status: return status. may be one of the LQM_STATUS_* defined above. - * @reserved: reserved. - */ -struct iwl_link_qual_msrmnt_notif { - __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT]; - __le32 number_of_stations; - __le32 total_air_time_other_stations; - __le32 time_in_measurement_window; - __le32 tx_frame_dropped; - __le32 mac_id; - __le32 status; - u8 reserved[12]; -} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */ - -/** - * struct iwl_channel_switch_noa_notif - Channel switch NOA notification - * - * @id_and_color: ID and color of the MAC - */ -struct iwl_channel_switch_noa_notif { - __le32 id_and_color; -} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */ - -/* Operation types for the debug mem access */ -enum { - DEBUG_MEM_OP_READ = 0, - DEBUG_MEM_OP_WRITE = 1, - DEBUG_MEM_OP_WRITE_BYTES = 2, -}; - -#define DEBUG_MEM_MAX_SIZE_DWORDS 32 - -/** - * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory - * @op: DEBUG_MEM_OP_* - * @addr: address to read/write from/to - * @len: in dwords, to read/write - * @data: for write opeations, contains the source buffer - */ -struct iwl_dbg_mem_access_cmd { - __le32 op; - __le32 addr; - __le32 len; - __le32 data[]; -} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */ - -/* Status responses for the debug mem access */ -enum { - DEBUG_MEM_STATUS_SUCCESS = 0x0, - DEBUG_MEM_STATUS_FAILED = 0x1, - DEBUG_MEM_STATUS_LOCKED = 0x2, - DEBUG_MEM_STATUS_HIDDEN = 0x3, - DEBUG_MEM_STATUS_LENGTH = 0x4, -}; - -/** - * struct iwl_dbg_mem_access_rsp - Response to debug mem commands - * @status: DEBUG_MEM_STATUS_* - * @len: read dwords (0 for write operations) - * @data: contains the read DWs - */ -struct iwl_dbg_mem_access_rsp { - __le32 status; - __le32 len; - __le32 data[]; -} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */ - -/** - * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed - * @reserved: reserved - */ -struct iwl_nvm_access_complete_cmd { - __le32 reserved; -} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */ - -/** - * enum iwl_extended_cfg_flag - commands driver may send before - * finishing init flow - * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command - * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands - * @IWL_INIT_PHY: driver is going to send PHY_DB commands - */ -enum iwl_extended_cfg_flags { - IWL_INIT_DEBUG_CFG, - IWL_INIT_NVM, - IWL_INIT_PHY, -}; - -/** - * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for - * before finishing init flows - * @init_flags: values from iwl_extended_cfg_flags - */ -struct iwl_init_extended_cfg_cmd { - __le32 init_flags; -} 
__packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */ - -/* - * struct iwl_nvm_get_info - request to get NVM data - */ -struct iwl_nvm_get_info { - __le32 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_general - general NVM data - * @flags: 1 - empty, 0 - valid - * @nvm_version: nvm version - * @board_type: board type - * @reserved: reserved - */ -struct iwl_nvm_get_info_general { - __le32 flags; - __le16 nvm_version; - u8 board_type; - u8 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_sku - mac information - * @enable_24g: band 2.4G enabled - * @enable_5g: band 5G enabled - * @enable_11n: 11n enabled - * @enable_11ac: 11ac enabled - * @mimo_disable: MIMO enabled - * @ext_crypto: Extended crypto enabled - */ -struct iwl_nvm_get_info_sku { - __le32 enable_24g; - __le32 enable_5g; - __le32 enable_11n; - __le32 enable_11ac; - __le32 mimo_disable; - __le32 ext_crypto; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_phy - phy information - * @tx_chains: BIT 0 chain A, BIT 1 chain B - * @rx_chains: BIT 0 chain A, BIT 1 chain B - */ -struct iwl_nvm_get_info_phy { - __le32 tx_chains; - __le32 rx_chains; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */ - -#define IWL_NUM_CHANNELS (51) - -/** - * struct iwl_nvm_get_info_regulatory - regulatory information - * @lar_enabled: is LAR enabled - * @channel_profile: regulatory data of this channel - * @reserved: reserved - */ -struct iwl_nvm_get_info_regulatory { - __le32 lar_enabled; - __le16 channel_profile[IWL_NUM_CHANNELS]; - __le16 reserved; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */ - -/** - * struct iwl_nvm_get_info_rsp - response to get NVM data - * @general: general NVM data - * @mac_sku: data relating to MAC sku - * @phy_sku: data relating to PHY sku - * @regulatory: regulatory data - */ -struct iwl_nvm_get_info_rsp { - struct iwl_nvm_get_info_general general; - struct iwl_nvm_get_info_sku mac_sku; - struct iwl_nvm_get_info_phy phy_sku; - struct iwl_nvm_get_info_regulatory regulatory; -} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_1 */ - -/** - * struct iwl_mvm_antenna_coupling_notif - antenna coupling notification - * @isolation: antenna isolation value - */ -struct iwl_mvm_antenna_coupling_notif { - __le32 isolation; -} __packed; +#include "fw/api/tdls.h" +#include "fw/api/mac-cfg.h" +#include "fw/api/offload.h" +#include "fw/api/context.h" +#include "fw/api/time-event.h" +#include "fw/api/datapath.h" +#include "fw/api/phy.h" +#include "fw/api/config.h" +#include "fw/api/alive.h" +#include "fw/api/binding.h" +#include "fw/api/cmdhdr.h" +#include "fw/api/coex.h" +#include "fw/api/commands.h" +#include "fw/api/d3.h" +#include "fw/api/filter.h" +#include "fw/api/mac.h" +#include "fw/api/nvm-reg.h" +#include "fw/api/phy-ctxt.h" +#include "fw/api/power.h" +#include "fw/api/rs.h" +#include "fw/api/rx.h" +#include "fw/api/scan.h" +#include "fw/api/sf.h" +#include "fw/api/sta.h" +#include "fw/api/stats.h" +#include "fw/api/tof.h" +#include "fw/api/tx.h" #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 3c4d82045c1c..6b385cf10555 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -82,10 +82,10 @@ #include "iwl-io.h" #include "iwl-prph.h" #include "rs.h" -#include "fw-api-scan.h" 
+#include "fw/api/scan.h" #include "time-event.h" #include "fw-api.h" -#include "fw-api-scan.h" +#include "fw/api/scan.h" #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" MODULE_DESCRIPTION(DRV_DESCRIPTION); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c index fb9eaf003ea5..7ee8e9077baf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c @@ -251,7 +251,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, struct cfg80211_chan_def *chandef, u8 chains_static, u8 chains_dynamic) { - enum iwl_phy_ctxt_action action = FW_CTXT_ACTION_MODIFY; + enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY; lockdep_assert_held(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index c05e5ac565ae..c11fe2621d51 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -75,7 +75,7 @@ #include "iwl-debug.h" #include "mvm.h" #include "iwl-modparams.h" -#include "fw-api-power.h" +#include "fw/api/power.h" #define POWER_KEEP_ALIVE_PERIOD_SEC 25 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 35e813bdfbe5..cb44e4114655 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -69,7 +69,7 @@ #include #include "mvm.h" -#include "fw-api-scan.h" +#include "fw/api/scan.h" #include "iwl-io.h" #define IWL_DENSE_EBS_SCAN_RATIO 5 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c index 634175b2480c..2d0b8a391308 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c @@ -61,7 +61,7 @@ * *****************************************************************************/ #include "mvm.h" -#include "fw-api-tof.h" +#include "fw/api/tof.h" #define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h index 8c3421c9991d..2ff560aa1a82 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h @@ -63,7 +63,7 @@ #ifndef __tof_h__ #define __tof_h__ -#include "fw-api-tof.h" +#include "fw/api/tof.h" struct iwl_mvm_tof_data { struct iwl_tof_config_cmd tof_cfg; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 3ccd16f26b91..2ea74abad73d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -71,7 +71,7 @@ #include "iwl-prph.h" #include "iwl-csr.h" #include "mvm.h" -#include "fw-api-rs.h" +#include "fw/api/rs.h" /* * Will return 0 even if the cmd failed when RFKILL is asserted unless diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index a3795ba0d7b9..5e85f29220f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -55,7 +55,7 @@ #include "iwl-csr.h" #include "iwl-io.h" #include "internal.h" -#include "mvm/fw-api.h" +#include "fw/api/tx.h" /* * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 
de50418adae5..073793a53d8e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -43,8 +43,7 @@ #include "iwl-scd.h" #include "iwl-op-mode.h" #include "internal.h" -/* FIXME: need to abstract out TX command (once we know what it looks like) */ -#include "dvm/commands.h" +#include "fw/api/tx.h" #define IWL_TX_CRC_SIZE 4 #define IWL_TX_DELIMITER_SIZE 4 @@ -2367,7 +2366,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, tb1_len = ALIGN(len, 4); /* Tell NIC about any 2-byte padding after MAC header */ if (tb1_len != len) - tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); } else { tb1_len = len; } -- cgit v1.2.3-55-g7522 From b37ca87f93e475eb41aa6dfd3675e5bffdf67c0a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 6 Jun 2017 12:19:22 +0200 Subject: iwlwifi: fw api: fix various kernel-doc warnings Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h | 22 +++++++++++++++++++--- drivers/net/wireless/intel/iwlwifi/fw/api/txq.h | 3 ++- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h index fd97cccaedb8..ea4a3f04a83a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h @@ -112,15 +112,24 @@ static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version) #define IWL_ALWAYS_LONG_GROUP 1 /** - * struct iwl_cmd_header + * struct iwl_cmd_header - (short) command header format * * This header format appears in the beginning of each command sent from the * driver, and each response/notification received from uCode. */ struct iwl_cmd_header { - u8 cmd; /* Command ID: REPLY_RXON, etc. */ + /** + * @cmd: Command ID: REPLY_RXON, etc. + */ + u8 cmd; + /** + * @group_id: group ID, for commands with groups + */ u8 group_id; - /* + /** + * @sequence: + * Sequence number for the command. + * * The driver sets up the sequence number to values of its choosing. * uCode does not use this value, but passes it back to the driver * when sending the response to each driver-originated command, so @@ -150,6 +159,13 @@ struct iwl_cmd_header { * driver, and each response/notification received from uCode. 
* this is the wide version that contains more information about the command * like length, version and command type + * + * @cmd: command ID, like in &struct iwl_cmd_header + * @group_id: group ID, like in &struct iwl_cmd_header + * @sequence: sequence, like in &struct iwl_cmd_header + * @length: length of the command + * @reserved: reserved + * @version: command version */ struct iwl_cmd_header_wide { u8 cmd; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 7e7ebe82729b..805e161641aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -112,7 +112,7 @@ enum iwl_mvm_tx_fifo { }; /** - * iwl_tx_queue_cfg_actions - TXQ config options + * enum iwl_tx_queue_cfg_actions - TXQ config options * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format */ @@ -145,6 +145,7 @@ struct iwl_tx_queue_cfg_cmd { * @queue_number: queue number assigned to this RA -TID * @flags: set on failure * @write_pointer: initial value for write pointer + * @reserved: reserved */ struct iwl_tx_queue_cfg_rsp { __le16 queue_number; -- cgit v1.2.3-55-g7522 From 7d6222e27013a18f36e54f40a66e209a859fc1a8 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 8 Jun 2017 09:18:22 +0200 Subject: iwlwifi: mvm: add and use iwl_mvm_has_unified_ucode() This may need to be refined later, but for now using this, even with the TODO, is better than checking "has new TX API". Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 8 ++++---- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 6 ++++++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 2 +- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index e6f6de2500db..58f846610e5d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -457,7 +457,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) }; int ret; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm, true); lockdep_assert_held(&mvm->mutex); @@ -1034,7 +1034,7 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) { int ret; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_has_unified_ucode(mvm)) return iwl_run_unified_mvm_ucode(mvm, false); ret = iwl_run_init_mvm_ucode(mvm, false); @@ -1101,8 +1101,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; - /* Send phy db control command and then phy db calibration*/ - if (!iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_unified_ucode(mvm)) { + /* Send phy db control command and then phy db calibration */ ret = iwl_send_phy_db_data(mvm->phy_db); if (ret) goto error; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 6fa2c44e6edd..e9440ef3022f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1232,6 +1232,12 @@ static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm) return mvm->trans->cfg->use_tfh; } +static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm) +{ + /* TODO - better define this */ + return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_A000; +} + static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm) { /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 6b385cf10555..4d591fdc61f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -645,7 +645,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; } mvm->sf_state = SF_UNINIT; - if (iwl_mvm_has_new_tx_api(mvm)) + if (iwl_mvm_has_unified_ucode(mvm)) iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); else iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT); -- cgit v1.2.3-55-g7522 From c8c017a6ff933ff88f3abf7d924e717284e218af Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 8 Jun 2017 09:42:28 +0200 Subject: iwlwifi: mvm: check family instead of new TX API for workarounds There are two workarounds because RSS is currently broken on A000 devices due to firmware issues, but checking for the new TX API doesn't really make sense. Check the hardware family instead of the new TX API - there's nothing better to check since it's just a temporary workaround. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 3 ++- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 58f846610e5d..e04bf2f7c1ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1118,7 +1118,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* Init RSS configuration */ /* TODO - remove a000 disablement when we have RXQ config API */ - if (iwl_mvm_has_new_rx_api(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (iwl_mvm_has_new_rx_api(mvm) && + mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_A000) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index ab6c6bf79565..44f144c58720 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4280,7 +4280,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); /* TODO - remove a000 disablement when we have RXQ config API */ - if (!iwl_mvm_has_new_rx_api(mvm) || iwl_mvm_has_new_tx_api(mvm)) + if (!iwl_mvm_has_new_rx_api(mvm) || + mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) return; notif->cookie = mvm->queue_sync_cookie; -- cgit v1.2.3-55-g7522 From c67a3d05024fad00ac160e868967a1e711073993 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 7 Jun 2017 09:31:13 +0200 Subject: iwlwifi: mvm: byte-swap constant instead of variable Convention has it to byte-swap the constant instead of the variable when doing bit checks. This also generates better code when the swap is actually needed, since the constant can be swapped at compile-time. 
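To make the convention concrete (the field and flag names here are invented for illustration and are not taken from the patch), the two helpers below are equivalent, but in the second one the swap is applied to a compile-time constant, so it is folded at build time and no per-packet byte swap is emitted on big-endian hosts:

#include <linux/types.h>
#include <asm/byteorder.h>

#define EXAMPLE_FLAG 0x0008	/* hypothetical status bit, host byte order */

/* swaps the variable: a runtime byte swap on every call on big-endian hosts */
static bool example_flag_set_slow(__le16 status)
{
	return (le16_to_cpu(status) & EXAMPLE_FLAG) != 0;
}

/* swaps the constant: cpu_to_le16() of a literal is evaluated at compile time */
static bool example_flag_set(__le16 status)
{
	return (status & cpu_to_le16(EXAMPLE_FLAG)) != 0;
}
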
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 13733252c1fb..6b8e57b7234a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -851,7 +851,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rcu_read_lock(); - if (le16_to_cpu(desc->status) & IWL_RX_MPDU_STATUS_SRC_STA_FOUND) { + if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK; if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) { -- cgit v1.2.3-55-g7522 From 57bcc81d52a264847944f6e2b812588fbab09e5d Mon Sep 17 00:00:00 2001 From: Seraphime Kirkovski Date: Wed, 7 Jun 2017 01:20:27 +0200 Subject: iwlwifi: dvm: remove unused defines Those constants have been unused for quite some time now. Signed-off-by: Seraphime Kirkovski Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/dvm/commands.h | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index 2ab2773655a8..ede47e3c5971 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -1437,22 +1437,6 @@ struct agg_tx_status { __le16 sequence; } __packed; -/* - * definitions for initial rate index field - * bits [3:0] initial rate index - * bits [6:4] rate table color, used for the initial rate - * bit-7 invalid rate indication - * i.e. rate was not chosen from rate table - * or rate table color was changed during frame retries - * refer tlc rate info - */ - -#define IWL50_TX_RES_INIT_RATE_INDEX_POS 0 -#define IWL50_TX_RES_INIT_RATE_INDEX_MSK 0x0f -#define IWL50_TX_RES_RATE_TABLE_COLOR_POS 4 -#define IWL50_TX_RES_RATE_TABLE_COLOR_MSK 0x70 -#define IWL50_TX_RES_INV_RATE_INDEX_MSK 0x80 - /* refer to ra_tid */ #define IWLAGN_TX_RES_TID_POS 0 #define IWLAGN_TX_RES_TID_MSK 0x0f -- cgit v1.2.3-55-g7522 From 09856582bffba3b4a3f6286d53be16c2a569c3ef Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 1 Jun 2017 15:19:45 +0300 Subject: iwlwifi: mvm: refactor beacon template command code We currently support 3 different versions of the beacon template command and the code does some tricks in order to reuse what is possible across these versions. But it is a bit complicated to read and soon there will be one more variation that the driver needs implement, which would complicate it even further. Refactor the way we send beacon template commands, which increases the code size a bit, but makes it much easier to read. 
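In outline, the refactoring replaces one function full of version-dependent branches with one small sender per command version and a thin dispatcher that picks the version from the firmware capabilities. Stripped of the command contents (helper names shortened here, error handling mostly omitted), the resulting shape is roughly:

static int send_beacon(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		       struct sk_buff *beacon)
{
	if (WARN_ON(!beacon))
		return -EINVAL;

	/* oldest firmware: no CSA/ECSA offload fields in the command */
	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD))
		return send_beacon_v6(mvm, vif, beacon);

	/* CSA offload available, but still the old TX API: v7 layout */
	if (!iwl_mvm_has_new_tx_api(mvm))
		return send_beacon_v7(mvm, vif, beacon);

	/* new TX API: v8 layout with byte count and flags */
	return send_beacon_v8(mvm, vif, beacon);
}

Each send_beacon_vN() stands in for the corresponding iwl_mvm_mac_ctxt_send_beacon_vN() added in the diff below; the per-version functions then fill only the fields their command layout actually has.
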
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 31 ++-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 202 +++++++++++++--------- 2 files changed, 138 insertions(+), 95 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index d20baedead98..95dbed609f3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -766,7 +766,8 @@ struct iwl_mac_beacon_cmd_v6 { } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */ /** - * struct iwl_mac_beacon_cmd_data - data of beacon template with offloaded CSA + * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA + * @tx: the tx commands associated with the beacon frame * @template_id: currently equal to the mac context id of the coresponding * mac. * @tim_idx: the offset of the tim IE in the beacon @@ -775,23 +776,14 @@ struct iwl_mac_beacon_cmd_v6 { * @csa_offset: offset to the CSA IE if present * @frame: the template of the beacon frame */ -struct iwl_mac_beacon_cmd_data { +struct iwl_mac_beacon_cmd_v7 { + struct iwl_tx_cmd tx; __le32 template_id; __le32 tim_idx; __le32 tim_size; __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[0]; -}; - -/** - * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA - * @tx: the tx commands associated with the beacon frame - * @data: see &iwl_mac_beacon_cmd_data - */ -struct iwl_mac_beacon_cmd_v7 { - struct iwl_tx_cmd tx; - struct iwl_mac_beacon_cmd_data data; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ /** @@ -799,13 +791,24 @@ struct iwl_mac_beacon_cmd_v7 { * @byte_cnt: byte count of the beacon frame * @flags: for future use * @reserved: reserved - * @data: see &iwl_mac_beacon_cmd_data + * @template_id: currently equal to the mac context id of the coresponding + * mac. + * @tim_idx: the offset of the tim IE in the beacon + * @tim_size: the length of the tim IE + * @ecsa_offset: offset to the ECSA IE if present + * @csa_offset: offset to the CSA IE if present + * @frame: the template of the beacon frame */ struct iwl_mac_beacon_cmd { __le16 byte_cnt; __le16 flags; __le64 reserved; - struct iwl_mac_beacon_cmd_data data; + __le32 template_id; + __le32 tim_idx; + __le32 tim_size; + __le32 ecsa_offset; + __le32 csa_offset; + struct ieee80211_hdr frame[0]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */ struct iwl_beacon_notif { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index d130bdd76368..1546d54e2ebd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 - 2016 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -1048,83 +1048,26 @@ static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) return ie - beacon; } -static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - struct sk_buff *beacon) +static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon, + struct iwl_tx_cmd *tx) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_host_cmd cmd = { - .id = BEACON_TEMPLATE_CMD, - .flags = CMD_ASYNC, - }; - union { - struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; - struct iwl_mac_beacon_cmd_v7 beacon_cmd; - } u = {}; - struct iwl_mac_beacon_cmd beacon_cmd = {}; struct ieee80211_tx_info *info; - u32 beacon_skb_len; u32 rate, tx_flags; - if (WARN_ON(!beacon)) - return -EINVAL; - - beacon_skb_len = beacon->len; - - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) { - u32 csa_offset, ecsa_offset; - - csa_offset = iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_CHANNEL_SWITCH, - beacon_skb_len); - ecsa_offset = - iwl_mvm_find_ie_offset(beacon->data, - WLAN_EID_EXT_CHANSWITCH_ANN, - beacon_skb_len); - - if (iwl_mvm_has_new_tx_api(mvm)) { - beacon_cmd.data.template_id = - cpu_to_le32((u32)mvmvif->id); - beacon_cmd.data.ecsa_offset = cpu_to_le32(ecsa_offset); - beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset); - beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon_skb_len); - if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, - &beacon_cmd.data.tim_idx, - &beacon_cmd.data.tim_size, - beacon->data, - beacon_skb_len); - cmd.len[0] = sizeof(beacon_cmd); - cmd.data[0] = &beacon_cmd; - goto send; - - } else { - u.beacon_cmd.data.ecsa_offset = - cpu_to_le32(ecsa_offset); - u.beacon_cmd.data.csa_offset = cpu_to_le32(csa_offset); - cmd.len[0] = sizeof(u.beacon_cmd); - cmd.data[0] = &u; - } - } else { - cmd.len[0] = sizeof(u.beacon_cmd_v6); - cmd.data[0] = &u; - } - - /* TODO: for now the beacon template id is set to be the mac context id. - * Might be better to handle it as another resource ... 
*/ - u.beacon_cmd_v6.template_id = cpu_to_le32((u32)mvmvif->id); info = IEEE80211_SKB_CB(beacon); /* Set up TX command fields */ - u.beacon_cmd_v6.tx.len = cpu_to_le16((u16)beacon_skb_len); - u.beacon_cmd_v6.tx.sta_id = mvmvif->bcast_sta.sta_id; - u.beacon_cmd_v6.tx.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); + tx->len = cpu_to_le16((u16)beacon->len); + tx->sta_id = mvmvif->bcast_sta.sta_id; + tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF; tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) << TX_CMD_FLG_BT_PRIO_POS; - u.beacon_cmd_v6.tx.tx_flags = cpu_to_le32(tx_flags); + tx->tx_flags = cpu_to_le32(tx_flags); if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) { @@ -1133,7 +1076,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, mvm->mgmt_last_antenna_idx); } - u.beacon_cmd_v6.tx.rate_n_flags = + tx->rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); @@ -1141,29 +1084,126 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, rate = IWL_FIRST_OFDM_RATE; } else { rate = IWL_FIRST_CCK_RATE; - u.beacon_cmd_v6.tx.rate_n_flags |= - cpu_to_le32(RATE_MCS_CCK_MSK); + tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); } - u.beacon_cmd_v6.tx.rate_n_flags |= - cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); - /* Set up TX beacon command fields */ - if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, &u.beacon_cmd_v6.tim_idx, - &u.beacon_cmd_v6.tim_size, - beacon->data, - beacon_skb_len); + tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); +} + +static int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, + struct sk_buff *beacon, + void *data, int len) +{ + struct iwl_host_cmd cmd = { + .id = BEACON_TEMPLATE_CMD, + .flags = CMD_ASYNC, + }; -send: - /* Submit command */ + cmd.len[0] = len; + cmd.data[0] = data; cmd.dataflags[0] = 0; - cmd.len[1] = beacon_skb_len; + cmd.len[1] = beacon->len; cmd.data[1] = beacon->data; cmd.dataflags[1] = IWL_HCMD_DFL_DUP; return iwl_mvm_send_cmd(mvm, &cmd); } +static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_beacon_cmd_v6 beacon_cmd = {}; + + iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); + + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); + + return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); +} + +static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_beacon_cmd_v7 beacon_cmd = {}; + + iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx); + + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); + + beacon_cmd.csa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon->len)); + beacon_cmd.ecsa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon->len)); + + return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + 
sizeof(beacon_cmd)); +} + +static int iwl_mvm_mac_ctxt_send_beacon_v8(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mac_beacon_cmd beacon_cmd = {}; + + beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); + beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); + + if (vif->type == NL80211_IFTYPE_AP) + iwl_mvm_mac_ctxt_set_tim(mvm, + &beacon_cmd.tim_idx, + &beacon_cmd.tim_size, + beacon->data, beacon->len); + + beacon_cmd.csa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_CHANNEL_SWITCH, + beacon->len)); + beacon_cmd.ecsa_offset = + cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data, + WLAN_EID_EXT_CHANSWITCH_ANN, + beacon->len)); + + return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd, + sizeof(beacon_cmd)); +} + +static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct sk_buff *beacon) +{ + if (WARN_ON(!beacon)) + return -EINVAL; + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) + return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon); + + if (!iwl_mvm_has_new_tx_api(mvm)) + return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon); + + return iwl_mvm_mac_ctxt_send_beacon_v8(mvm, vif, beacon); +} + /* The beacon template for the AP/GO/IBSS has changed and needs update */ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -- cgit v1.2.3-55-g7522 From cf6c6ea352faadb15d1373d890bf857080b218a4 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 13 Jun 2017 13:18:48 +0300 Subject: iwlwifi: mvm: fix the FIFO numbers in A000 devices The FIFO numbering is different in A000 devices. This means that we routed BE packets to BK FIFO. Fix this. 
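The fix follows a simple pattern that the diff below spells out in full: keep one AC-to-FIFO lookup table per hardware generation and route every user through a small helper that selects the right table. Reduced to its essentials (array and helper names shortened here, element order following the mac80211 AC numbering):

static const u8 ac_to_tx_fifo[] = {
	IWL_MVM_TX_FIFO_VO, IWL_MVM_TX_FIFO_VI,
	IWL_MVM_TX_FIFO_BE, IWL_MVM_TX_FIFO_BK,
};

static const u8 ac_to_gen2_tx_fifo[] = {
	IWL_GEN2_EDCA_TX_FIFO_VO, IWL_GEN2_EDCA_TX_FIFO_VI,
	IWL_GEN2_EDCA_TX_FIFO_BE, IWL_GEN2_EDCA_TX_FIFO_BK,
};

/* every caller asks the helper instead of indexing a table directly */
static u8 mac_ac_to_tx_fifo(struct iwl_mvm *mvm, enum ieee80211_ac_numbers ac)
{
	return iwl_mvm_has_new_tx_api(mvm) ?
		ac_to_gen2_tx_fifo[ac] : ac_to_tx_fifo[ac];
}
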
Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/txq.h | 12 ++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 9 ++++++++- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 8 ++++++++ drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 6 +++--- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 2 +- 5 files changed, 32 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 805e161641aa..87c1e36a9c5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -111,6 +111,18 @@ enum iwl_mvm_tx_fifo { IWL_MVM_TX_FIFO_CMD = 7, }; +enum iwl_gen2_tx_fifo { + IWL_GEN2_TX_FIFO_CMD = 0, + IWL_GEN2_EDCA_TX_FIFO_BK, + IWL_GEN2_EDCA_TX_FIFO_BE, + IWL_GEN2_EDCA_TX_FIFO_VI, + IWL_GEN2_EDCA_TX_FIFO_VO, + IWL_GEN2_TRIG_TX_FIFO_BK, + IWL_GEN2_TRIG_TX_FIFO_BE, + IWL_GEN2_TRIG_TX_FIFO_VI, + IWL_GEN2_TRIG_TX_FIFO_VO, +}; + /** * enum iwl_tx_queue_cfg_actions - TXQ config options * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 1546d54e2ebd..604db47c8c21 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -80,6 +80,13 @@ const u8 iwl_mvm_ac_to_tx_fifo[] = { IWL_MVM_TX_FIFO_BK, }; +const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = { + IWL_GEN2_EDCA_TX_FIFO_VO, + IWL_GEN2_EDCA_TX_FIFO_VI, + IWL_GEN2_EDCA_TX_FIFO_BE, + IWL_GEN2_EDCA_TX_FIFO_BK, +}; + struct iwl_mvm_mac_iface_iterator_data { struct iwl_mvm *mvm; struct ieee80211_vif *vif; @@ -774,7 +781,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP); for (i = 0; i < IEEE80211_NUM_ACS; i++) { - u8 txf = iwl_mvm_ac_to_tx_fifo[i]; + u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i); cmd->ac[txf].cw_min = cpu_to_le16(mvmvif->queue_params[i].cw_min); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index e9440ef3022f..74948f875a3f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1291,6 +1291,14 @@ static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) } extern const u8 iwl_mvm_ac_to_tx_fifo[]; +extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[]; + +static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm, + enum ieee80211_ac_numbers ac) +{ + return iwl_mvm_has_new_tx_api(mvm) ? + iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac]; +} struct iwl_rate_info { u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. 
*/ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 4df5f13fcdae..4adc8fecfaef 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -315,7 +315,7 @@ static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm, for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac], mvmsta->hw_queue[ac], - iwl_mvm_ac_to_tx_fifo[ac], 0, + iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), 0, wdg_timeout); mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]); } @@ -745,7 +745,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_trans_txq_scd_cfg cfg = { - .fifo = iwl_mvm_ac_to_tx_fifo[ac], + .fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), .sta_id = mvmsta->sta_id, .tid = tid, .frame_limit = IWL_FRAME_LIMIT, @@ -1303,7 +1303,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number); cfg.tid = i; - cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac]; + cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac); cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE || txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index f263a1902e27..cccff1073e3b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -751,7 +751,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, max_amsdu_len = sta->max_amsdu_len; /* the Tx FIFO to which this A-MSDU will be routed */ - txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]); /* * Don't send an AMSDU that will be longer than the TXF. -- cgit v1.2.3-55-g7522 From bec9522aa57421984cb4e1e87eee195bae1ccb25 Mon Sep 17 00:00:00 2001 From: Sharon Dvir Date: Mon, 12 Jun 2017 11:40:33 +0300 Subject: iwlwifi: mvm: fix uninitialized var while waiting for queues to empty While waiting for queues to empty, If txq_id == IWL_MVM_INVALID_QUEUE for all txq_ids, ret is used uninitialized. Found by Klocwork. Fixes: d6d517b7730c ("iwlwifi: add wait for tx queue empty") Signed-off-by: Sharon Dvir Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 4adc8fecfaef..b196acd18252 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1619,10 +1619,11 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta) { - int i, ret; + int i; for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { u16 txq_id; + int ret; spin_lock_bh(&mvm_sta->lock); txq_id = mvm_sta->tid_data[i].txq_id; @@ -1633,10 +1634,10 @@ int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id); if (ret) - break; + return ret; } - return ret; + return 0; } int iwl_mvm_rm_sta(struct iwl_mvm *mvm, -- cgit v1.2.3-55-g7522 From c1b68c194ed72a5df55e7344b19c50ccb357f690 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 14 Jun 2017 15:25:39 +0300 Subject: iwlwifi: pcie: fix A-MSDU on gen2 devices The return status check of iwl_pcie_gen2_build_amsdu was buggy. Fix it. 
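For readers skimming the one-character change below: the helper follows the usual kernel convention of returning 0 on success and a negative errno on failure, so negating its return value sends the success case down the error path. A stripped-down illustration with a hypothetical helper name:

struct sk_buff;

/* hypothetical helper: returns 0 on success, a negative errno on failure */
int example_build_frame(struct sk_buff *skb);

/* buggy shape: the negation makes the success case (0) take the error path */
int example_send_buggy(struct sk_buff *skb)
{
	if (!example_build_frame(skb))
		return -1;
	return 0;
}

/* fixed shape: any non-zero return really is the error */
int example_send_fixed(struct sk_buff *skb)
{
	int ret = example_build_frame(skb);

	if (ret)
		return ret;
	return 0;
}
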
Fixes: 6ffe5de35b05 ("iwlwifi: pcie: add AMSDU to gen2") Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 5e85f29220f6..5dc785d4c167 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -422,9 +422,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, hdr_len = ieee80211_hdrlen(hdr->frame_control); if (amsdu) { - if (!iwl_pcie_gen2_build_amsdu(trans, skb, tfd, - tb1_len + IWL_FIRST_TB_SIZE, - hdr_len, dev_cmd)) + if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, + tb1_len + IWL_FIRST_TB_SIZE, + hdr_len, dev_cmd)) goto out_err; /* -- cgit v1.2.3-55-g7522 From f6aa45f67aaa03bd294040508218bc72c5958ad6 Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Mon, 5 Jun 2017 16:39:39 +0300 Subject: iwlwifi: mvm: support fw reading empty OTP If the OTP is empty, the NVM_GET_INFO command returns with flags' bit(0) on. This means the FW returns the default values for working with. This is allowed, so use this returned data. Fixes: e9e1ba3dbf00 ("iwlwifi: mvm: support getting nvm data from firmware") Signed-off-by: Liad Kaufman Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | 10 +++++++++- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 7 ++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index d4c01f3dce32..00bc7a25dece 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -163,9 +163,17 @@ struct iwl_nvm_get_info { __le32 reserved; } __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */ +/** + * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp + * @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty + */ +enum iwl_nvm_info_general_flags { + NVM_GENERAL_FLAGS_EMPTY_OTP = BIT(0), +}; + /** * struct iwl_nvm_get_info_general - general NVM data - * @flags: 1 - empty, 0 - valid + * @flags: bit 0: 1 - empty, 0 - non-empty * @nvm_version: nvm version * @board_type: board type * @reserved: reserved diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index dac7e542a190..5cc749261ce3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -576,11 +576,8 @@ int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm) } rsp = (void *)hcmd.resp_pkt->data; - if (le32_to_cpu(rsp->general.flags)) { - IWL_ERR(mvm, "Invalid NVM data from FW\n"); - ret = -EINVAL; - goto out; - } + if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) + IWL_INFO(mvm, "OTP is empty\n"); mvm->nvm_data = kzalloc(sizeof(*mvm->nvm_data) + sizeof(struct ieee80211_channel) * -- cgit v1.2.3-55-g7522 From f2e66c8df0d0f10c70ed7f5e14a939714e9ee6a9 Mon Sep 17 00:00:00 2001 From: Mordechai Goodstein Date: Tue, 13 Jun 2017 17:17:27 +0300 Subject: iwlwifi: implement fseq version mismatch warning During init, the FW checks whether the FSEQ value matches what it expects. If it doesn't match, we print a warning to let integrators clearly know that something is wrong. This can happen if another core (i.e. not WiFi) has updated the FSEQ version. 
This notification is only sent by the FW in production, for development firmwares, an assertion is triggered instead. Signed-off-by: Luca Coelho Signed-off-by: Mordechai Goodstein --- drivers/net/wireless/intel/iwlwifi/Makefile | 1 + drivers/net/wireless/intel/iwlwifi/fw/api/alive.h | 16 ++++ .../net/wireless/intel/iwlwifi/fw/api/commands.h | 7 ++ drivers/net/wireless/intel/iwlwifi/fw/common_rx.c | 88 ++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 3 + drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 4 +- 6 files changed, 118 insertions(+), 1 deletion(-) create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/common_rx.c diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index fd12b7394c5c..5dcb4a848dba 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -12,6 +12,7 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o +iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index 6af6a9b32b69..3684a3e180e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -187,4 +187,20 @@ struct iwl_card_state_notif { __le32 flags; } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ +/** + * struct iwl_fseq_ver_mismatch_nty - Notification about version + * + * This notification does not have a direct impact on the init flow. + * It means that another core (not WiFi) has initiated the FSEQ flow + * and updated the FSEQ version. The driver only prints an error when + * this occurs. + * + * @aux_read_fseq_ver: auxiliary read FSEQ version + * @wifi_fseq_ver: FSEQ version (embedded in WiFi) + */ +struct iwl_fseq_ver_mismatch_ntf { + __le32 aux_read_fseq_ver; + __le32 wifi_fseq_ver; +} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */ + #endif /* __iwl_fw_api_alive_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 34fceb26447d..c7b8cffdf281 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -652,6 +652,13 @@ enum iwl_system_subcmd_ids { * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd */ INIT_EXTENDED_CFG_CMD = 0x03, + + /** + * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version + * mismatch during init. The format is specified in + * &struct iwl_fseq_ver_mismatch_ntf. + */ + FSEQ_VER_MISMATCH_NTF = 0xFF, }; #endif /* __iwl_fw_api_commands_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c new file mode 100644 index 000000000000..6f75985eea66 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c @@ -0,0 +1,88 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. 
+ * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "fw/api/commands.h" +#include "fw/api/alive.h" + +static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data; + + IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n", + __le32_to_cpu(fseq->aux_read_fseq_ver), + __le32_to_cpu(fseq->wifi_fseq_ver)); +} + +void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); + + switch (cmd) { + case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF): + iwl_fwrt_fseq_ver_mismatch(fwrt, rxb); + break; + default: + break; + } +} +IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 75575290a3e4..66bea6545690 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -150,4 +150,7 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); +void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, + struct iwl_rx_cmd_buffer *rxb); + #endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 4d591fdc61f2..d0ffdf5fa3a5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1006,8 +1006,10 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); schedule_work(&mvm->async_handlers_wk); - break; + return; } + + iwl_fwrt_handle_notification(&mvm->fwrt, rxb); } static void iwl_mvm_rx(struct iwl_op_mode *op_mode, -- cgit v1.2.3-55-g7522 From 6667e6589a02df8f27f8eebfabd8891c9d74152c Mon Sep 17 00:00:00 2001 From: Zamir, Roee Date: Wed, 14 Jun 2017 13:53:44 +0300 Subject: iwlwifi: mvm: add compile-time option to disable EBS For testing purposes, we may want to disable EBS scans at compile time. Signed-off-by: Roee Zamir Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 21845034d80d..a922a351c916 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -140,5 +140,6 @@ #define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */ #define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */ #define IWL_MVM_RS_TPC_TX_POWER_STEP 3 +#define IWL_MVM_ENABLE_EBS 1 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index cb44e4114655..50983615dce6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -743,7 +743,7 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm, * 4. it's not a p2p find operation. 
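 *
 * (Annotation, not part of the original comment: after the hunk below there
 *  is effectively a fifth condition - the build-time constant
 *  IWL_MVM_ENABLE_EBS added to constants.h - so a test build can compile
 *  EBS scans out simply by setting that define to 0.)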
*/ return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) && - mvm->last_ebs_successful && + mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS && vif->type != NL80211_IFTYPE_P2P_DEVICE); } -- cgit v1.2.3-55-g7522 From 9ad8fd0b4a24e64c34130bdfed95bfbf382c8e59 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 20 Jun 2017 15:10:31 +0200 Subject: iwlwifi: pcie: rename iwl_trans_check_hw_rf_kill() to pcie Rename this function to the more appropriate iwl_pcie_check_hw_rf_kill() since it's only a function in the pcie code and cannot be called from any other place. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 8 ++++---- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index f16c1bb9bf94..87712aeac31f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -805,11 +805,11 @@ static int iwl_pci_resume(struct device *device) /* * Enable rfkill interrupt (in order to keep track of the rfkill * status). Must be locked to avoid processing a possible rfkill - * interrupt while in iwl_trans_check_hw_rf_kill(). + * interrupt while in iwl_pcie_check_hw_rf_kill(). */ mutex_lock(&trans_pcie->mutex); iwl_enable_rfkill_int(trans); - iwl_trans_check_hw_rf_kill(trans); + iwl_pcie_check_hw_rf_kill(trans); mutex_unlock(&trans_pcie->mutex); return 0; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index fa315d84e98e..a8ffd4ca8cd8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -791,7 +791,7 @@ void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable); void iwl_pcie_apm_config(struct iwl_trans *trans); int iwl_pcie_prepare_card_hw(struct iwl_trans *trans); void iwl_pcie_synchronize_irqs(struct iwl_trans *trans); -bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans); +bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill); void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index b84b78293e7b..c59f4581e972 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -307,7 +307,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; @@ -340,7 +340,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, goto out; /* re-check RF-Kill state since we may have missed the interrupt */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 92b3a55d0fbc..3fac3f29a139 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -986,7 +986,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, &first_ucode_section); } -bool iwl_trans_check_hw_rf_kill(struct iwl_trans *trans) +bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill = iwl_is_rfkill_set(trans); @@ -1252,7 +1252,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, mutex_lock(&trans_pcie->mutex); /* If platform's RF_KILL switch is NOT set to KILL */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) { ret = -ERFKILL; goto out; @@ -1300,7 +1300,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, ret = iwl_pcie_load_given_ucode(trans, fw); /* re-check RF-Kill state since we may have missed the interrupt */ - hw_rfkill = iwl_trans_check_hw_rf_kill(trans); + hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) ret = -ERFKILL; @@ -1663,7 +1663,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) trans_pcie->is_down = false; /* ...rfkill can call stop_device and set it false if needed */ - iwl_trans_check_hw_rf_kill(trans); + iwl_pcie_check_hw_rf_kill(trans); /* Make sure we sync here, because we'll need full access later */ if (low_power) -- cgit v1.2.3-55-g7522 From 960f864b7b628f67bb57aeb071066fd2fe092bba Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 20 Jun 2017 15:18:43 +0200 Subject: iwlwifi: mvm: require AP_LINK_PS for TVQM Since the TXQ timer freeze code will not properly handle the large TVQM queue numbers, warn if we get into that code when we have TVQM. Also, just to catch this earlier, warn if the firmware image doesn't support AP_LINK_PS but we're running on HW using TVQM. Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 44f144c58720..a2e0acc23adc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -445,8 +445,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); if (iwl_mvm_has_new_rx_api(mvm)) ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); - if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) + + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { ieee80211_hw_set(hw, AP_LINK_PS); + } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { + /* + * we absolutely need this for the new TX API since that comes + * with many more queues than the current code can deal with + * for station powersave + */ + return -EINVAL; + } if (mvm->trans->num_rx_queues > 1) ieee80211_hw_set(hw, USES_RSS); @@ -2399,6 +2409,14 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, unsigned long txqs = 0, tids = 0; int tid; + /* + * If we have TVQM then we get too high queue numbers - luckily + * we really shouldn't get here with that because such hardware + * should have firmware supporting buffer station offload. 
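 *
 * (Annotation, not part of the original patch: the early return that
 *  follows is a safety net - the txqs/tids variables built further down in
 *  this function are plain bitmaps with one bit per queue, which presumably
 *  cannot hold the much larger TVQM queue numbers, and with AP_LINK_PS the
 *  firmware buffers frames for sleeping stations itself, so mac80211 should
 *  never reach this path on TVQM hardware anyway.)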
+ */ + if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) + return; + spin_lock_bh(&mvmsta->lock); for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; -- cgit v1.2.3-55-g7522 From 7426ee33a29b3215357986378c77bb9949518154 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 20 Jun 2017 11:22:07 +0200 Subject: iwlwifi: mvm: simplify bufferable MMPDU check There's no need to spell out the cases when we can just use ieee80211_is_bufferable_mmpdu(). Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a2e0acc23adc..b8373923cfa9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -816,9 +816,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */ if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER && ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_deauth(hdr->frame_control) && - !ieee80211_is_disassoc(hdr->frame_control) && - !ieee80211_is_action(hdr->frame_control))) + !ieee80211_is_bufferable_mmpdu(hdr->frame_control))) sta = NULL; if (sta) { -- cgit v1.2.3-55-g7522 From d197358b755b56bd12c01044b6373986590878f6 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 22 Jun 2017 16:00:25 +0300 Subject: iwlwifi: mvm: rename p2p-specific sta functions to include p2p in the names The iwl_mvm_add_bcast_sta() and the iwl_mvm_rm_bcast_sta() functions are only called in P2P flows. Add _p2p_ to the function names to make this explicit. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index b8373923cfa9..d0fd46f9597c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1428,7 +1428,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, if (ret) goto out_unref_phy; - ret = iwl_mvm_add_bcast_sta(mvm, vif); + ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); if (ret) goto out_unbind; @@ -1558,7 +1558,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { mvm->p2p_device_vif = NULL; - iwl_mvm_rm_bcast_sta(mvm, vif); + iwl_mvm_rm_p2p_bcast_sta(mvm, vif); iwl_mvm_binding_remove_vif(mvm, vif); iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); mvmvif->phy_ctxt = NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index b196acd18252..5c707c62f1dc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2106,7 +2106,7 @@ int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * @mvm: the mvm component * @vif: the interface to which the broadcast station is added * @bsta: the broadcast station to add. 
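 *
 * (Annotation, not part of the original kernel-doc: after this patch the
 *  function below is iwl_mvm_add_p2p_bcast_sta(); per the commit message and
 *  the remove-interface hunk above, it and iwl_mvm_rm_p2p_bcast_sta() are
 *  used only in the NL80211_IFTYPE_P2P_DEVICE flows, which is exactly what
 *  the new names are meant to make explicit.)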
*/ -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; @@ -2137,7 +2137,7 @@ void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) * Send the FW a request to remove the station from it's internal data * structures, and in addition remove it from the local data structure. */ -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) +int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { int ret; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 05fecbe87da4..6f1d358f3008 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -533,9 +533,9 @@ void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm); int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, -- cgit v1.2.3-55-g7522 From c8f54701bdbfcc78ebbc0d10f47a8a1b803f101f Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 19 Jun 2017 23:50:31 +0200 Subject: iwlwifi: mvm: remove non-DQA mode All the firmware versions the driver supports enable DQA, and thus the only way to get non-DQA mode is to modify the source. Remove this mode to simplify the code. 
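For orientation while reading the large diff below, this is the helper being deleted together with a representative call site; both are copied from the mvm.h and ops.c hunks of this patch, and the before/after juxtaposition is only an illustration of the pattern, not additional code:

static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
{
	return fw_has_capa(&mvm->fw->ucode_capa,
			   IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
}

	/* before: queue choices branched on the helper above */
	if (iwl_mvm_is_dqa_supported(mvm))
		trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
	else
		trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;

	/* after: DQA is the only mode, so the DQA branch stays unconditionally */
	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;

The same collapse repeats through the mac-ctxt.c, sta.c and tx.c hunks, which is where most of the 777 deleted lines come from.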
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/txq.h | 6 - drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 16 +- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 164 +------- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 89 +---- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 40 +- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 31 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 437 +++++---------------- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 14 +- .../net/wireless/intel/iwlwifi/mvm/time-event.c | 5 +- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 133 ++----- 10 files changed, 158 insertions(+), 777 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h index 87c1e36a9c5a..87b4434224a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h @@ -62,12 +62,6 @@ #ifndef __iwl_fw_api_txq_h__ #define __iwl_fw_api_txq_h__ -/* Tx queue numbers for non-DQA mode */ -enum { - IWL_MVM_OFFCHANNEL_QUEUE = 8, - IWL_MVM_CMD_QUEUE = 9, -}; - /* * DQA queue numbers * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index e04bf2f7c1ba..ff44e1aa035e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -331,10 +331,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, */ memset(&mvm->queue_info, 0, sizeof(mvm->queue_info)); - if (iwl_mvm_is_dqa_supported(mvm)) - mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; - else - mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1; + mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1; for (i = 0; i < IEEE80211_MAX_QUEUES; i++) atomic_set(&mvm->mac80211_queue_stop_count[i], 0); @@ -1137,14 +1134,9 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* reset quota debouncing buffer - 0xff will yield invalid data */ memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd)); - /* Enable DQA-mode if required */ - if (iwl_mvm_is_dqa_supported(mvm)) { - ret = iwl_mvm_send_dqa_cmd(mvm); - if (ret) - goto error; - } else { - IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n"); - } + ret = iwl_mvm_send_dqa_cmd(mvm); + if (ret) + goto error; /* Add auxiliary station for scanning */ ret = iwl_mvm_add_aux_sta(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 604db47c8c21..8fe955d58c6e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -241,32 +241,17 @@ static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac, data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif); } -static void iwl_mvm_mac_sta_hw_queues_iter(void *_data, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_hw_queues_iface_iterator_data *data = _data; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - - /* Mark the queues used by the sta */ - data->used_hw_queues |= mvmsta->tfd_queue_msk; -} - unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, struct ieee80211_vif *exclude_vif) { - u8 sta_id; struct iwl_mvm_hw_queues_iface_iterator_data data = { .exclude_vif = exclude_vif, .used_hw_queues = BIT(IWL_MVM_OFFCHANNEL_QUEUE) | - BIT(mvm->aux_queue), + BIT(mvm->aux_queue) | + BIT(IWL_MVM_DQA_GCAST_QUEUE), }; - if (iwl_mvm_is_dqa_supported(mvm)) - data.used_hw_queues |= BIT(IWL_MVM_DQA_GCAST_QUEUE); - else - 
data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE); - lockdep_assert_held(&mvm->mutex); /* mark all VIF used hw queues */ @@ -274,26 +259,6 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL, iwl_mvm_iface_hw_queues_iter, &data); - /* - * for DQA, the hw_queue in mac80211 is never really used for - * real traffic (only the few queue IDs covered above), so - * we can reuse the real HW queue IDs the stations use - */ - if (iwl_mvm_is_dqa_supported(mvm)) - return data.used_hw_queues; - - /* don't assign the same hw queues as TDLS stations */ - ieee80211_iterate_stations_atomic(mvm->hw, - iwl_mvm_mac_sta_hw_queues_iter, - &data); - - /* - * Some TDLS stations may be removed but are in the process of being - * drained. Don't touch their queues. - */ - for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) - data.used_hw_queues |= mvm->tfd_drained[sta_id]; - return data.used_hw_queues; } @@ -344,8 +309,7 @@ void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, NUM_TSF_IDS); } -static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, - struct ieee80211_vif *vif) +int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_mac_iface_iterator_data data = { @@ -361,6 +325,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, int ret, i, queue_limit; unsigned long used_hw_queues; + lockdep_assert_held(&mvm->mutex); + /* * Allocate a MAC ID and a TSF for this MAC, along with the queues * and other resources. @@ -444,19 +410,14 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, return 0; } - if (iwl_mvm_is_dqa_supported(mvm)) { - /* - * queues in mac80211 almost entirely independent of - * the ones here - no real limit - */ - queue_limit = IEEE80211_MAX_QUEUES; - BUILD_BUG_ON(IEEE80211_MAX_QUEUES > - BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0])); - } else { - /* need to not use too many in this case */ - queue_limit = mvm->first_agg_queue; - } + /* + * queues in mac80211 almost entirely independent of + * the ones here - no real limit + */ + queue_limit = IEEE80211_MAX_QUEUES; + BUILD_BUG_ON(IEEE80211_MAX_QUEUES > + BITS_PER_BYTE * + sizeof(mvm->hw_queue_to_mac80211[0])); /* * Find available queues, and allocate them to the ACs. When in @@ -478,27 +439,12 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm, /* Allocate the CAB queue for softAP and GO interfaces */ if (vif->type == NL80211_IFTYPE_AP) { - u8 queue; - - if (!iwl_mvm_is_dqa_supported(mvm)) { - queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); - - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate cab queue\n"); - ret = -EIO; - goto exit_fail; - } - } else { - queue = IWL_MVM_DQA_GCAST_QUEUE; - } - /* * For TVQM this will be overwritten later with the FW assigned * queue value (when queue is enabled). 
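 *
 * (Annotation, not part of the original comment: with non-DQA gone, the CAB
 *  queue for AP/GO interfaces is always the fixed IWL_MVM_DQA_GCAST_QUEUE,
 *  which is why the find_first_zero_bit() search and its "Failed to allocate
 *  cab queue" error path are deleted just above.)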
*/ - mvmvif->cab_queue = queue; - vif->cab_queue = queue; + mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; + vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; } else { vif->cab_queue = IEEE80211_INVAL_HW_QUEUE; } @@ -519,78 +465,6 @@ exit_fail: return ret; } -int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, vif, false, false); - u32 ac; - int ret; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_mvm_mac_ctxt_allocate_resources(mvm, vif); - if (ret) - return ret; - - /* If DQA is supported - queues will be enabled when needed */ - if (iwl_mvm_is_dqa_supported(mvm)) - return 0; - - switch (vif->type) { - case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_TX_FIFO_VO, 0, wdg_timeout); - break; - case NL80211_IFTYPE_AP: - iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); - /* fall through */ - default: - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac], - vif->hw_queue[ac], - iwl_mvm_ac_to_tx_fifo[ac], 0, - wdg_timeout); - break; - } - - return 0; -} - -void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif) -{ - int ac; - - lockdep_assert_held(&mvm->mutex); - - /* - * If DQA is supported - queues were already disabled, since in - * DQA-mode the queues are a property of the STA and not of the - * vif, and at this point the STA was already deleted - */ - if (iwl_mvm_is_dqa_supported(mvm)) - return; - - switch (vif->type) { - case NL80211_IFTYPE_P2P_DEVICE: - iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MVM_OFFCHANNEL_QUEUE, - IWL_MAX_TID_COUNT, 0); - - break; - case NL80211_IFTYPE_AP: - iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue, - IWL_MAX_TID_COUNT, 0); - /* fall through */ - default: - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) - iwl_mvm_disable_txq(mvm, vif->hw_queue[ac], - vif->hw_queue[ac], - IWL_MAX_TID_COUNT, 0); - } -} - static void iwl_mvm_ack_rates(struct iwl_mvm *mvm, struct ieee80211_vif *vif, enum nl80211_band band, @@ -914,18 +788,12 @@ static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm, { struct iwl_mac_ctx_cmd cmd = {}; u32 tfd_queue_msk = 0; - int ret, i; + int ret; WARN_ON(vif->type != NL80211_IFTYPE_MONITOR); iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action); - if (!iwl_mvm_is_dqa_supported(mvm)) { - for (i = 0; i < IEEE80211_NUM_ACS; i++) - if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) - tfd_queue_msk |= BIT(vif->hw_queue[i]); - } - cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC | MAC_FILTER_IN_CONTROL_AND_MGMT | MAC_FILTER_IN_BEACON | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index d0fd46f9597c..aad75f1cea31 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -464,10 +464,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (mvm->trans->max_skb_frags) hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; - if (!iwl_mvm_is_dqa_supported(mvm)) - hw->queues = mvm->first_agg_queue; - else - hw->queues = IEEE80211_MAX_QUEUES; + hw->queues = IEEE80211_MAX_QUEUES; hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | IEEE80211_RADIOTAP_MCS_HAVE_STBC; @@ -1067,9 +1064,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) iwl_mvm_reset_phy_ctxts(mvm); 
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); - memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained)); memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames)); - memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained)); memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); @@ -1372,17 +1367,15 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, goto out_release; } - if (iwl_mvm_is_dqa_supported(mvm)) { - /* - * Only queue for this station is the mcast queue, - * which shouldn't be in TFD mask anyway - */ - ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, - 0, vif->type, - IWL_STA_MULTICAST); - if (ret) - goto out_release; - } + /* + * Only queue for this station is the mcast queue, + * which shouldn't be in TFD mask anyway + */ + ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, + 0, vif->type, + IWL_STA_MULTICAST); + if (ret) + goto out_release; iwl_mvm_vif_dbgfs_register(mvm, vif); goto out_unlock; @@ -1456,8 +1449,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, out_release: if (vif->type != NL80211_IFTYPE_P2P_DEVICE) mvm->vif_count--; - - iwl_mvm_mac_ctxt_release(mvm, vif); out_unlock: mutex_unlock(&mvm->mutex); @@ -1469,40 +1460,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif); - - if (tfd_msk && !iwl_mvm_is_dqa_supported(mvm)) { - /* - * mac80211 first removes all the stations of the vif and - * then removes the vif. When it removes a station it also - * flushes the AMPDU session. So by now, all the AMPDU sessions - * of all the stations of this vif are closed, and the queues - * of these AMPDU sessions are properly closed. - * We still need to take care of the shared queues of the vif. - * Flush them here. - * For DQA mode there is no need - broacast and multicast queue - * are flushed separately. - */ - mutex_lock(&mvm->mutex); - iwl_mvm_flush_tx_path(mvm, tfd_msk, 0); - mutex_unlock(&mvm->mutex); - - /* - * There are transports that buffer a few frames in the host. - * For these, the flush above isn't enough since while we were - * flushing, the transport might have sent more frames to the - * device. To solve this, wait here until the transport is - * empty. Technically, this could have replaced the flush - * above, but flush is much faster than draining. So flush - * first, and drain to make sure we have no frames in the - * transport anymore. - * If a station still had frames on the shared queues, it is - * already marked as draining, so to complete the draining, we - * just need to wait until the transport is empty. - */ - iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_msk); - } - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { /* * Flush the ROC worker which will flush the OFFCHANNEL queue. @@ -1510,14 +1467,6 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, * queue are sent in ROC session. */ flush_work(&mvm->roc_done_wk); - } else { - /* - * By now, all the AC queues are empty. The AGG queues are - * empty too. We already got all the Tx responses for all the - * packets in the queues. The drain work can have been - * triggered. Flush it. 
- */ - flush_work(&mvm->sta_drained_wk); } } @@ -1571,7 +1520,6 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, iwl_mvm_mac_ctxt_remove(mvm, vif); out_release: - iwl_mvm_mac_ctxt_release(mvm, vif); mutex_unlock(&mvm->mutex); } @@ -2419,11 +2367,6 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; - if (!iwl_mvm_is_dqa_supported(mvm) && - tid_data->state != IWL_AGG_ON && - tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) - continue; - if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) continue; @@ -2437,9 +2380,6 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, switch (cmd) { case STA_NOTIFY_SLEEP: - if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) - ieee80211_sta_block_awake(hw, sta, true); - for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) ieee80211_sta_set_buffered(sta, tid, true); @@ -2632,9 +2572,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) return -EINVAL; - /* if a STA is being removed, reuse its ID */ - flush_work(&mvm->sta_drained_wk); - /* * If we are in a STA removal flow and in DQA mode: * @@ -2649,8 +2586,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, * make sure the worker is no longer handling frames for this STA. */ if (old_state == IEEE80211_STA_NONE && - new_state == IEEE80211_STA_NOTEXIST && - iwl_mvm_is_dqa_supported(mvm)) { + new_state == IEEE80211_STA_NOTEXIST) { iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta); flush_work(&mvm->add_stream_wk); @@ -4032,8 +3968,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, return; /* Make sure we're done with the deferred traffic before flushing */ - if (iwl_mvm_is_dqa_supported(mvm)) - flush_work(&mvm->add_stream_wk); + flush_work(&mvm->add_stream_wk); mutex_lock(&mvm->mutex); mvmvif = iwl_mvm_vif_from_mac80211(vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 74948f875a3f..4e38dae9e5c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -121,6 +121,9 @@ */ #define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3 +/* offchannel queue towards mac80211 */ +#define IWL_MVM_OFFCHANNEL_QUEUE 0 + extern const struct ieee80211_ops iwl_mvm_hw_ops; /** @@ -783,11 +786,7 @@ struct iwl_mvm { /* data related to data path */ struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; - struct work_struct sta_drained_wk; unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; - unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; - atomic_t pending_frames[IWL_MVM_STATION_COUNT]; - u32 tfd_drained[IWL_MVM_STATION_COUNT]; u8 rx_ba_sessions; /* configured by mac80211 */ @@ -960,9 +959,6 @@ struct iwl_mvm { u16 probe_queue; u16 p2p_dev_queue; - u8 first_agg_queue; - u8 last_agg_queue; - /* Indicate if device power save is allowed */ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ unsigned int max_amsdu_len; /* used for debugfs only */ @@ -1125,12 +1121,6 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } -static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) -{ - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DQA_SUPPORT); -} - static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) { /* For now we only use this 
mode to differentiate between @@ -1469,7 +1459,6 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef); /* MAC (virtual interface) programming */ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif); -void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool force_assoc_off, const u8 *bssid_override); @@ -1720,10 +1709,6 @@ bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue, u8 sta_id, u8 tid, unsigned int timeout); -/* - * Disable a TXQ. - * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored. - */ int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, u8 tid, u8 flags); int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); @@ -1733,25 +1718,8 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); */ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) { - u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE : - IWL_MVM_CMD_QUEUE; - return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & - ~BIT(cmd_queue)); -} - -static inline -void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, - u8 fifo, u16 ssn, unsigned int wdg_timeout) -{ - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = fifo, - .tid = IWL_MAX_TID_COUNT, - .aggregate = false, - .frame_limit = IWL_FRAME_LIMIT, - }; - - iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); + ~BIT(IWL_MVM_DQA_CMD_QUEUE)); } static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index d0ffdf5fa3a5..c387d5095bfe 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -623,27 +623,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->fw_restart = iwlwifi_mod_params.fw_restart ? 
-1 : 0; - if (!iwl_mvm_is_dqa_supported(mvm)) { - mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1; - - if (mvm->cfg->base_params->num_of_queues == 16) { - mvm->aux_queue = 11; - mvm->first_agg_queue = 12; - BUILD_BUG_ON(BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0]) < 12); - } else { - mvm->aux_queue = 15; - mvm->first_agg_queue = 16; - BUILD_BUG_ON(BITS_PER_BYTE * - sizeof(mvm->hw_queue_to_mac80211[0]) < 16); - } - } else { - mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; - mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; - mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; - mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE; - mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE; - } + mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE; + mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; + mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; + mvm->sf_state = SF_UNINIT; if (iwl_mvm_has_unified_ucode(mvm)) iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR); @@ -662,7 +645,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); - INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk); INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work); INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work); INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk); @@ -714,10 +696,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); - if (iwl_mvm_is_dqa_supported(mvm)) - trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; - else - trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE; + trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE; trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD; trans_cfg.scd_set_active = true; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 5c707c62f1dc..5506d6ac66d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -284,60 +284,6 @@ unlock: rcu_read_unlock(); } -static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - unsigned long used_hw_queues; - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - unsigned int wdg_timeout = - iwl_mvm_get_wd_timeout(mvm, NULL, true, false); - u32 ac; - - lockdep_assert_held(&mvm->mutex); - - used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL); - - /* Find available queues, and allocate them to the ACs */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - u8 queue = find_first_zero_bit(&used_hw_queues, - mvm->first_agg_queue); - - if (queue >= mvm->first_agg_queue) { - IWL_ERR(mvm, "Failed to allocate STA queue\n"); - return -EBUSY; - } - - __set_bit(queue, &used_hw_queues); - mvmsta->hw_queue[ac] = queue; - } - - /* Found a place for all queues - enable them */ - for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - iwl_mvm_enable_ac_txq(mvm, mvmsta->hw_queue[ac], - mvmsta->hw_queue[ac], - iwl_mvm_mac_ac_to_tx_fifo(mvm, ac), 0, - wdg_timeout); - mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]); - } - - return 0; -} - -static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, - struct ieee80211_sta *sta) -{ - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - unsigned long sta_msk; - int i; - - lockdep_assert_held(&mvm->mutex); - - /* disable the TDLS STA-specific queues */ - sta_msk = mvmsta->tfd_queue_msk; - 
for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE) - iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0); -} - /* Disable aggregations for a bitmap of TIDs for a given station */ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, unsigned long disable_agg_tids, @@ -1317,8 +1263,6 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY; } } - - atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0); } int iwl_mvm_add_sta(struct iwl_mvm *mvm, @@ -1343,9 +1287,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, spin_lock_init(&mvm_sta->lock); - /* In DQA mode, if this is a HW restart, re-alloc existing queues */ - if (iwl_mvm_is_dqa_supported(mvm) && - test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + /* if this is a HW restart re-alloc existing queues */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta); goto update_fw; } @@ -1363,33 +1306,15 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK; /* HW restart, don't assume the memory has been zeroed */ - atomic_set(&mvm->pending_frames[sta_id], 0); mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ mvm_sta->tfd_queue_msk = 0; - /* - * Allocate new queues for a TDLS station, unless we're in DQA mode, - * and then they'll be allocated dynamically - */ - if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) { - ret = iwl_mvm_tdls_sta_init(mvm, sta); - if (ret) - return ret; - } else if (!iwl_mvm_is_dqa_supported(mvm)) { - for (i = 0; i < IEEE80211_NUM_ACS; i++) - if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) - mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); - } - /* for HW restart - reset everything but the sequence number */ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { u16 seq = mvm_sta->tid_data[i].seq_number; memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); mvm_sta->tid_data[i].seq_number = seq; - if (!iwl_mvm_is_dqa_supported(mvm)) - continue; - /* * Mark all queues for this STA as unallocated and defer TX * frames until the queue is allocated @@ -1423,7 +1348,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->dup_data = dup_data; } - if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_new_tx_api(mvm)) { ret = iwl_mvm_reserve_sta_stream(mvm, sta, ieee80211_vif_type_p2p(vif)); if (ret) @@ -1449,8 +1374,6 @@ update_fw: return 0; err: - if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) - iwl_mvm_tdls_sta_deinit(mvm, sta); return ret; } @@ -1523,79 +1446,6 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) return 0; } -void iwl_mvm_sta_drained_wk(struct work_struct *wk) -{ - struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk); - u8 sta_id; - - /* - * The mutex is needed because of the SYNC cmd, but not only: if the - * work would run concurrently with iwl_mvm_rm_sta, it would run before - * iwl_mvm_rm_sta sets the station as busy, and exit. Then - * iwl_mvm_rm_sta would set the station as busy, and nobody will clean - * that later. 
- */ - mutex_lock(&mvm->mutex); - - for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) { - int ret; - struct ieee80211_sta *sta = - rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], - lockdep_is_held(&mvm->mutex)); - - /* - * This station is in use or RCU-removed; the latter happens in - * managed mode, where mac80211 removes the station before we - * can remove it from firmware (we can only do that after the - * MAC is marked unassociated), and possibly while the deauth - * frame to disconnect from the AP is still queued. Then, the - * station pointer is -ENOENT when the last skb is reclaimed. - */ - if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT) - continue; - - if (PTR_ERR(sta) == -EINVAL) { - IWL_ERR(mvm, "Drained sta %d, but it is internal?\n", - sta_id); - continue; - } - - if (!sta) { - IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n", - sta_id); - continue; - } - - WARN_ON(PTR_ERR(sta) != -EBUSY); - /* This station was removed and we waited until it got drained, - * we can now proceed and remove it. - */ - ret = iwl_mvm_rm_sta_common(mvm, sta_id); - if (ret) { - IWL_ERR(mvm, - "Couldn't remove sta %d after it was drained\n", - sta_id); - continue; - } - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); - clear_bit(sta_id, mvm->sta_drained); - - if (mvm->tfd_drained[sta_id]) { - unsigned long i, msk = mvm->tfd_drained[sta_id]; - - for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE) - iwl_mvm_disable_txq(mvm, i, i, - IWL_MAX_TID_COUNT, 0); - - mvm->tfd_drained[sta_id] = 0; - IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n", - sta_id, msk); - } - } - - mutex_unlock(&mvm->mutex); -} - static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_sta *mvm_sta) @@ -1654,79 +1504,65 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, if (iwl_mvm_has_new_rx_api(mvm)) kfree(mvm_sta->dup_data); - if ((vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == sta_id) || - iwl_mvm_is_dqa_supported(mvm)){ - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); - if (ret) - return ret; - /* flush its queues here since we are freeing mvm_sta */ - ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); - if (ret) - return ret; - if (iwl_mvm_has_new_tx_api(mvm)) { - ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); - } else { - u32 q_mask = mvm_sta->tfd_queue_msk; + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + if (ret) + return ret; - ret = iwl_trans_wait_tx_queues_empty(mvm->trans, - q_mask); - } - if (ret) - return ret; - ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); - - /* If DQA is supported - the queues can be disabled now */ - if (iwl_mvm_is_dqa_supported(mvm)) { - iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); - /* - * If pending_frames is set at this point - it must be - * driver internal logic error, since queues are empty - * and removed successuly. 
- * warn on it but set it to 0 anyway to avoid station - * not being removed later in the function - */ - WARN_ON(atomic_xchg(&mvm->pending_frames[sta_id], 0)); - } + /* flush its queues here since we are freeing mvm_sta */ + ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0); + if (ret) + return ret; + if (iwl_mvm_has_new_tx_api(mvm)) { + ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); + } else { + u32 q_mask = mvm_sta->tfd_queue_msk; - /* If there is a TXQ still marked as reserved - free it */ - if (iwl_mvm_is_dqa_supported(mvm) && - mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { - u8 reserved_txq = mvm_sta->reserved_queue; - enum iwl_mvm_queue_status *status; - - /* - * If no traffic has gone through the reserved TXQ - it - * is still marked as IWL_MVM_QUEUE_RESERVED, and - * should be manually marked as free again - */ - spin_lock_bh(&mvm->queue_info_lock); - status = &mvm->queue_info[reserved_txq].status; - if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && - (*status != IWL_MVM_QUEUE_FREE), - "sta_id %d reserved txq %d status %d", - sta_id, reserved_txq, *status)) { - spin_unlock_bh(&mvm->queue_info_lock); - return -EINVAL; - } + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + q_mask); + } + if (ret) + return ret; + + ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); - *status = IWL_MVM_QUEUE_FREE; + iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); + + /* If there is a TXQ still marked as reserved - free it */ + if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { + u8 reserved_txq = mvm_sta->reserved_queue; + enum iwl_mvm_queue_status *status; + + /* + * If no traffic has gone through the reserved TXQ - it + * is still marked as IWL_MVM_QUEUE_RESERVED, and + * should be manually marked as free again + */ + spin_lock_bh(&mvm->queue_info_lock); + status = &mvm->queue_info[reserved_txq].status; + if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && + (*status != IWL_MVM_QUEUE_FREE), + "sta_id %d reserved txq %d status %d", + sta_id, reserved_txq, *status)) { spin_unlock_bh(&mvm->queue_info_lock); + return -EINVAL; } - if (vif->type == NL80211_IFTYPE_STATION && - mvmvif->ap_sta_id == sta_id) { - /* if associated - we can't remove the AP STA now */ - if (vif->bss_conf.assoc) - return ret; + *status = IWL_MVM_QUEUE_FREE; + spin_unlock_bh(&mvm->queue_info_lock); + } + + if (vif->type == NL80211_IFTYPE_STATION && + mvmvif->ap_sta_id == sta_id) { + /* if associated - we can't remove the AP STA now */ + if (vif->bss_conf.assoc) + return ret; - /* unassoc - go ahead - remove the AP STA now */ - mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; + /* unassoc - go ahead - remove the AP STA now */ + mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; - /* clear d0i3_ap_sta_id if no longer relevant */ - if (mvm->d0i3_ap_sta_id == sta_id) - mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; - } + /* clear d0i3_ap_sta_id if no longer relevant */ + if (mvm->d0i3_ap_sta_id == sta_id) + mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA; } /* @@ -1743,32 +1579,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, * calls the drain worker. */ spin_lock_bh(&mvm_sta->lock); + spin_unlock_bh(&mvm_sta->lock); - /* - * There are frames pending on the AC queues for this station. - * We need to wait until all the frames are drained... 
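 *
 * (Annotation, not part of the original comment: this wait-for-drain branch
 *  and the pending_frames accounting behind it only made sense when stations
 *  shared the per-AC transmit queues; with DQA each station owns its queues,
 *  and the reworked code above has already flushed them and waited for them
 *  to empty, so the station can be removed from the firmware right away.)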
- */ - if (atomic_read(&mvm->pending_frames[sta_id])) { - rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], - ERR_PTR(-EBUSY)); - spin_unlock_bh(&mvm_sta->lock); - - /* disable TDLS sta queues on drain complete */ - if (sta->tdls) { - mvm->tfd_drained[sta_id] = mvm_sta->tfd_queue_msk; - IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", sta_id); - } - - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); - } else { - spin_unlock_bh(&mvm_sta->lock); - - if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) - iwl_mvm_tdls_sta_deinit(mvm, sta); - - ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); - RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); - } + ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); + RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); return ret; } @@ -1867,7 +1681,7 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) IWL_MAX_TID_COUNT, wdg_timeout); mvm->aux_queue = queue; - } else if (iwl_mvm_is_dqa_supported(mvm)) { + } else { struct iwl_trans_txq_scd_cfg cfg = { .fifo = IWL_MVM_TX_FIFO_MCAST, .sta_id = mvm->aux_sta.sta_id, @@ -1878,9 +1692,6 @@ static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, wdg_timeout); - } else { - iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, - IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); } } @@ -1980,7 +1791,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_new_tx_api(mvm)) { if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) queue = mvm->probe_queue; @@ -2066,8 +1877,7 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_free_bcast_sta_queues(mvm, vif); + iwl_mvm_free_bcast_sta_queues(mvm, vif); ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); if (ret) @@ -2078,23 +1888,10 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - u32 qmask = 0; lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) { - qmask = iwl_mvm_mac_get_queues_mask(vif); - - /* - * The firmware defines the TFD queue mask to only be relevant - * for *unicast* queues, so the multicast (CAB) queue shouldn't - * be included. This only happens in NL80211_IFTYPE_AP vif type, - * so the next line will only have an effect there. 
- */ - qmask &= ~BIT(vif->cab_queue); - } - - return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, + return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0, ieee80211_vif_type_p2p(vif), IWL_STA_GENERAL_PURPOSE); } @@ -2176,9 +1973,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) - return 0; - if (WARN_ON(vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC)) return -ENOTSUPP; @@ -2243,9 +2037,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); - if (!iwl_mvm_is_dqa_supported(mvm)) - return 0; - iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, @@ -2495,8 +2286,6 @@ int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, mvm_sta->tid_disable_agg &= ~BIT(tid); } else { /* In DQA-mode the queue isn't removed on agg termination */ - if (!iwl_mvm_is_dqa_supported(mvm)) - mvm_sta->tfd_queue_msk &= ~BIT(queue); mvm_sta->tid_disable_agg |= BIT(tid); } @@ -2599,19 +2388,17 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ret = -ENXIO; goto release_locks; } - } else if (iwl_mvm_is_dqa_supported(mvm) && - unlikely(mvm->queue_info[txq_id].status == + } else if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) { ret = -ENXIO; IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", tid); goto release_locks; - } else if (!iwl_mvm_is_dqa_supported(mvm) || - mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { + } else if (mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) { txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, - mvm->first_agg_queue, - mvm->last_agg_queue); + IWL_MVM_DQA_MIN_DATA_QUEUE, + IWL_MVM_DQA_MAX_DATA_QUEUE); if (txq_id < 0) { ret = txq_id; IWL_ERR(mvm, "Failed to allocate agg queue\n"); @@ -2729,37 +2516,34 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, queue_status = mvm->queue_info[queue].status; spin_unlock_bh(&mvm->queue_info_lock); - /* In DQA mode, the existing queue might need to be reconfigured */ - if (iwl_mvm_is_dqa_supported(mvm)) { - /* Maybe there is no need to even alloc a queue... */ - if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) - alloc_queue = false; + /* Maybe there is no need to even alloc a queue... 
*/ + if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) + alloc_queue = false; + /* + * Only reconfig the SCD for the queue if the window size has + * changed from current (become smaller) + */ + if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) { /* - * Only reconfig the SCD for the queue if the window size has - * changed from current (become smaller) + * If reconfiguring an existing queue, it first must be + * drained */ - if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) { - /* - * If reconfiguring an existing queue, it first must be - * drained - */ - ret = iwl_trans_wait_tx_queues_empty(mvm->trans, - BIT(queue)); - if (ret) { - IWL_ERR(mvm, - "Error draining queue before reconfig\n"); - return ret; - } + ret = iwl_trans_wait_tx_queues_empty(mvm->trans, + BIT(queue)); + if (ret) { + IWL_ERR(mvm, + "Error draining queue before reconfig\n"); + return ret; + } - ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, - mvmsta->sta_id, tid, - buf_size, ssn); - if (ret) { - IWL_ERR(mvm, - "Error reconfiguring TXQ #%d\n", queue); - return ret; - } + ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, + mvmsta->sta_id, tid, + buf_size, ssn); + if (ret) { + IWL_ERR(mvm, + "Error reconfiguring TXQ #%d\n", queue); + return ret; } } @@ -2855,18 +2639,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, "ssn = %d, next_recl = %d\n", tid_data->ssn, tid_data->next_reclaimed); - /* - * There are still packets for this RA / TID in the HW. - * Not relevant for DQA mode, since there is no need to disable - * the queue. - */ - if (!iwl_mvm_is_dqa_supported(mvm) && - tid_data->ssn != tid_data->next_reclaimed) { - tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA; - err = 0; - break; - } - tid_data->ssn = 0xffff; tid_data->state = IWL_AGG_OFF; spin_unlock_bh(&mvmsta->lock); @@ -2874,12 +2646,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); - - if (!iwl_mvm_is_dqa_supported(mvm)) { - int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]]; - - iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0); - } return 0; case IWL_AGG_STARTING: case IWL_EMPTYING_HW_QUEUE_ADDBA: @@ -2949,13 +2715,6 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_drain_sta(mvm, mvmsta, false); iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); - - if (!iwl_mvm_is_dqa_supported(mvm)) { - int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]]; - - iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue, - tid, 0); - } } return 0; @@ -3574,15 +3333,6 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, u16 n_queued; tid_data = &mvmsta->tid_data[tid]; - if (WARN(!iwl_mvm_is_dqa_supported(mvm) && - tid_data->state != IWL_AGG_ON && - tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, - "TID %d state is %d\n", - tid, tid_data->state)) { - spin_unlock_bh(&mvmsta->lock); - ieee80211_sta_eosp(sta); - return; - } n_queued = iwl_mvm_tid_queued(mvm, tid_data); if (n_queued > remaining) { @@ -3676,13 +3426,8 @@ void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, mvm_sta->disable_tx = disable; - /* - * Tell mac80211 to start/stop queuing tx for this station, - * but don't stop queuing if there are still pending frames - * for this station. 
- */ - if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) - ieee80211_sta_block_awake(mvm->hw, sta, disable); + /* Tell mac80211 to start/stop queuing tx for this station */ + ieee80211_sta_block_awake(mvm->hw, sta, disable); iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 6f1d358f3008..005037aa3122 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -222,16 +222,7 @@ struct iwl_mvm_vif; * we remove the STA of the AP. The flush can be done synchronously against the * fw. * Drain means that the fw will drop all the frames sent to a specific station. - * This is useful when a client (if we are IBSS / GO or AP) disassociates. In - * that case, we need to drain all the frames for that client from the AC queues - * that are shared with the other clients. Only then, we can remove the STA in - * the fw. In order to do so, we track the non-AMPDU packets for each station. - * If mac80211 removes a STA and if it still has non-AMPDU packets pending in - * the queues, we mark this station as %EBUSY in %fw_id_to_mac_id, and drop all - * the frames for this STA (%iwl_mvm_rm_sta). When the last frame is dropped - * (we know about it with its Tx response), we remove the station in fw and set - * it as %NULL in %fw_id_to_mac_id: this is the purpose of - * %iwl_mvm_sta_drained_wk. + * This is useful when a client (if we are IBSS / GO or AP) disassociates. */ /** @@ -371,7 +362,6 @@ struct iwl_mvm_rxq_dup_data { * struct iwl_mvm_sta - representation of a station in the driver * @sta_id: the index of the station in the fw (will be replaced by id_n_color) * @tfd_queue_msk: the tfd queues used by the station - * @hw_queue: per-AC mapping of the TFD queues used by station * @mac_id_n_color: the MAC context this station is linked to * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for * tid. @@ -409,7 +399,6 @@ struct iwl_mvm_rxq_dup_data { struct iwl_mvm_sta { u32 sta_id; u32 tfd_queue_msk; - u8 hw_queue[IEEE80211_NUM_ACS]; u32 mac_id_n_color; u16 tid_disable_agg; u8 max_agg_bufsize; @@ -548,7 +537,6 @@ int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif); void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm); -void iwl_mvm_sta_drained_wk(struct work_struct *wk); void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, struct ieee80211_sta *sta); void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index fcf2d1e4ff4e..65d8299108d5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -129,10 +129,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk) * issue as it will have to complete before the next command is * executed, and a new time event means a new command. 
*/ - if (iwl_mvm_is_dqa_supported(mvm)) - iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); - else - iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC); + iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC); } static void iwl_mvm_roc_finished(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index cccff1073e3b..6d7d1a66af81 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -552,9 +552,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif; - if (!iwl_mvm_is_dqa_supported(mvm)) - return info->hw_queue; - mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif); switch (info->control.vif->type) { @@ -653,8 +650,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (ap_sta_id != IWL_MVM_INVALID_STA) sta_id = ap_sta_id; - } else if (iwl_mvm_is_dqa_supported(mvm) && - info.control.vif->type == NL80211_IFTYPE_MONITOR) { + } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->aux_queue; } } @@ -673,17 +669,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return -1; } - /* - * Increase the pending frames counter, so that later when a reply comes - * in and the counter is decreased - we don't start getting negative - * values. - * Note that we don't need to make sure it isn't agg'd, since we're - * TXing non-sta - * For DQA mode - we shouldn't increase it though - */ - if (!iwl_mvm_is_dqa_supported(mvm)) - atomic_inc(&mvm->pending_frames[sta_id]); - return 0; } @@ -994,22 +979,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, } } - if (iwl_mvm_is_dqa_supported(mvm) || is_ampdu) - txq_id = mvmsta->tid_data[tid].txq_id; - - if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) { - /* default to TID 0 for non-QoS packets */ - u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid; - - txq_id = mvmsta->hw_queue[tid_to_mac80211_ac[tdls_tid]]; - } + txq_id = mvmsta->tid_data[tid].txq_id; WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM); /* Check if TXQ needs to be allocated or re-activated */ if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE || - !mvmsta->tid_data[tid].is_tid_active) && - iwl_mvm_is_dqa_supported(mvm)) { + !mvmsta->tid_data[tid].is_tid_active)) { /* If TXQ needs to be allocated... 
*/ if (txq_id == IWL_MVM_INVALID_QUEUE) { iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb); @@ -1036,7 +1012,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, txq_id); } - if (iwl_mvm_is_dqa_supported(mvm) && !iwl_mvm_has_new_tx_api(mvm)) { + if (!iwl_mvm_has_new_tx_api(mvm)) { /* Keep track of the time of the last frame for this RA/TID */ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies; @@ -1070,10 +1046,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - /* Increase pending frames count if this isn't AMPDU or DQA queue */ - if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu) - atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); - return 0; drop_unlock_sta: @@ -1142,8 +1114,7 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, lockdep_assert_held(&mvmsta->lock); if ((tid_data->state == IWL_AGG_ON || - tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA || - iwl_mvm_is_dqa_supported(mvm)) && + tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && iwl_mvm_tid_queued(mvm, tid_data) == 0) { /* * Now that this aggregation or DQA queue is empty tell @@ -1177,13 +1148,6 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, IWL_DEBUG_TX_QUEUES(mvm, "Can continue DELBA flow ssn = next_recl = %d\n", tid_data->next_reclaimed); - if (!iwl_mvm_is_dqa_supported(mvm)) { - u8 mac80211_ac = tid_to_mac80211_ac[tid]; - - iwl_mvm_disable_txq(mvm, tid_data->txq_id, - vif->hw_queue[mac80211_ac], tid, - CMD_ASYNC); - } tid_data->state = IWL_AGG_OFF; ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; @@ -1381,10 +1345,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, info->flags |= IEEE80211_TX_STAT_ACK; break; case TX_STATUS_FAIL_DEST_PS: - /* In DQA, the FW should have stopped the queue and not + /* the FW should have stopped the queue and not * return this status */ - WARN_ON(iwl_mvm_is_dqa_supported(mvm)); + WARN_ON(1); info->flags |= IEEE80211_TX_STAT_TX_FILTERED; break; default: @@ -1440,26 +1404,21 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, ieee80211_tx_status(mvm->hw, skb); } - if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) { - /* If this is an aggregation queue, we use the ssn since: - * ssn = wifi seq_num % 256. - * The seq_ctl is the sequence control of the packet to which - * this Tx response relates. But if there is a hole in the - * bitmap of the BA we received, this Tx response may allow to - * reclaim the hole and all the subsequent packets that were - * already acked. In that case, seq_ctl != ssn, and the next - * packet to be reclaimed will be ssn and not seq_ctl. In that - * case, several packets will be reclaimed even if - * frame_count = 1. - * - * The ssn is the index (% 256) of the latest packet that has - * treated (acked / dropped) + 1. - */ - next_reclaimed = ssn; - } else { - /* The next packet to be reclaimed is the one after this one */ - next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10); - } + /* This is an aggregation queue or might become one, so we use + * the ssn since: ssn = wifi seq_num % 256. + * The seq_ctl is the sequence control of the packet to which + * this Tx response relates. But if there is a hole in the + * bitmap of the BA we received, this Tx response may allow to + * reclaim the hole and all the subsequent packets that were + * already acked. In that case, seq_ctl != ssn, and the next + * packet to be reclaimed will be ssn and not seq_ctl. 
In that + * case, several packets will be reclaimed even if + * frame_count = 1. + * + * The ssn is the index (% 256) of the latest packet that has + * treated (acked / dropped) + 1. + */ + next_reclaimed = ssn; IWL_DEBUG_TX_REPLY(mvm, "TXQ %d status %s (0x%08x)\n", @@ -1542,49 +1501,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, mvmsta = NULL; } - /* - * If the txq is not an AMPDU queue, there is no chance we freed - * several skbs. Check that out... - */ - if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) - goto out; - - /* We can't free more than one frame at once on a shared queue */ - WARN_ON(skb_freed > 1); - - /* If we have still frames for this STA nothing to do here */ - if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) - goto out; - - if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) { - - /* - * If there are no pending frames for this STA and - * the tx to this station is not disabled, notify - * mac80211 that this station can now wake up in its - * STA table. - * If mvmsta is not NULL, sta is valid. - */ - - spin_lock_bh(&mvmsta->lock); - - if (!mvmsta->disable_tx) - ieee80211_sta_block_awake(mvm->hw, sta, false); - - spin_unlock_bh(&mvmsta->lock); - } - - if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) { - /* - * We are draining and this was the last packet - pre_rcu_remove - * has been called already. We might be after the - * synchronize_net already. - * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues. - */ - set_bit(sta_id, mvm->sta_drained); - schedule_work(&mvm->sta_drained_wk); - } - out: rcu_read_unlock(); } @@ -1648,9 +1564,8 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta; int queue = SEQ_TO_QUEUE(sequence); - if (WARN_ON_ONCE(queue < mvm->first_agg_queue && - (!iwl_mvm_is_dqa_supported(mvm) || - (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))) + if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE && + (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))) return; if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS)) -- cgit v1.2.3-55-g7522 From d20e30af7e65811955af5bc2a268ae75cd14e99e Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 25 Jun 2017 10:38:12 +0300 Subject: iwlwifi: mvm: don't retake the pointer to skb's CB We already have a such a pointer. Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index aad75f1cea31..ef02919c2d6e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -805,7 +805,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, goto drop; } - if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && + if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; -- cgit v1.2.3-55-g7522 From 39294c3df2a8dc5ec64513633446511f7e842acd Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 1 Aug 2017 18:27:28 +0200 Subject: Revert "ipv6: constify inet6_protocol structures" This reverts commit 3a3a4e3054137c5ff5d4d306ec834f6d25d7f95b. inet6_add_protocol and inet6_del_protocol include casts that remove the effect of the const annotation on their parameter, leading to possible runtime crashes. 
Reported-by: Eric Dumazet Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- net/ipv6/ip6_gre.c | 2 +- net/ipv6/tcp_ipv6.c | 2 +- net/ipv6/udp.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 33865d67bcb4..67ff2aaf5dcb 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -1080,7 +1080,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev) } -static const struct inet6_protocol ip6gre_protocol = { +static struct inet6_protocol ip6gre_protocol __read_mostly = { .handler = gre_rcv, .err_handler = ip6gre_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 39ee8e7fc4bd..ced5dcf37465 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1944,7 +1944,7 @@ struct proto tcpv6_prot = { .diag_destroy = tcp_abort, }; -static const struct inet6_protocol tcpv6_protocol = { +static struct inet6_protocol tcpv6_protocol = { .early_demux = tcp_v6_early_demux, .early_demux_handler = tcp_v6_early_demux, .handler = tcp_v6_rcv, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 5f8b8d766c63..4a3e65626e8b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1448,7 +1448,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, } #endif -static const struct inet6_protocol udpv6_protocol = { +static struct inet6_protocol udpv6_protocol = { .early_demux = udp_v6_early_demux, .early_demux_handler = udp_v6_early_demux, .handler = udpv6_rcv, -- cgit v1.2.3-55-g7522 From e6eeb287e3b534ce1bd6477894b9df8e394b4fbc Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 1 Aug 2017 18:27:29 +0200 Subject: Revert "l2tp: constify inet6_protocol structures" This reverts commit d04916a48ad4a3db892b664fa9c3a2a693c378ad. inet6_add_protocol and inet6_del_protocol include casts that remove the effect of the const annotation on their parameter, leading to possible runtime crashes. Reported-by: Eric Dumazet Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- net/l2tp/l2tp_ip6.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index d2efcd93e1e2..88b397c30d86 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -788,7 +788,7 @@ static struct inet_protosw l2tp_ip6_protosw = { .ops = &l2tp_ip6_ops, }; -static const struct inet6_protocol l2tp_ip6_protocol = { +static struct inet6_protocol l2tp_ip6_protocol __read_mostly = { .handler = l2tp_ip6_recv, }; -- cgit v1.2.3-55-g7522 From 306b13eb3cf9515a8214bbf5d69d811371d05792 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 28 Jul 2017 16:22:41 -0700 Subject: proto_ops: Add locked held versions of sendmsg and sendpage Add new proto_ops sendmsg_locked and sendpage_locked that can be called when the socket lock is already held. Correspondingly, add kernel_sendmsg_locked and kernel_sendpage_locked as front end functions. These functions will be used in zero proxy so that we can take the socket lock in a ULP sendmsg/sendpage and then directly call the backend transport proto_ops functions. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/linux/net.h | 12 ++++++++++++ include/net/sock.h | 3 +++ include/net/tcp.h | 3 +++ net/core/sock.c | 22 ++++++++++++++++++++++ net/ipv4/af_inet.c | 2 ++ net/ipv4/tcp.c | 39 ++++++++++++++++++++++++++------------- net/socket.c | 27 +++++++++++++++++++++++++++ 7 files changed, 95 insertions(+), 13 deletions(-) diff --git a/include/linux/net.h b/include/linux/net.h index dda2cc939a53..b5c15b31709b 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -190,8 +190,16 @@ struct proto_ops { struct pipe_inode_info *pipe, size_t len, unsigned int flags); int (*set_peek_off)(struct sock *sk, int val); int (*peek_len)(struct socket *sock); + + /* The following functions are called internally by kernel with + * sock lock already held. + */ int (*read_sock)(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor); + int (*sendpage_locked)(struct sock *sk, struct page *page, + int offset, size_t size, int flags); + int (*sendmsg_locked)(struct sock *sk, struct msghdr *msg, + size_t size); }; #define DECLARE_SOCKADDR(type, dst, src) \ @@ -279,6 +287,8 @@ do { \ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len); +int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, + struct kvec *vec, size_t num, size_t len); int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len, int flags); @@ -297,6 +307,8 @@ int kernel_setsockopt(struct socket *sock, int level, int optname, char *optval, unsigned int optlen); int kernel_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); +int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, + size_t size, int flags); int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); diff --git a/include/net/sock.h b/include/net/sock.h index 7c0632c7e870..393c38e9f6aa 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1582,11 +1582,14 @@ int sock_no_shutdown(struct socket *, int); int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *); int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int); int sock_no_sendmsg(struct socket *, struct msghdr *, size_t); +int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len); int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int); int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma); ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags); +ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page, + int offset, size_t size, int flags); /* * Functions to fill in entries in struct proto_ops when a protocol diff --git a/include/net/tcp.h b/include/net/tcp.h index 3ecb62811004..bb1881b4ce48 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -350,8 +350,11 @@ int tcp_v4_rcv(struct sk_buff *skb); int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); +int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size); int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags); +int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, + size_t size, int flags); ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, size_t size, int flags); void 
tcp_release_cb(struct sock *sk); diff --git a/net/core/sock.c b/net/core/sock.c index ac2a404c73eb..742f68c9c84a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2500,6 +2500,12 @@ int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len) } EXPORT_SYMBOL(sock_no_sendmsg); +int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *m, size_t len) +{ + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(sock_no_sendmsg_locked); + int sock_no_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags) { @@ -2528,6 +2534,22 @@ ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, siz } EXPORT_SYMBOL(sock_no_sendpage); +ssize_t sock_no_sendpage_locked(struct sock *sk, struct page *page, + int offset, size_t size, int flags) +{ + ssize_t res; + struct msghdr msg = {.msg_flags = flags}; + struct kvec iov; + char *kaddr = kmap(page); + + iov.iov_base = kaddr + offset; + iov.iov_len = size; + res = kernel_sendmsg_locked(sk, &msg, &iov, 1, size); + kunmap(page); + return res; +} +EXPORT_SYMBOL(sock_no_sendpage_locked); + /* * Default Socket Callbacks */ diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 5ce44fb7d498..f0103ffe1cdb 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -944,6 +944,8 @@ const struct proto_ops inet_stream_ops = { .sendpage = inet_sendpage, .splice_read = tcp_splice_read, .read_sock = tcp_read_sock, + .sendmsg_locked = tcp_sendmsg_locked, + .sendpage_locked = tcp_sendpage_locked, .peek_len = tcp_peek_len, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 5326b50a3450..9dd6f4dba9b1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1046,23 +1046,29 @@ out_err: } EXPORT_SYMBOL_GPL(do_tcp_sendpages); -int tcp_sendpage(struct sock *sk, struct page *page, int offset, - size_t size, int flags) +int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, + size_t size, int flags) { - ssize_t res; - if (!(sk->sk_route_caps & NETIF_F_SG) || !sk_check_csum_caps(sk)) return sock_no_sendpage(sk->sk_socket, page, offset, size, flags); - lock_sock(sk); - tcp_rate_check_app_limited(sk); /* is sending application-limited? 
*/ - res = do_tcp_sendpages(sk, page, offset, size, flags); + return do_tcp_sendpages(sk, page, offset, size, flags); +} + +int tcp_sendpage(struct sock *sk, struct page *page, int offset, + size_t size, int flags) +{ + int ret; + + lock_sock(sk); + ret = tcp_sendpage_locked(sk, page, offset, size, flags); release_sock(sk); - return res; + + return ret; } EXPORT_SYMBOL(tcp_sendpage); @@ -1156,7 +1162,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, return err; } -int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) +int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; @@ -1167,8 +1173,6 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) bool sg; long timeo; - lock_sock(sk); - flags = msg->msg_flags; if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) { err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); @@ -1377,7 +1381,6 @@ out: tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); } out_nopush: - release_sock(sk); return copied + copied_syn; do_fault: @@ -1401,9 +1404,19 @@ out_err: sk->sk_write_space(sk); tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); } - release_sock(sk); return err; } + +int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) +{ + int ret; + + lock_sock(sk); + ret = tcp_sendmsg_locked(sk, msg, size); + release_sock(sk); + + return ret; +} EXPORT_SYMBOL(tcp_sendmsg); /* diff --git a/net/socket.c b/net/socket.c index cb0fdf799f40..b332d1e8e4e4 100644 --- a/net/socket.c +++ b/net/socket.c @@ -652,6 +652,20 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, } EXPORT_SYMBOL(kernel_sendmsg); +int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, + struct kvec *vec, size_t num, size_t size) +{ + struct socket *sock = sk->sk_socket; + + if (!sock->ops->sendmsg_locked) + sock_no_sendmsg_locked(sk, msg, size); + + iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size); + + return sock->ops->sendmsg_locked(sk, msg, msg_data_left(msg)); +} +EXPORT_SYMBOL(kernel_sendmsg_locked); + static bool skb_is_err_queue(const struct sk_buff *skb) { /* pkt_type of skbs enqueued on the error queue are set to @@ -3376,6 +3390,19 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, } EXPORT_SYMBOL(kernel_sendpage); +int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, + size_t size, int flags) +{ + struct socket *sock = sk->sk_socket; + + if (sock->ops->sendpage_locked) + return sock->ops->sendpage_locked(sk, page, offset, size, + flags); + + return sock_no_sendpage_locked(sk, page, offset, size, flags); +} +EXPORT_SYMBOL(kernel_sendpage_locked); + int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) { mm_segment_t oldfs = get_fs(); -- cgit v1.2.3-55-g7522 From 20bf50de3028cb15fa81e1d1e63ab6e0c85257fc Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 28 Jul 2017 16:22:42 -0700 Subject: skbuff: Function to send an skbuf on a socket Add skb_send_sock to send an skbuff on a socket within the kernel. Arguments include an offset so that an skbuf might be sent in mulitple calls (e.g. send buffer limit is hit). Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 3 ++ net/core/skbuff.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 4093552be1de..18e76bf9574e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3113,6 +3113,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, struct pipe_inode_info *pipe, unsigned int len, unsigned int flags); +int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, + int len); +int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len); void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); unsigned int skb_zerocopy_headlen(const struct sk_buff *from); int skb_zerocopy(struct sk_buff *to, struct sk_buff *from, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index c27da51d14e4..9c0e015ff3fe 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1982,6 +1982,107 @@ int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, } EXPORT_SYMBOL_GPL(skb_splice_bits); +/* Send skb data on a socket. Socket must be locked. */ +int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, + int len) +{ + unsigned int orig_len = len; + struct sk_buff *head = skb; + unsigned short fragidx; + int slen, ret; + +do_frag_list: + + /* Deal with head data */ + while (offset < skb_headlen(skb) && len) { + struct kvec kv; + struct msghdr msg; + + slen = min_t(int, len, skb_headlen(skb) - offset); + kv.iov_base = skb->data + offset; + kv.iov_len = len; + memset(&msg, 0, sizeof(msg)); + + ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen); + if (ret <= 0) + goto error; + + offset += ret; + len -= ret; + } + + /* All the data was skb head? */ + if (!len) + goto out; + + /* Make offset relative to start of frags */ + offset -= skb_headlen(skb); + + /* Find where we are in frag list */ + for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; + + if (offset < frag->size) + break; + + offset -= frag->size; + } + + for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; + + slen = min_t(size_t, len, frag->size - offset); + + while (slen) { + ret = kernel_sendpage_locked(sk, frag->page.p, + frag->page_offset + offset, + slen, MSG_DONTWAIT); + if (ret <= 0) + goto error; + + len -= ret; + offset += ret; + slen -= ret; + } + + offset = 0; + } + + if (len) { + /* Process any frag lists */ + + if (skb == head) { + if (skb_has_frag_list(skb)) { + skb = skb_shinfo(skb)->frag_list; + goto do_frag_list; + } + } else if (skb->next) { + skb = skb->next; + goto do_frag_list; + } + } + +out: + return orig_len - len; + +error: + return orig_len == len ? ret : orig_len - len; +} +EXPORT_SYMBOL_GPL(skb_send_sock_locked); + +/* Send skb data on a socket. 
*/ +int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) +{ + int ret = 0; + + lock_sock(sk); + ret = skb_send_sock_locked(sk, skb, offset, len); + release_sock(sk); + + return ret; +} +EXPORT_SYMBOL_GPL(skb_send_sock); + /** * skb_store_bits - store bits from kernel buffer to skb * @skb: destination buffer -- cgit v1.2.3-55-g7522 From bbb03029a899679d73e62d7e6ae80348cc5d0054 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 28 Jul 2017 16:22:43 -0700 Subject: strparser: Generalize strparser Generalize strparser from more than just being used in conjunction with read_sock. strparser will also be used in the send path with zero proxy. The primary change is to create strp_process function that performs the critical processing on skbs. The documentation is also updated to reflect the new uses. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- Documentation/networking/strparser.txt | 207 +++++++++++++++------- include/net/strparser.h | 119 +++++++------ net/kcm/kcmproc.c | 34 ++-- net/kcm/kcmsock.c | 38 ++-- net/strparser/strparser.c | 313 ++++++++++++++++++++------------- 5 files changed, 424 insertions(+), 287 deletions(-) diff --git a/Documentation/networking/strparser.txt b/Documentation/networking/strparser.txt index a0bf573dfa61..fe01302471ae 100644 --- a/Documentation/networking/strparser.txt +++ b/Documentation/networking/strparser.txt @@ -1,45 +1,107 @@ -Stream Parser -------------- +Stream Parser (strparser) + +Introduction +============ The stream parser (strparser) is a utility that parses messages of an -application layer protocol running over a TCP connection. The stream +application layer protocol running over a data stream. The stream parser works in conjunction with an upper layer in the kernel to provide kernel support for application layer messages. For instance, Kernel Connection Multiplexor (KCM) uses the Stream Parser to parse messages using a BPF program. +The strparser works in one of two modes: receive callback or general +mode. + +In receive callback mode, the strparser is called from the data_ready +callback of a TCP socket. Messages are parsed and delivered as they are +received on the socket. + +In general mode, a sequence of skbs is fed to strparser from an +outside source. Messages are parsed and delivered as the sequence is +processed. This mode allows strparser to be applied to arbitrary +streams of data. + Interface ---------- +========= The API includes a context structure, a set of callbacks, utility -functions, and a data_ready function. The callbacks include -a parse_msg function that is called to perform parsing (e.g. -BPF parsing in case of KCM), and a rcv_msg function that is called -when a full message has been completed. +functions, and a data_ready function for receive callback mode. The +callbacks include a parse_msg function that is called to perform +parsing (e.g. BPF parsing in case of KCM), and a rcv_msg function +that is called when a full message has been completed. -A stream parser can be instantiated for a TCP connection. This is done -by: +Functions +========= -strp_init(struct strparser *strp, struct sock *csk, +strp_init(struct strparser *strp, struct sock *sk, struct strp_callbacks *cb) -strp is a struct of type strparser that is allocated by the upper layer. -csk is the TCP socket associated with the stream parser. Callbacks are -called by the stream parser. + Called to initialize a stream parser. strp is a struct of type + strparser that is allocated by the upper layer. 
sk is the TCP + socket associated with the stream parser for use with receive + callback mode; in general mode this is set to NULL. Callbacks + are called by the stream parser (the callbacks are listed below). + +void strp_pause(struct strparser *strp) + + Temporarily pause a stream parser. Message parsing is suspended + and no new messages are delivered to the upper layer. + +void strp_unpause(struct strparser *strp) + + Unpause a paused stream parser. + +void strp_stop(struct strparser *strp); + + strp_stop is called to completely stop stream parser operations. + This is called internally when the stream parser encounters an + error, and it is called from the upper layer to stop parsing + operations. + +void strp_done(struct strparser *strp); + + strp_done is called to release any resources held by the stream + parser instance. This must be called after the stream processor + has been stopped. + +int strp_process(struct strparser *strp, struct sk_buff *orig_skb, + unsigned int orig_offset, size_t orig_len, + size_t max_msg_size, long timeo) + + strp_process is called in general mode for a stream parser to + parse an sk_buff. The number of bytes processed or a negative + error number is returned. Note that strp_process does not + consume the sk_buff. max_msg_size is the maximum size the stream + parser will parse. timeo is the timeout for completing a message. + +void strp_data_ready(struct strparser *strp); + + The upper layer calls strp_data_ready when data is ready on + the lower socket for strparser to process. This should be called + from a data_ready callback that is set on the socket. Note that + maximum message size is the limit of the receive socket + buffer and message timeout is the receive timeout for the socket. + +void strp_check_rcv(struct strparser *strp); + + strp_check_rcv is called to check for new messages on the socket. + This is normally called at initialization of a stream parser + instance or after strp_unpause. Callbacks ---------- +========= -There are four callbacks: +There are six callbacks: int (*parse_msg)(struct strparser *strp, struct sk_buff *skb); parse_msg is called to determine the length of the next message in the stream. The upper layer must implement this function. It should parse the sk_buff as containing the headers for the - next application layer messages in the stream. + next application layer message in the stream. - The skb->cb in the input skb is a struct strp_rx_msg. Only + The skb->cb in the input skb is a struct strp_msg. Only the offset field is relevant in parse_msg and gives the offset where the message starts in the skb. @@ -50,26 +112,41 @@ int (*parse_msg)(struct strparser *strp, struct sk_buff *skb); -ESTRPIPE : current message should not be processed by the kernel, return control of the socket to userspace which can proceed to read the messages itself - other < 0 : Error is parsing, give control back to userspace + other < 0 : Error in parsing, give control back to userspace assuming that synchronization is lost and the stream is unrecoverable (application expected to close TCP socket) In the case that an error is returned (return value is less than - zero) the stream parser will set the error on TCP socket and wake - it up. If parse_msg returned -ESTRPIPE and the stream parser had - previously read some bytes for the current message, then the error - set on the attached socket is ENODATA since the stream is - unrecoverable in that case. 
+ zero) and the parser is in receive callback mode, then it will set + the error on TCP socket and wake it up. If parse_msg returned + -ESTRPIPE and the stream parser had previously read some bytes for + the current message, then the error set on the attached socket is + ENODATA since the stream is unrecoverable in that case. + +void (*lock)(struct strparser *strp) + + The lock callback is called to lock the strp structure when + the strparser is performing an asynchronous operation (such as + processing a timeout). In receive callback mode the default + function is to lock_sock for the associated socket. In general + mode the callback must be set appropriately. + +void (*unlock)(struct strparser *strp) + + The unlock callback is called to release the lock obtained + by the lock callback. In receive callback mode the default + function is release_sock for the associated socket. In general + mode the callback must be set appropriately. void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); rcv_msg is called when a full message has been received and is queued. The callee must consume the sk_buff; it can call strp_pause to prevent any further messages from being - received in rcv_msg (see strp_pause below). This callback + received in rcv_msg (see strp_pause above). This callback must be set. - The skb->cb in the input skb is a struct strp_rx_msg. This + The skb->cb in the input skb is a struct strp_msg. This struct contains two fields: offset and full_len. Offset is where the message starts in the skb, and full_len is the the length of the message. skb->len - offset may be greater @@ -78,59 +155,53 @@ void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); int (*read_sock_done)(struct strparser *strp, int err); read_sock_done is called when the stream parser is done reading - the TCP socket. The stream parser may read multiple messages - in a loop and this function allows cleanup to occur when existing - the loop. If the callback is not set (NULL in strp_init) a - default function is used. + the TCP socket in receive callback mode. The stream parser may + read multiple messages in a loop and this function allows cleanup + to occur when exiting the loop. If the callback is not set (NULL + in strp_init) a default function is used. void (*abort_parser)(struct strparser *strp, int err); This function is called when stream parser encounters an error - in parsing. The default function stops the stream parser for the - TCP socket and sets the error in the socket. The default function - can be changed by setting the callback to non-NULL in strp_init. + in parsing. The default function stops the stream parser and + sets the error in the socket if the parser is in receive callback + mode. The default function can be changed by setting the callback + to non-NULL in strp_init. -Functions ---------- +Statistics +========== -The upper layer calls strp_tcp_data_ready when data is ready on the lower -socket for strparser to process. This should be called from a data_ready -callback that is set on the socket. +Various counters are kept for each stream parser instance. These are in +the strp_stats structure. strp_aggr_stats is a convenience structure for +accumulating statistics for multiple stream parser instances. +save_strp_stats and aggregate_strp_stats are helper functions to save +and aggregate statistics. -strp_stop is called to completely stop stream parser operations. 
This -is called internally when the stream parser encounters an error, and -it is called from the upper layer when unattaching a TCP socket. +Message assembly limits +======================= -strp_done is called to unattach the stream parser from the TCP socket. -This must be called after the stream processor has be stopped. +The stream parser provide mechanisms to limit the resources consumed by +message assembly. -strp_check_rcv is called to check for new messages on the socket. This -is normally called at initialization of the a stream parser instance -of after strp_unpause. +A timer is set when assembly starts for a new message. In receive +callback mode the message timeout is taken from rcvtime for the +associated TCP socket. In general mode, the timeout is passed as an +argument in strp_process. If the timer fires before assembly completes +the stream parser is aborted and the ETIMEDOUT error is set on the TCP +socket if in receive callback mode. -Statistics ----------- +In receive callback mode, message length is limited to the receive +buffer size of the associated TCP socket. If the length returned by +parse_msg is greater than the socket buffer size then the stream parser +is aborted with EMSGSIZE error set on the TCP socket. Note that this +makes the maximum size of receive skbuffs for a socket with a stream +parser to be 2*sk_rcvbuf of the TCP socket. -Various counters are kept for each stream parser for a TCP socket. -These are in the strp_stats structure. strp_aggr_stats is a convenience -structure for accumulating statistics for multiple stream parser -instances. save_strp_stats and aggregate_strp_stats are helper functions -to save and aggregate statistics. +In general mode the message length limit is passed in as an argument +to strp_process. -Message assembly limits ------------------------ +Author +====== -The stream parser provide mechanisms to limit the resources consumed by -message assembly. +Tom Herbert (tom@quantonium.net) -A timer is set when assembly starts for a new message. The message -timeout is taken from rcvtime for the associated TCP socket. If the -timer fires before assembly completes the stream parser is aborted -and the ETIMEDOUT error is set on the TCP socket. - -Message length is limited to the receive buffer size of the associated -TCP socket. If the length returned by parse_msg is greater than -the socket buffer size then the stream parser is aborted with -EMSGSIZE error set on the TCP socket. Note that this makes the -maximum size of receive skbuffs for a socket with a stream parser -to be 2*sk_rcvbuf of the TCP socket. 
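The general mode documented above is easiest to see end to end in a small example. The following is a minimal sketch only, not code from this patch: every name in it (my_ctx, my_parse_msg, my_feed, the 4-byte big-endian length framing, the 64 KB message limit and the 5 second timeout) is an assumption made for illustration. It shows an upper layer initializing strparser with a NULL sock to select general mode, supplying the lock/unlock callbacks that general mode requires, and pushing skbs through strp_process().

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/skbuff.h>
#include <net/strparser.h>

/* Hypothetical framing: 4-byte big-endian payload length, then payload. */
struct my_ctx {
	struct strparser strp;
	struct mutex lock;	/* serializes strparser state in general mode */
};

static int my_parse_msg(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_msg *stm = strp_msg(skb);
	__be32 hdr;

	/* Not enough bytes for the header yet: ask strparser for more. */
	if (skb->len - stm->offset < sizeof(hdr))
		return 0;

	if (skb_copy_bits(skb, stm->offset, &hdr, sizeof(hdr)))
		return -EINVAL;

	/* Full message length, header included. */
	return sizeof(hdr) + be32_to_cpu(hdr);
}

static void my_rcv_msg(struct strparser *strp, struct sk_buff *skb)
{
	struct strp_msg *stm = strp_msg(skb);

	/* The message starts at stm->offset and is stm->full_len bytes;
	 * rcv_msg owns the skb and must eventually free or queue it.
	 */
	pr_debug("parsed msg: offset=%d len=%d\n", stm->offset, stm->full_len);
	kfree_skb(skb);
}

/* There is no socket in general mode, so lock/unlock must be provided. */
static void my_lock(struct strparser *strp)
{
	mutex_lock(&container_of(strp, struct my_ctx, strp)->lock);
}

static void my_unlock(struct strparser *strp)
{
	mutex_unlock(&container_of(strp, struct my_ctx, strp)->lock);
}

static int my_init(struct my_ctx *ctx)
{
	struct strp_callbacks cb = {
		.parse_msg	= my_parse_msg,
		.rcv_msg	= my_rcv_msg,
		.lock		= my_lock,
		.unlock		= my_unlock,
	};

	mutex_init(&ctx->lock);

	/* A NULL sock selects general mode. */
	return strp_init(&ctx->strp, NULL, &cb);
}

/* Feed one chunk of the stream; returns bytes consumed or a negative error. */
static int my_feed(struct my_ctx *ctx, struct sk_buff *skb)
{
	int ret;

	mutex_lock(&ctx->lock);
	ret = strp_process(&ctx->strp, skb, 0, skb->len,
			   64 * 1024, msecs_to_jiffies(5000));
	mutex_unlock(&ctx->lock);

	return ret;
}

Taking ctx->lock around strp_process() matches the lock/unlock callbacks handed to strp_init(), so the message-assembly timer and the abort path serialize against the caller the same way lock_sock()/release_sock() do in receive callback mode.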
diff --git a/include/net/strparser.h b/include/net/strparser.h index 0c28ad97c52f..4fe966a0ad92 100644 --- a/include/net/strparser.h +++ b/include/net/strparser.h @@ -18,26 +18,26 @@ #define STRP_STATS_INCR(stat) ((stat)++) struct strp_stats { - unsigned long long rx_msgs; - unsigned long long rx_bytes; - unsigned int rx_mem_fail; - unsigned int rx_need_more_hdr; - unsigned int rx_msg_too_big; - unsigned int rx_msg_timeouts; - unsigned int rx_bad_hdr_len; + unsigned long long msgs; + unsigned long long bytes; + unsigned int mem_fail; + unsigned int need_more_hdr; + unsigned int msg_too_big; + unsigned int msg_timeouts; + unsigned int bad_hdr_len; }; struct strp_aggr_stats { - unsigned long long rx_msgs; - unsigned long long rx_bytes; - unsigned int rx_mem_fail; - unsigned int rx_need_more_hdr; - unsigned int rx_msg_too_big; - unsigned int rx_msg_timeouts; - unsigned int rx_bad_hdr_len; - unsigned int rx_aborts; - unsigned int rx_interrupted; - unsigned int rx_unrecov_intr; + unsigned long long msgs; + unsigned long long bytes; + unsigned int mem_fail; + unsigned int need_more_hdr; + unsigned int msg_too_big; + unsigned int msg_timeouts; + unsigned int bad_hdr_len; + unsigned int aborts; + unsigned int interrupted; + unsigned int unrecov_intr; }; struct strparser; @@ -48,16 +48,18 @@ struct strp_callbacks { void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); int (*read_sock_done)(struct strparser *strp, int err); void (*abort_parser)(struct strparser *strp, int err); + void (*lock)(struct strparser *strp); + void (*unlock)(struct strparser *strp); }; -struct strp_rx_msg { +struct strp_msg { int full_len; int offset; }; -static inline struct strp_rx_msg *strp_rx_msg(struct sk_buff *skb) +static inline struct strp_msg *strp_msg(struct sk_buff *skb) { - return (struct strp_rx_msg *)((void *)skb->cb + + return (struct strp_msg *)((void *)skb->cb + offsetof(struct qdisc_skb_cb, data)); } @@ -65,18 +67,18 @@ static inline struct strp_rx_msg *strp_rx_msg(struct sk_buff *skb) struct strparser { struct sock *sk; - u32 rx_stopped : 1; - u32 rx_paused : 1; - u32 rx_aborted : 1; - u32 rx_interrupted : 1; - u32 rx_unrecov_intr : 1; - - struct sk_buff **rx_skb_nextp; - struct timer_list rx_msg_timer; - struct sk_buff *rx_skb_head; - unsigned int rx_need_bytes; - struct delayed_work rx_delayed_work; - struct work_struct rx_work; + u32 stopped : 1; + u32 paused : 1; + u32 aborted : 1; + u32 interrupted : 1; + u32 unrecov_intr : 1; + + struct sk_buff **skb_nextp; + struct timer_list msg_timer; + struct sk_buff *skb_head; + unsigned int need_bytes; + struct delayed_work delayed_work; + struct work_struct work; struct strp_stats stats; struct strp_callbacks cb; }; @@ -84,7 +86,7 @@ struct strparser { /* Must be called with lock held for attached socket */ static inline void strp_pause(struct strparser *strp) { - strp->rx_paused = 1; + strp->paused = 1; } /* May be called without holding lock for attached socket */ @@ -97,37 +99,37 @@ static inline void save_strp_stats(struct strparser *strp, #define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += \ strp->stats._stat) - SAVE_PSOCK_STATS(rx_msgs); - SAVE_PSOCK_STATS(rx_bytes); - SAVE_PSOCK_STATS(rx_mem_fail); - SAVE_PSOCK_STATS(rx_need_more_hdr); - SAVE_PSOCK_STATS(rx_msg_too_big); - SAVE_PSOCK_STATS(rx_msg_timeouts); - SAVE_PSOCK_STATS(rx_bad_hdr_len); + SAVE_PSOCK_STATS(msgs); + SAVE_PSOCK_STATS(bytes); + SAVE_PSOCK_STATS(mem_fail); + SAVE_PSOCK_STATS(need_more_hdr); + SAVE_PSOCK_STATS(msg_too_big); + SAVE_PSOCK_STATS(msg_timeouts); + 
SAVE_PSOCK_STATS(bad_hdr_len); #undef SAVE_PSOCK_STATS - if (strp->rx_aborted) - agg_stats->rx_aborts++; - if (strp->rx_interrupted) - agg_stats->rx_interrupted++; - if (strp->rx_unrecov_intr) - agg_stats->rx_unrecov_intr++; + if (strp->aborted) + agg_stats->aborts++; + if (strp->interrupted) + agg_stats->interrupted++; + if (strp->unrecov_intr) + agg_stats->unrecov_intr++; } static inline void aggregate_strp_stats(struct strp_aggr_stats *stats, struct strp_aggr_stats *agg_stats) { #define SAVE_PSOCK_STATS(_stat) (agg_stats->_stat += stats->_stat) - SAVE_PSOCK_STATS(rx_msgs); - SAVE_PSOCK_STATS(rx_bytes); - SAVE_PSOCK_STATS(rx_mem_fail); - SAVE_PSOCK_STATS(rx_need_more_hdr); - SAVE_PSOCK_STATS(rx_msg_too_big); - SAVE_PSOCK_STATS(rx_msg_timeouts); - SAVE_PSOCK_STATS(rx_bad_hdr_len); - SAVE_PSOCK_STATS(rx_aborts); - SAVE_PSOCK_STATS(rx_interrupted); - SAVE_PSOCK_STATS(rx_unrecov_intr); + SAVE_PSOCK_STATS(msgs); + SAVE_PSOCK_STATS(bytes); + SAVE_PSOCK_STATS(mem_fail); + SAVE_PSOCK_STATS(need_more_hdr); + SAVE_PSOCK_STATS(msg_too_big); + SAVE_PSOCK_STATS(msg_timeouts); + SAVE_PSOCK_STATS(bad_hdr_len); + SAVE_PSOCK_STATS(aborts); + SAVE_PSOCK_STATS(interrupted); + SAVE_PSOCK_STATS(unrecov_intr); #undef SAVE_PSOCK_STATS } @@ -135,8 +137,11 @@ static inline void aggregate_strp_stats(struct strp_aggr_stats *stats, void strp_done(struct strparser *strp); void strp_stop(struct strparser *strp); void strp_check_rcv(struct strparser *strp); -int strp_init(struct strparser *strp, struct sock *csk, +int strp_init(struct strparser *strp, struct sock *sk, struct strp_callbacks *cb); void strp_data_ready(struct strparser *strp); +int strp_process(struct strparser *strp, struct sk_buff *orig_skb, + unsigned int orig_offset, size_t orig_len, + size_t max_msg_size, long timeo); #endif /* __NET_STRPARSER_H_ */ diff --git a/net/kcm/kcmproc.c b/net/kcm/kcmproc.c index c343ac60bf50..c748e8a6a72c 100644 --- a/net/kcm/kcmproc.c +++ b/net/kcm/kcmproc.c @@ -155,8 +155,8 @@ static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq, seq_printf(seq, " psock-%-5u %-10llu %-16llu %-10llu %-16llu %-8d %-8d %-8d %-8d ", psock->index, - psock->strp.stats.rx_msgs, - psock->strp.stats.rx_bytes, + psock->strp.stats.msgs, + psock->strp.stats.bytes, psock->stats.tx_msgs, psock->stats.tx_bytes, psock->sk->sk_receive_queue.qlen, @@ -170,22 +170,22 @@ static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq, if (psock->tx_stopped) seq_puts(seq, "TxStop "); - if (psock->strp.rx_stopped) + if (psock->strp.stopped) seq_puts(seq, "RxStop "); if (psock->tx_kcm) seq_printf(seq, "Rsvd-%d ", psock->tx_kcm->index); - if (!psock->strp.rx_paused && !psock->ready_rx_msg) { + if (!psock->strp.paused && !psock->ready_rx_msg) { if (psock->sk->sk_receive_queue.qlen) { - if (psock->strp.rx_need_bytes) + if (psock->strp.need_bytes) seq_printf(seq, "RxWait=%u ", - psock->strp.rx_need_bytes); + psock->strp.need_bytes); else seq_printf(seq, "RxWait "); } } else { - if (psock->strp.rx_paused) + if (psock->strp.paused) seq_puts(seq, "RxPause "); if (psock->ready_rx_msg) @@ -371,20 +371,20 @@ static int kcm_stats_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%-8s %-10llu %-16llu %-10llu %-16llu %-10llu %-10llu %-10u %-10u %-10u %-10u %-10u %-10u %-10u %-10u %-10u\n", "", - strp_stats.rx_msgs, - strp_stats.rx_bytes, + strp_stats.msgs, + strp_stats.bytes, psock_stats.tx_msgs, psock_stats.tx_bytes, psock_stats.reserved, psock_stats.unreserved, - strp_stats.rx_aborts, - strp_stats.rx_interrupted, - 
strp_stats.rx_unrecov_intr, - strp_stats.rx_mem_fail, - strp_stats.rx_need_more_hdr, - strp_stats.rx_bad_hdr_len, - strp_stats.rx_msg_too_big, - strp_stats.rx_msg_timeouts, + strp_stats.aborts, + strp_stats.interrupted, + strp_stats.unrecov_intr, + strp_stats.mem_fail, + strp_stats.need_more_hdr, + strp_stats.bad_hdr_len, + strp_stats.msg_too_big, + strp_stats.msg_timeouts, psock_stats.tx_aborts); return 0; diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c index da49191f7ad0..88ce73288247 100644 --- a/net/kcm/kcmsock.c +++ b/net/kcm/kcmsock.c @@ -96,12 +96,12 @@ static void kcm_update_rx_mux_stats(struct kcm_mux *mux, struct kcm_psock *psock) { STRP_STATS_ADD(mux->stats.rx_bytes, - psock->strp.stats.rx_bytes - + psock->strp.stats.bytes - psock->saved_rx_bytes); mux->stats.rx_msgs += - psock->strp.stats.rx_msgs - psock->saved_rx_msgs; - psock->saved_rx_msgs = psock->strp.stats.rx_msgs; - psock->saved_rx_bytes = psock->strp.stats.rx_bytes; + psock->strp.stats.msgs - psock->saved_rx_msgs; + psock->saved_rx_msgs = psock->strp.stats.msgs; + psock->saved_rx_bytes = psock->strp.stats.bytes; } static void kcm_update_tx_mux_stats(struct kcm_mux *mux, @@ -1118,7 +1118,7 @@ static int kcm_recvmsg(struct socket *sock, struct msghdr *msg, struct kcm_sock *kcm = kcm_sk(sk); int err = 0; long timeo; - struct strp_rx_msg *rxm; + struct strp_msg *stm; int copied = 0; struct sk_buff *skb; @@ -1132,26 +1132,26 @@ static int kcm_recvmsg(struct socket *sock, struct msghdr *msg, /* Okay, have a message on the receive queue */ - rxm = strp_rx_msg(skb); + stm = strp_msg(skb); - if (len > rxm->full_len) - len = rxm->full_len; + if (len > stm->full_len) + len = stm->full_len; - err = skb_copy_datagram_msg(skb, rxm->offset, msg, len); + err = skb_copy_datagram_msg(skb, stm->offset, msg, len); if (err < 0) goto out; copied = len; if (likely(!(flags & MSG_PEEK))) { KCM_STATS_ADD(kcm->stats.rx_bytes, copied); - if (copied < rxm->full_len) { + if (copied < stm->full_len) { if (sock->type == SOCK_DGRAM) { /* Truncated message */ msg->msg_flags |= MSG_TRUNC; goto msg_finished; } - rxm->offset += copied; - rxm->full_len -= copied; + stm->offset += copied; + stm->full_len -= copied; } else { msg_finished: /* Finished with message */ @@ -1175,7 +1175,7 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos, struct sock *sk = sock->sk; struct kcm_sock *kcm = kcm_sk(sk); long timeo; - struct strp_rx_msg *rxm; + struct strp_msg *stm; int err = 0; ssize_t copied; struct sk_buff *skb; @@ -1192,12 +1192,12 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos, /* Okay, have a message on the receive queue */ - rxm = strp_rx_msg(skb); + stm = strp_msg(skb); - if (len > rxm->full_len) - len = rxm->full_len; + if (len > stm->full_len) + len = stm->full_len; - copied = skb_splice_bits(skb, sk, rxm->offset, pipe, len, flags); + copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags); if (copied < 0) { err = copied; goto err_out; @@ -1205,8 +1205,8 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos, KCM_STATS_ADD(kcm->stats.rx_bytes, copied); - rxm->offset += copied; - rxm->full_len -= copied; + stm->offset += copied; + stm->full_len -= copied; /* We have no way to return MSG_EOR. If all the bytes have been * read we still leave the message in the receive socket buffer. 
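The kcm_recvmsg() and kcm_splice_read() hunks above show the consumption pattern that the strp_rx_msg to strp_msg rename preserves: the parsed message lives at strp_msg(skb)->offset for strp_msg(skb)->full_len bytes, and a partial read advances offset and shrinks full_len so the remainder can be picked up later. Below is a minimal, hypothetical sketch of that same pattern; consume_parsed_msg() is not part of the patch.

#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/strparser.h>

static int consume_parsed_msg(struct sk_buff *skb, struct msghdr *msg,
			      size_t len)
{
	struct strp_msg *stm = strp_msg(skb);
	int err;

	if (len > stm->full_len)
		len = stm->full_len;

	/* The parsed message starts at stm->offset, not at skb->data. */
	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
	if (err < 0)
		return err;

	if (len < stm->full_len) {
		/* Partial read: remember where the unread remainder starts. */
		stm->offset += len;
		stm->full_len -= len;
	} else {
		/* Whole message consumed; a real caller would also unlink the
		 * skb from its receive queue before freeing it.
		 */
		kfree_skb(skb);
	}

	return len;
}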
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index b5c279b22680..0d18fbc6f870 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -29,44 +29,46 @@ static struct workqueue_struct *strp_wq; -struct _strp_rx_msg { - /* Internal cb structure. struct strp_rx_msg must be first for passing +struct _strp_msg { + /* Internal cb structure. struct strp_msg must be first for passing * to upper layer. */ - struct strp_rx_msg strp; + struct strp_msg strp; int accum_len; int early_eaten; }; -static inline struct _strp_rx_msg *_strp_rx_msg(struct sk_buff *skb) +static inline struct _strp_msg *_strp_msg(struct sk_buff *skb) { - return (struct _strp_rx_msg *)((void *)skb->cb + + return (struct _strp_msg *)((void *)skb->cb + offsetof(struct qdisc_skb_cb, data)); } /* Lower lock held */ -static void strp_abort_rx_strp(struct strparser *strp, int err) +static void strp_abort_strp(struct strparser *strp, int err) { - struct sock *csk = strp->sk; - /* Unrecoverable error in receive */ - del_timer(&strp->rx_msg_timer); + del_timer(&strp->msg_timer); - if (strp->rx_stopped) + if (strp->stopped) return; - strp->rx_stopped = 1; + strp->stopped = 1; + + if (strp->sk) { + struct sock *sk = strp->sk; - /* Report an error on the lower socket */ - csk->sk_err = err; - csk->sk_error_report(csk); + /* Report an error on the lower socket */ + sk->sk_err = err; + sk->sk_error_report(sk); + } } -static void strp_start_rx_timer(struct strparser *strp) +static void strp_start_timer(struct strparser *strp, long timeo) { - if (strp->sk->sk_rcvtimeo) - mod_timer(&strp->rx_msg_timer, strp->sk->sk_rcvtimeo); + if (timeo) + mod_timer(&strp->msg_timer, timeo); } /* Lower lock held */ @@ -74,46 +76,55 @@ static void strp_parser_err(struct strparser *strp, int err, read_descriptor_t *desc) { desc->error = err; - kfree_skb(strp->rx_skb_head); - strp->rx_skb_head = NULL; + kfree_skb(strp->skb_head); + strp->skb_head = NULL; strp->cb.abort_parser(strp, err); } static inline int strp_peek_len(struct strparser *strp) { - struct socket *sock = strp->sk->sk_socket; + if (strp->sk) { + struct socket *sock = strp->sk->sk_socket; + + return sock->ops->peek_len(sock); + } + + /* If we don't have an associated socket there's nothing to peek. + * Return int max to avoid stopping the strparser. + */ - return sock->ops->peek_len(sock); + return INT_MAX; } /* Lower socket lock held */ -static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, - unsigned int orig_offset, size_t orig_len) +static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, + unsigned int orig_offset, size_t orig_len, + size_t max_msg_size, long timeo) { struct strparser *strp = (struct strparser *)desc->arg.data; - struct _strp_rx_msg *rxm; + struct _strp_msg *stm; struct sk_buff *head, *skb; size_t eaten = 0, cand_len; ssize_t extra; int err; bool cloned_orig = false; - if (strp->rx_paused) + if (strp->paused) return 0; - head = strp->rx_skb_head; + head = strp->skb_head; if (head) { /* Message already in progress */ - rxm = _strp_rx_msg(head); - if (unlikely(rxm->early_eaten)) { + stm = _strp_msg(head); + if (unlikely(stm->early_eaten)) { /* Already some number of bytes on the receive sock - * data saved in rx_skb_head, just indicate they + * data saved in skb_head, just indicate they * are consumed. */ - eaten = orig_len <= rxm->early_eaten ? - orig_len : rxm->early_eaten; - rxm->early_eaten -= eaten; + eaten = orig_len <= stm->early_eaten ? 
+ orig_len : stm->early_eaten; + stm->early_eaten -= eaten; return eaten; } @@ -126,12 +137,12 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, */ orig_skb = skb_clone(orig_skb, GFP_ATOMIC); if (!orig_skb) { - STRP_STATS_INCR(strp->stats.rx_mem_fail); + STRP_STATS_INCR(strp->stats.mem_fail); desc->error = -ENOMEM; return 0; } if (!pskb_pull(orig_skb, orig_offset)) { - STRP_STATS_INCR(strp->stats.rx_mem_fail); + STRP_STATS_INCR(strp->stats.mem_fail); kfree_skb(orig_skb); desc->error = -ENOMEM; return 0; @@ -140,13 +151,13 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, orig_offset = 0; } - if (!strp->rx_skb_nextp) { + if (!strp->skb_nextp) { /* We are going to append to the frags_list of head. * Need to unshare the frag_list. */ err = skb_unclone(head, GFP_ATOMIC); if (err) { - STRP_STATS_INCR(strp->stats.rx_mem_fail); + STRP_STATS_INCR(strp->stats.mem_fail); desc->error = err; return 0; } @@ -165,20 +176,20 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, skb = alloc_skb(0, GFP_ATOMIC); if (!skb) { - STRP_STATS_INCR(strp->stats.rx_mem_fail); + STRP_STATS_INCR(strp->stats.mem_fail); desc->error = -ENOMEM; return 0; } skb->len = head->len; skb->data_len = head->len; skb->truesize = head->truesize; - *_strp_rx_msg(skb) = *_strp_rx_msg(head); - strp->rx_skb_nextp = &head->next; + *_strp_msg(skb) = *_strp_msg(head); + strp->skb_nextp = &head->next; skb_shinfo(skb)->frag_list = head; - strp->rx_skb_head = skb; + strp->skb_head = skb; head = skb; } else { - strp->rx_skb_nextp = + strp->skb_nextp = &skb_shinfo(head)->frag_list; } } @@ -188,112 +199,112 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, /* Always clone since we will consume something */ skb = skb_clone(orig_skb, GFP_ATOMIC); if (!skb) { - STRP_STATS_INCR(strp->stats.rx_mem_fail); + STRP_STATS_INCR(strp->stats.mem_fail); desc->error = -ENOMEM; break; } cand_len = orig_len - eaten; - head = strp->rx_skb_head; + head = strp->skb_head; if (!head) { head = skb; - strp->rx_skb_head = head; - /* Will set rx_skb_nextp on next packet if needed */ - strp->rx_skb_nextp = NULL; - rxm = _strp_rx_msg(head); - memset(rxm, 0, sizeof(*rxm)); - rxm->strp.offset = orig_offset + eaten; + strp->skb_head = head; + /* Will set skb_nextp on next packet if needed */ + strp->skb_nextp = NULL; + stm = _strp_msg(head); + memset(stm, 0, sizeof(*stm)); + stm->strp.offset = orig_offset + eaten; } else { /* Unclone since we may be appending to an skb that we * already share a frag_list with. 
*/ err = skb_unclone(skb, GFP_ATOMIC); if (err) { - STRP_STATS_INCR(strp->stats.rx_mem_fail); + STRP_STATS_INCR(strp->stats.mem_fail); desc->error = err; break; } - rxm = _strp_rx_msg(head); - *strp->rx_skb_nextp = skb; - strp->rx_skb_nextp = &skb->next; + stm = _strp_msg(head); + *strp->skb_nextp = skb; + strp->skb_nextp = &skb->next; head->data_len += skb->len; head->len += skb->len; head->truesize += skb->truesize; } - if (!rxm->strp.full_len) { + if (!stm->strp.full_len) { ssize_t len; len = (*strp->cb.parse_msg)(strp, head); if (!len) { /* Need more header to determine length */ - if (!rxm->accum_len) { + if (!stm->accum_len) { /* Start RX timer for new message */ - strp_start_rx_timer(strp); + strp_start_timer(strp, timeo); } - rxm->accum_len += cand_len; + stm->accum_len += cand_len; eaten += cand_len; - STRP_STATS_INCR(strp->stats.rx_need_more_hdr); + STRP_STATS_INCR(strp->stats.need_more_hdr); WARN_ON(eaten != orig_len); break; } else if (len < 0) { - if (len == -ESTRPIPE && rxm->accum_len) { + if (len == -ESTRPIPE && stm->accum_len) { len = -ENODATA; - strp->rx_unrecov_intr = 1; + strp->unrecov_intr = 1; } else { - strp->rx_interrupted = 1; + strp->interrupted = 1; } strp_parser_err(strp, len, desc); break; - } else if (len > strp->sk->sk_rcvbuf) { + } else if (len > max_msg_size) { /* Message length exceeds maximum allowed */ - STRP_STATS_INCR(strp->stats.rx_msg_too_big); + STRP_STATS_INCR(strp->stats.msg_too_big); strp_parser_err(strp, -EMSGSIZE, desc); break; } else if (len <= (ssize_t)head->len - - skb->len - rxm->strp.offset) { + skb->len - stm->strp.offset) { /* Length must be into new skb (and also * greater than zero) */ - STRP_STATS_INCR(strp->stats.rx_bad_hdr_len); + STRP_STATS_INCR(strp->stats.bad_hdr_len); strp_parser_err(strp, -EPROTO, desc); break; } - rxm->strp.full_len = len; + stm->strp.full_len = len; } - extra = (ssize_t)(rxm->accum_len + cand_len) - - rxm->strp.full_len; + extra = (ssize_t)(stm->accum_len + cand_len) - + stm->strp.full_len; if (extra < 0) { /* Message not complete yet. */ - if (rxm->strp.full_len - rxm->accum_len > + if (stm->strp.full_len - stm->accum_len > strp_peek_len(strp)) { - /* Don't have the whole messages in the socket - * buffer. Set strp->rx_need_bytes to wait for + /* Don't have the whole message in the socket + * buffer. Set strp->need_bytes to wait for * the rest of the message. Also, set "early * eaten" since we've already buffered the skb * but don't consume yet per strp_read_sock. */ - if (!rxm->accum_len) { + if (!stm->accum_len) { /* Start RX timer for new message */ - strp_start_rx_timer(strp); + strp_start_timer(strp, timeo); } - strp->rx_need_bytes = rxm->strp.full_len - - rxm->accum_len; - rxm->accum_len += cand_len; - rxm->early_eaten = cand_len; - STRP_STATS_ADD(strp->stats.rx_bytes, cand_len); + strp->need_bytes = stm->strp.full_len - + stm->accum_len; + stm->accum_len += cand_len; + stm->early_eaten = cand_len; + STRP_STATS_ADD(strp->stats.bytes, cand_len); desc->count = 0; /* Stop reading socket */ break; } - rxm->accum_len += cand_len; + stm->accum_len += cand_len; eaten += cand_len; WARN_ON(eaten != orig_len); break; @@ -308,14 +319,14 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, eaten += (cand_len - extra); /* Hurray, we have a new message! 
*/ - del_timer(&strp->rx_msg_timer); - strp->rx_skb_head = NULL; - STRP_STATS_INCR(strp->stats.rx_msgs); + del_timer(&strp->msg_timer); + strp->skb_head = NULL; + STRP_STATS_INCR(strp->stats.msgs); /* Give skb to upper layer */ strp->cb.rcv_msg(strp, head); - if (unlikely(strp->rx_paused)) { + if (unlikely(strp->paused)) { /* Upper layer paused strp */ break; } @@ -324,11 +335,33 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, if (cloned_orig) kfree_skb(orig_skb); - STRP_STATS_ADD(strp->stats.rx_bytes, eaten); + STRP_STATS_ADD(strp->stats.bytes, eaten); return eaten; } +int strp_process(struct strparser *strp, struct sk_buff *orig_skb, + unsigned int orig_offset, size_t orig_len, + size_t max_msg_size, long timeo) +{ + read_descriptor_t desc; /* Dummy arg to strp_recv */ + + desc.arg.data = strp; + + return __strp_recv(&desc, orig_skb, orig_offset, orig_len, + max_msg_size, timeo); +} +EXPORT_SYMBOL_GPL(strp_process); + +static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, + unsigned int orig_offset, size_t orig_len) +{ + struct strparser *strp = (struct strparser *)desc->arg.data; + + return __strp_recv(desc, orig_skb, orig_offset, orig_len, + strp->sk->sk_rcvbuf, strp->sk->sk_rcvtimeo); +} + static int default_read_sock_done(struct strparser *strp, int err) { return err; @@ -355,101 +388,129 @@ static int strp_read_sock(struct strparser *strp) /* Lower sock lock held */ void strp_data_ready(struct strparser *strp) { - if (unlikely(strp->rx_stopped)) + if (unlikely(strp->stopped)) return; - /* This check is needed to synchronize with do_strp_rx_work. - * do_strp_rx_work acquires a process lock (lock_sock) whereas + /* This check is needed to synchronize with do_strp_work. + * do_strp_work acquires a process lock (lock_sock) whereas * the lock held here is bh_lock_sock. The two locks can be * held by different threads at the same time, but bh_lock_sock * allows a thread in BH context to safely check if the process * lock is held. In this case, if the lock is held, queue work. */ if (sock_owned_by_user(strp->sk)) { - queue_work(strp_wq, &strp->rx_work); + queue_work(strp_wq, &strp->work); return; } - if (strp->rx_paused) + if (strp->paused) return; - if (strp->rx_need_bytes) { - if (strp_peek_len(strp) >= strp->rx_need_bytes) - strp->rx_need_bytes = 0; + if (strp->need_bytes) { + if (strp_peek_len(strp) >= strp->need_bytes) + strp->need_bytes = 0; else return; } if (strp_read_sock(strp) == -ENOMEM) - queue_work(strp_wq, &strp->rx_work); + queue_work(strp_wq, &strp->work); } EXPORT_SYMBOL_GPL(strp_data_ready); -static void do_strp_rx_work(struct strparser *strp) +static void do_strp_work(struct strparser *strp) { read_descriptor_t rd_desc; - struct sock *csk = strp->sk; /* We need the read lock to synchronize with strp_data_ready. We * need the socket lock for calling strp_read_sock. 
*/ - lock_sock(csk); + strp->cb.lock(strp); - if (unlikely(strp->rx_stopped)) + if (unlikely(strp->stopped)) goto out; - if (strp->rx_paused) + if (strp->paused) goto out; rd_desc.arg.data = strp; if (strp_read_sock(strp) == -ENOMEM) - queue_work(strp_wq, &strp->rx_work); + queue_work(strp_wq, &strp->work); out: - release_sock(csk); + strp->cb.unlock(strp); } -static void strp_rx_work(struct work_struct *w) +static void strp_work(struct work_struct *w) { - do_strp_rx_work(container_of(w, struct strparser, rx_work)); + do_strp_work(container_of(w, struct strparser, work)); } -static void strp_rx_msg_timeout(unsigned long arg) +static void strp_msg_timeout(unsigned long arg) { struct strparser *strp = (struct strparser *)arg; /* Message assembly timed out */ - STRP_STATS_INCR(strp->stats.rx_msg_timeouts); - lock_sock(strp->sk); + STRP_STATS_INCR(strp->stats.msg_timeouts); + strp->cb.lock(strp); strp->cb.abort_parser(strp, ETIMEDOUT); + strp->cb.unlock(strp); +} + +static void strp_sock_lock(struct strparser *strp) +{ + lock_sock(strp->sk); +} + +static void strp_sock_unlock(struct strparser *strp) +{ release_sock(strp->sk); } -int strp_init(struct strparser *strp, struct sock *csk, +int strp_init(struct strparser *strp, struct sock *sk, struct strp_callbacks *cb) { - struct socket *sock = csk->sk_socket; if (!cb || !cb->rcv_msg || !cb->parse_msg) return -EINVAL; - if (!sock->ops->read_sock || !sock->ops->peek_len) - return -EAFNOSUPPORT; + /* The sk (sock) arg determines the mode of the stream parser. + * + * If the sock is set then the strparser is in receive callback mode. + * The upper layer calls strp_data_ready to kick receive processing + * and strparser calls the read_sock function on the socket to + * get packets. + * + * If the sock is not set then the strparser is in general mode. + * The upper layer calls strp_process for each skb to be parsed. + */ - memset(strp, 0, sizeof(*strp)); + if (sk) { + struct socket *sock = sk->sk_socket; - strp->sk = csk; + if (!sock->ops->read_sock || !sock->ops->peek_len) + return -EAFNOSUPPORT; + } else { + if (!cb->lock || !cb->unlock) + return -EINVAL; + } - setup_timer(&strp->rx_msg_timer, strp_rx_msg_timeout, - (unsigned long)strp); + memset(strp, 0, sizeof(*strp)); - INIT_WORK(&strp->rx_work, strp_rx_work); + strp->sk = sk; + strp->cb.lock = cb->lock ? : strp_sock_lock; + strp->cb.unlock = cb->unlock ? : strp_sock_unlock; strp->cb.rcv_msg = cb->rcv_msg; strp->cb.parse_msg = cb->parse_msg; strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done; - strp->cb.abort_parser = cb->abort_parser ? : strp_abort_rx_strp; + strp->cb.abort_parser = cb->abort_parser ? 
: strp_abort_strp; + + setup_timer(&strp->msg_timer, strp_msg_timeout, + (unsigned long)strp); + + INIT_WORK(&strp->work, strp_work); return 0; } @@ -457,12 +518,12 @@ EXPORT_SYMBOL_GPL(strp_init); void strp_unpause(struct strparser *strp) { - strp->rx_paused = 0; + strp->paused = 0; - /* Sync setting rx_paused with RX work */ + /* Sync setting paused with RX work */ smp_mb(); - queue_work(strp_wq, &strp->rx_work); + queue_work(strp_wq, &strp->work); } EXPORT_SYMBOL_GPL(strp_unpause); @@ -471,27 +532,27 @@ EXPORT_SYMBOL_GPL(strp_unpause); */ void strp_done(struct strparser *strp) { - WARN_ON(!strp->rx_stopped); + WARN_ON(!strp->stopped); - del_timer_sync(&strp->rx_msg_timer); - cancel_work_sync(&strp->rx_work); + del_timer_sync(&strp->msg_timer); + cancel_work_sync(&strp->work); - if (strp->rx_skb_head) { - kfree_skb(strp->rx_skb_head); - strp->rx_skb_head = NULL; + if (strp->skb_head) { + kfree_skb(strp->skb_head); + strp->skb_head = NULL; } } EXPORT_SYMBOL_GPL(strp_done); void strp_stop(struct strparser *strp) { - strp->rx_stopped = 1; + strp->stopped = 1; } EXPORT_SYMBOL_GPL(strp_stop); void strp_check_rcv(struct strparser *strp) { - queue_work(strp_wq, &strp->rx_work); + queue_work(strp_wq, &strp->work); } EXPORT_SYMBOL_GPL(strp_check_rcv); -- cgit v1.2.3-55-g7522 From da779b4011c872a28c4ab369e923e786b19055a0 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Mon, 31 Jul 2017 18:05:08 +0800 Subject: dt-bindings: net: mediatek: add support for MediaTek MT7623 and MT7622 SoC The patch adds the supplements in the dt-binding document for MediaTek MT7622 SoC with extra SGMII system controller and relevant clock consumers listed as the requirements for those SoCs equipped with the SGMII circuit. Also, add the missing binding information for MT7623 SoC here which relies on the fallback binding of MT2701. Signed-off-by: Sean Wang Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/mediatek-net.txt | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt index c7194e87d5f4..1d1168b805cc 100644 --- a/Documentation/devicetree/bindings/net/mediatek-net.txt +++ b/Documentation/devicetree/bindings/net/mediatek-net.txt @@ -7,24 +7,30 @@ have dual GMAC each represented by a child node.. * Ethernet controller node Required properties: -- compatible: Should be "mediatek,mt2701-eth" +- compatible: Should be + "mediatek,mt2701-eth": for MT2701 SoC + "mediatek,mt7623-eth", "mediatek,mt2701-eth": for MT7623 SoC + "mediatek,mt7622-eth": for MT7622 SoC - reg: Address and length of the register set for the device - interrupts: Should contain the three frame engines interrupts in numeric order. These are fe_int0, fe_int1 and fe_int2. - clocks: the clock used by the core - clock-names: the names of the clock listed in the clocks property. 
These are - "ethif", "esw", "gp2", "gp1" + "ethif", "esw", "gp2", "gp1" : For MT2701 and MT7623 SoC + "ethif", "esw", "gp0", "gp1", "gp2", "sgmii_tx250m", "sgmii_rx250m", + "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll" : For MT7622 SoC - power-domains: phandle to the power domain that the ethernet is part of - resets: Should contain a phandle to the ethsys reset signal - reset-names: Should contain the reset signal name "eth" - mediatek,ethsys: phandle to the syscon node that handles the port setup +- mediatek,sgmiisys: phandle to the syscon node that handles the SGMII setup + which is required for those SoCs equipped with SGMII such as MT7622 SoC. - mediatek,pctl: phandle to the syscon node that handles the ports slew rate and driver current Optional properties: - interrupt-parent: Should be the phandle for the interrupt controller that services interrupts for this device - * Ethernet MAC node Required properties: -- cgit v1.2.3-55-g7522 From 2ec50f574e8198b7d8ba677d1edcab566fa9e84f Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Mon, 31 Jul 2017 18:05:09 +0800 Subject: net-next: mediatek: add platform data to adapt into various hardware This patch is the preparation patch in order to adapt into various hardware through adding platform data which holds specific characteristics among MediaTek SoCs and introducing the unified clock handler for those distinct clock requirements depending on different features such as TRGMII and SGMII getting support on the target SoC. And finally, add enhancement with given the generic description for Kconfig and remove the unnecessary machine type dependency in Makefile. Signed-off-by: Sean Wang Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/Kconfig | 6 +-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 75 ++++++++++++++++++++++++----- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 23 ++++++++- 3 files changed, 88 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index 698bb89aa901..f9149d2a4694 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -7,11 +7,11 @@ config NET_VENDOR_MEDIATEK if NET_VENDOR_MEDIATEK config NET_MEDIATEK_SOC - tristate "MediaTek MT7623 Gigabit ethernet support" - depends on NET_VENDOR_MEDIATEK && (MACH_MT7623 || MACH_MT2701) + tristate "MediaTek SoC Gigabit Ethernet support" + depends on NET_VENDOR_MEDIATEK select PHYLIB ---help--- This driver supports the gigabit ethernet MACs in the - MediaTek MT2701/MT7623 chipset family. + MediaTek SoC family. 
endif #NET_VENDOR_MEDIATEK diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index f1886e1bdd82..0337fe99500a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -185,7 +185,8 @@ static void mtk_phy_link_adjust(struct net_device *dev) break; }; - if (mac->id == 0 && !mac->trgmii) + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) && + !mac->id && !mac->trgmii) mtk_gmac0_rgmii_adjust(mac->hw, dev->phydev->speed); if (dev->phydev->link) @@ -1834,9 +1835,36 @@ static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) mdelay(10); } +static void mtk_clk_disable(struct mtk_eth *eth) +{ + int clk; + + for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--) + clk_disable_unprepare(eth->clks[clk]); +} + +static int mtk_clk_enable(struct mtk_eth *eth) +{ + int clk, ret; + + for (clk = 0; clk < MTK_CLK_MAX ; clk++) { + ret = clk_prepare_enable(eth->clks[clk]); + if (ret) + goto err_disable_clks; + } + + return 0; + +err_disable_clks: + while (--clk >= 0) + clk_disable_unprepare(eth->clks[clk]); + + return ret; +} + static int mtk_hw_init(struct mtk_eth *eth) { - int i, val; + int i, val, ret; if (test_and_set_bit(MTK_HW_INIT, ð->state)) return 0; @@ -1844,10 +1872,10 @@ static int mtk_hw_init(struct mtk_eth *eth) pm_runtime_enable(eth->dev); pm_runtime_get_sync(eth->dev); - clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]); - clk_prepare_enable(eth->clks[MTK_CLK_ESW]); - clk_prepare_enable(eth->clks[MTK_CLK_GP1]); - clk_prepare_enable(eth->clks[MTK_CLK_GP2]); + ret = mtk_clk_enable(eth); + if (ret) + goto err_disable_pm; + ethsys_reset(eth, RSTCTRL_FE); ethsys_reset(eth, RSTCTRL_PPE); @@ -1915,6 +1943,12 @@ static int mtk_hw_init(struct mtk_eth *eth) } return 0; + +err_disable_pm: + pm_runtime_put_sync(eth->dev); + pm_runtime_disable(eth->dev); + + return ret; } static int mtk_hw_deinit(struct mtk_eth *eth) @@ -1922,10 +1956,7 @@ static int mtk_hw_deinit(struct mtk_eth *eth) if (!test_and_clear_bit(MTK_HW_INIT, ð->state)) return 0; - clk_disable_unprepare(eth->clks[MTK_CLK_GP2]); - clk_disable_unprepare(eth->clks[MTK_CLK_GP1]); - clk_disable_unprepare(eth->clks[MTK_CLK_ESW]); - clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]); + mtk_clk_disable(eth); pm_runtime_put_sync(eth->dev); pm_runtime_disable(eth->dev); @@ -2403,6 +2434,7 @@ static int mtk_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct device_node *mac_np; + const struct of_device_id *match; struct mtk_eth *eth; int err; int i; @@ -2411,6 +2443,9 @@ static int mtk_probe(struct platform_device *pdev) if (!eth) return -ENOMEM; + match = of_match_device(of_mtk_match, &pdev->dev); + eth->soc = (struct mtk_soc_data *)match->data; + eth->dev = &pdev->dev; eth->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(eth->base)) @@ -2447,7 +2482,12 @@ static int mtk_probe(struct platform_device *pdev) if (IS_ERR(eth->clks[i])) { if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) return -EPROBE_DEFER; - return -ENODEV; + if (eth->soc->required_clks & BIT(i)) { + dev_err(&pdev->dev, "clock %s not found\n", + mtk_clks_source_name[i]); + return -EINVAL; + } + eth->clks[i] = NULL; } } @@ -2550,8 +2590,19 @@ static int mtk_remove(struct platform_device *pdev) return 0; } +static const struct mtk_soc_data mt2701_data = { + .caps = MTK_GMAC1_TRGMII, + .required_clks = MT7623_CLKS_BITMAP +}; + +static const struct mtk_soc_data mt7623_data = { + .caps = MTK_GMAC1_TRGMII, + .required_clks = 
MT7623_CLKS_BITMAP +}; + const struct of_device_id of_mtk_match[] = { - { .compatible = "mediatek,mt2701-eth" }, + { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, + { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, {}, }; MODULE_DEVICE_TABLE(of, of_mtk_match); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 5868a09f623a..8ade23dfcc77 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -443,6 +443,9 @@ enum mtk_clks_map { MTK_CLK_MAX }; +#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \ + BIT(MTK_CLK_TRGPLL)) enum mtk_dev_state { MTK_HW_INIT, MTK_RESETTING @@ -511,6 +514,21 @@ struct mtk_rx_ring { u32 crx_idx_reg; }; +#define MTK_TRGMII BIT(0) +#define MTK_GMAC1_TRGMII (BIT(1) | MTK_TRGMII) +#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) + +/* struct mtk_soc_data - This is the structure holding all differences + * among various plaforms + * @caps Flags shown the extra capability for the SoC + * @required_clks Flags shown the bitmap for required clocks on + * the target SoC + */ +struct mtk_soc_data { + u32 caps; + u32 required_clks; +}; + /* currently no SoC has more than 2 macs */ #define MTK_MAX_DEVS 2 @@ -542,7 +560,8 @@ struct mtk_rx_ring { * @clks: clock array for all clocks required * @mii_bus: If there is a bus we need to create an instance for it * @pending_work: The workqueue used to reset the dma ring - * @state Initialization and runtime state of the device. + * @state Initialization and runtime state of the device + * @soc: Holding specific data among vaious SoCs */ struct mtk_eth { @@ -574,6 +593,8 @@ struct mtk_eth { struct mii_bus *mii_bus; struct work_struct pending_work; unsigned long state; + + const struct mtk_soc_data *soc; }; /* struct mtk_mac - the structure that holds the info about the MACs of the -- cgit v1.2.3-55-g7522 From 42c03844e93d564cf6c44dcb7d91c5dc602c4857 Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Mon, 31 Jul 2017 18:05:10 +0800 Subject: net-next: mediatek: add support for MediaTek MT7622 SoC This patch adds the driver for ethernet controller on MT7622 SoC. It has the similar handling logic as the previously MT7623 does, but there are additions against with MT7623 SoC, the shared SGMII given for the dual GMACs and including 5-ports 10/100 embedded switch support (ESW) as the GMAC1 option, thus more clocks consumers for the extra feature are introduced here. So for ease portability and maintenance, those differences all are being kept inside the platform data as other drivers usually do. Currently testing successfully is done with those patches for the conditions such as GMAC2 with IP1001 PHY via RGMII and GMAC1/2 with RTL8211F PHY via SGMII. Signed-off-by: Sean Wang Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 67 ++++++++++++++++++++++++++++- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 54 ++++++++++++++++++++++- 2 files changed, 117 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 0337fe99500a..acf2b3b8009c 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -53,7 +53,8 @@ static const struct mtk_ethtool_stats { }; static const char * const mtk_clks_source_name[] = { - "ethif", "esw", "gp1", "gp2", "trgpll" + "ethif", "esw", "gp0", "gp1", "gp2", "trgpll", "sgmii_tx250m", + "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", "sgmii_ck", "eth2pll" }; void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) @@ -163,6 +164,47 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed) mtk_w32(eth, val, TRGMII_TCK_CTRL); } +static void mtk_gmac_sgmii_hw_setup(struct mtk_eth *eth, int mac_id) +{ + u32 val; + + /* Setup the link timer and QPHY power up inside SGMIISYS */ + regmap_write(eth->sgmiisys, SGMSYS_PCS_LINK_TIMER, + SGMII_LINK_TIMER_DEFAULT); + + regmap_read(eth->sgmiisys, SGMSYS_SGMII_MODE, &val); + val |= SGMII_REMOTE_FAULT_DIS; + regmap_write(eth->sgmiisys, SGMSYS_SGMII_MODE, val); + + regmap_read(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, &val); + val |= SGMII_AN_RESTART; + regmap_write(eth->sgmiisys, SGMSYS_PCS_CONTROL_1, val); + + regmap_read(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, &val); + val &= ~SGMII_PHYA_PWD; + regmap_write(eth->sgmiisys, SGMSYS_QPHY_PWR_STATE_CTRL, val); + + /* Determine MUX for which GMAC uses the SGMII interface */ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_DUAL_GMAC_SHARED_SGMII)) { + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= ~SYSCFG0_SGMII_MASK; + val |= !mac_id ? 
SYSCFG0_SGMII_GMAC1 : SYSCFG0_SGMII_GMAC2; + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + + dev_info(eth->dev, "setup shared sgmii for gmac=%d\n", + mac_id); + } + + /* Setup the GMAC1 going through SGMII path when SoC also support + * ESW on GMAC1 + */ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_GMAC1_ESW | MTK_GMAC1_SGMII) && + !mac_id) { + mtk_w32(eth, 0, MTK_MAC_MISC); + dev_info(eth->dev, "setup gmac1 going through sgmii"); + } +} + static void mtk_phy_link_adjust(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); @@ -270,6 +312,7 @@ static int mtk_phy_connect(struct net_device *dev) if (!np) return -ENODEV; + mac->ge_mode = 0; switch (of_get_phy_mode(np)) { case PHY_INTERFACE_MODE_TRGMII: mac->trgmii = true; @@ -277,7 +320,10 @@ static int mtk_phy_connect(struct net_device *dev) case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII: - mac->ge_mode = 0; + break; + case PHY_INTERFACE_MODE_SGMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) + mtk_gmac_sgmii_hw_setup(eth, mac->id); break; case PHY_INTERFACE_MODE_MII: mac->ge_mode = 1; @@ -2423,6 +2469,7 @@ static int mtk_get_chip_id(struct mtk_eth *eth, u32 *chip_id) static bool mtk_is_hwlro_supported(struct mtk_eth *eth) { switch (eth->chip_id) { + case MT7622_ETH: case MT7623_ETH: return true; } @@ -2462,6 +2509,16 @@ static int mtk_probe(struct platform_device *pdev) return PTR_ERR(eth->ethsys); } + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + eth->sgmiisys = + syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,sgmiisys"); + if (IS_ERR(eth->sgmiisys)) { + dev_err(&pdev->dev, "no sgmiisys regmap found\n"); + return PTR_ERR(eth->sgmiisys); + } + } + eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mediatek,pctl"); if (IS_ERR(eth->pctl)) { @@ -2595,6 +2652,11 @@ static const struct mtk_soc_data mt2701_data = { .required_clks = MT7623_CLKS_BITMAP }; +static const struct mtk_soc_data mt7622_data = { + .caps = MTK_DUAL_GMAC_SHARED_SGMII | MTK_GMAC1_ESW, + .required_clks = MT7622_CLKS_BITMAP +}; + static const struct mtk_soc_data mt7623_data = { .caps = MTK_GMAC1_TRGMII, .required_clks = MT7623_CLKS_BITMAP @@ -2602,6 +2664,7 @@ static const struct mtk_soc_data mt7623_data = { const struct of_device_id of_mtk_match[] = { { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, + { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, {}, }; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 8ade23dfcc77..4594862e5a9b 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -302,6 +302,9 @@ #define PHY_IAC_REG_SHIFT 25 #define PHY_IAC_TIMEOUT HZ +#define MTK_MAC_MISC 0x1000c +#define MTK_MUX_TO_ESW BIT(0) + /* Mac control registers */ #define MTK_MAC_MCR(x) (0x10100 + (x * 0x100)) #define MAC_MCR_MAX_RX_1536 BIT(24) @@ -357,11 +360,15 @@ #define ETHSYS_CHIPID0_3 0x0 #define ETHSYS_CHIPID4_7 0x4 #define MT7623_ETH 7623 +#define MT7622_ETH 7622 /* ethernet subsystem config register */ #define ETHSYS_SYSCFG0 0x14 #define SYSCFG0_GE_MASK 0x3 #define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2))) +#define SYSCFG0_SGMII_MASK (3 << 8) +#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & GENMASK(9, 8)) +#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & GENMASK(9, 8)) /* ethernet subsystem clock register */ #define ETHSYS_CLKCFG0 0x2c @@ -372,6 +379,23 @@ #define RSTCTRL_FE BIT(6) #define RSTCTRL_PPE 
BIT(31) +/* SGMII subsystem config registers */ +/* Register to auto-negotiation restart */ +#define SGMSYS_PCS_CONTROL_1 0x0 +#define SGMII_AN_RESTART BIT(9) + +/* Register to programmable link timer, the unit in 2 * 8ns */ +#define SGMSYS_PCS_LINK_TIMER 0x18 +#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0)) + +/* Register to control remote fault */ +#define SGMSYS_SGMII_MODE 0x20 +#define SGMII_REMOTE_FAULT_DIS BIT(8) + +/* Register to power up QPHY */ +#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8 +#define SGMII_PHYA_PWD BIT(4) + struct mtk_rx_dma { unsigned int rxd1; unsigned int rxd2; @@ -437,15 +461,31 @@ enum mtk_tx_flags { enum mtk_clks_map { MTK_CLK_ETHIF, MTK_CLK_ESW, + MTK_CLK_GP0, MTK_CLK_GP1, MTK_CLK_GP2, MTK_CLK_TRGPLL, + MTK_CLK_SGMII_TX_250M, + MTK_CLK_SGMII_RX_250M, + MTK_CLK_SGMII_CDR_REF, + MTK_CLK_SGMII_CDR_FB, + MTK_CLK_SGMII_CK, + MTK_CLK_ETH2PLL, MTK_CLK_MAX }; #define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \ BIT(MTK_CLK_TRGPLL)) +#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_GP2) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII_CK) | \ + BIT(MTK_CLK_ETH2PLL)) enum mtk_dev_state { MTK_HW_INIT, MTK_RESETTING @@ -516,9 +556,16 @@ struct mtk_rx_ring { #define MTK_TRGMII BIT(0) #define MTK_GMAC1_TRGMII (BIT(1) | MTK_TRGMII) +#define MTK_ESW BIT(4) +#define MTK_GMAC1_ESW (BIT(5) | MTK_ESW) +#define MTK_SGMII BIT(8) +#define MTK_GMAC1_SGMII (BIT(9) | MTK_SGMII) +#define MTK_GMAC2_SGMII (BIT(10) | MTK_SGMII) +#define MTK_DUAL_GMAC_SHARED_SGMII (BIT(11) | MTK_GMAC1_SGMII | \ + MTK_GMAC2_SGMII) #define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) -/* struct mtk_soc_data - This is the structure holding all differences +/* struct mtk_eth_data - This is the structure holding all differences * among various plaforms * @caps Flags shown the extra capability for the SoC * @required_clks Flags shown the bitmap for required clocks on @@ -547,6 +594,8 @@ struct mtk_soc_data { * @msg_enable: Ethtool msg level * @ethsys: The register map pointing at the range used to setup * MII modes + * @sgmiisys: The register map pointing at the range used to setup + * SGMII modes * @pctl: The register map pointing at the range used to setup * GMAC port drive/slew values * @dma_refcnt: track how many netdevs are using the DMA engine @@ -560,7 +609,7 @@ struct mtk_soc_data { * @clks: clock array for all clocks required * @mii_bus: If there is a bus we need to create an instance for it * @pending_work: The workqueue used to reset the dma ring - * @state Initialization and runtime state of the device + * @state: Initialization and runtime state of the device * @soc: Holding specific data among vaious SoCs */ @@ -577,6 +626,7 @@ struct mtk_eth { u32 msg_enable; unsigned long sysclk; struct regmap *ethsys; + struct regmap *sgmiisys; struct regmap *pctl; u32 chip_id; bool hwlro; -- cgit v1.2.3-55-g7522 From 0857d6f86527dd8cf166e0ec570865d981d3d06d Mon Sep 17 00:00:00 2001 From: Sean Wang Date: Mon, 31 Jul 2017 18:05:11 +0800 Subject: MAINTAINERS: add Sean/Nelson as MediaTek ethernet maintainers Sean and Nelson work for MediaTek on maintaining the MediaTek ethernet driver for the existing SoCs and adding support for the following SoCs. 
In the past, Sean has been active at making most of the qualifications , stress test and submitting a lot of patches for the driver while Nelson was looking into the aspects more on hardware additions and details such as introducing PDMA with Hardware LRO to the driver. Also update John's up-to-date mail address in the patch. Cc: John Crispin Signed-off-by: Sean Wang Signed-off-by: Nelson Chang Acked-by: John Crispin Signed-off-by: David S. Miller --- MAINTAINERS | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 8c5707f5860a..207e45310620 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8425,7 +8425,9 @@ F: include/uapi/linux/uvcvideo.h MEDIATEK ETHERNET DRIVER M: Felix Fietkau -M: John Crispin +M: John Crispin +M: Sean Wang +M: Nelson Chang L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/mediatek/ -- cgit v1.2.3-55-g7522 From c613c209c3f351d47158f728271d0c73b6dd24c6 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 31 Jul 2017 08:15:47 -0400 Subject: net: add skb_frag_foreach_page and use with kmap_atomic Skb frags may contain compound pages. Various operations map frags temporarily using kmap_atomic, but this function works on single pages, not whole compound pages. The distinction is only relevant for high mem pages that require temporary mappings. Introduce a looping mechanism that for compound highmem pages maps one page at a time, does not change behavior on other pages. Use the loop in the kmap_atomic callers in net/core/skbuff.c. Verified by triggering skb_copy_bits with tcpdump -n -c 100 -i ${DEV} -w /dev/null & netperf -t TCP_STREAM -H ${HOST} and by triggering __skb_checksum with ethtool -K ${DEV} tx off repeated the tests with looping on a non-highmem platform (x86_64) by making skb_frag_must_loop always return true. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/linux/skbuff.h | 36 +++++++++++++++++++++ net/core/skbuff.c | 88 +++++++++++++++++++++++++++++++++----------------- 2 files changed, 95 insertions(+), 29 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 18e76bf9574e..6f9f1b2715ec 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -345,6 +345,42 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) frag->size -= delta; } +static inline bool skb_frag_must_loop(struct page *p) +{ +#if defined(CONFIG_HIGHMEM) + if (PageHighMem(p)) + return true; +#endif + return false; +} + +/** + * skb_frag_foreach_page - loop over pages in a fragment + * + * @f: skb frag to operate on + * @f_off: offset from start of f->page.p + * @f_len: length from f_off to loop over + * @p: (temp var) current page + * @p_off: (temp var) offset from start of current page, + * non-zero only on first page. + * @p_len: (temp var) length in current page, + * < PAGE_SIZE only on first and last page. + * @copied: (temp var) length so far, excluding current p_len. + * + * A fragment can hold a compound page, in which case per-page + * operations, notably kmap_atomic, must be called for each + * regular page. + */ +#define skb_frag_foreach_page(f, f_off, f_len, p, p_off, p_len, copied) \ + for (p = skb_frag_page(f) + ((f_off) >> PAGE_SHIFT), \ + p_off = (f_off) & (PAGE_SIZE - 1), \ + p_len = skb_frag_must_loop(p) ? 
\ + min_t(u32, f_len, PAGE_SIZE - p_off) : f_len, \ + copied = 0; \ + copied < f_len; \ + copied += p_len, p++, p_off = 0, \ + p_len = min_t(u32, f_len - copied, PAGE_SIZE)) \ + #define HAVE_HW_TIME_STAMP /** diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 9c0e015ff3fe..0f0933b338d7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -938,8 +938,10 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; for (i = 0; i < num_frags; i++) { - u8 *vaddr; skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u32 p_off, p_len, copied; + struct page *p; + u8 *vaddr; page = alloc_page(gfp_mask); if (!page) { @@ -950,10 +952,15 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) } return -ENOMEM; } - vaddr = kmap_atomic(skb_frag_page(f)); - memcpy(page_address(page), - vaddr + f->page_offset, skb_frag_size(f)); - kunmap_atomic(vaddr); + + skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f), + p, p_off, p_len, copied) { + vaddr = kmap_atomic(p); + memcpy(page_address(page) + copied, vaddr + p_off, + p_len); + kunmap_atomic(vaddr); + } + set_page_private(page, (unsigned long)head); head = page; } @@ -1753,16 +1760,20 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) end = start + skb_frag_size(f); if ((copy = end - offset) > 0) { + u32 p_off, p_len, copied; + struct page *p; u8 *vaddr; if (copy > len) copy = len; - vaddr = kmap_atomic(skb_frag_page(f)); - memcpy(to, - vaddr + f->page_offset + offset - start, - copy); - kunmap_atomic(vaddr); + skb_frag_foreach_page(f, + f->page_offset + offset - start, + copy, p, p_off, p_len, copied) { + vaddr = kmap_atomic(p); + memcpy(to + copied, vaddr + p_off, p_len); + kunmap_atomic(vaddr); + } if ((len -= copy) == 0) return 0; @@ -2122,15 +2133,20 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { + u32 p_off, p_len, copied; + struct page *p; u8 *vaddr; if (copy > len) copy = len; - vaddr = kmap_atomic(skb_frag_page(frag)); - memcpy(vaddr + frag->page_offset + offset - start, - from, copy); - kunmap_atomic(vaddr); + skb_frag_foreach_page(frag, + frag->page_offset + offset - start, + copy, p, p_off, p_len, copied) { + vaddr = kmap_atomic(p); + memcpy(vaddr + p_off, from + copied, p_len); + kunmap_atomic(vaddr); + } if ((len -= copy) == 0) return 0; @@ -2195,20 +2211,27 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { + u32 p_off, p_len, copied; + struct page *p; __wsum csum2; u8 *vaddr; if (copy > len) copy = len; - vaddr = kmap_atomic(skb_frag_page(frag)); - csum2 = ops->update(vaddr + frag->page_offset + - offset - start, copy, 0); - kunmap_atomic(vaddr); - csum = ops->combine(csum, csum2, pos, copy); + + skb_frag_foreach_page(frag, + frag->page_offset + offset - start, + copy, p, p_off, p_len, copied) { + vaddr = kmap_atomic(p); + csum2 = ops->update(vaddr + p_off, p_len, 0); + kunmap_atomic(vaddr); + csum = ops->combine(csum, csum2, pos, p_len); + pos += p_len; + } + if (!(len -= copy)) return csum; offset += copy; - pos += copy; } start = end; } @@ -2281,24 +2304,31 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); if ((copy = end - offset) > 0) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 p_off, p_len, copied; + struct page *p; __wsum csum2; u8 *vaddr; - skb_frag_t *frag = 
&skb_shinfo(skb)->frags[i]; if (copy > len) copy = len; - vaddr = kmap_atomic(skb_frag_page(frag)); - csum2 = csum_partial_copy_nocheck(vaddr + - frag->page_offset + - offset - start, to, - copy, 0); - kunmap_atomic(vaddr); - csum = csum_block_add(csum, csum2, pos); + + skb_frag_foreach_page(frag, + frag->page_offset + offset - start, + copy, p, p_off, p_len, copied) { + vaddr = kmap_atomic(p); + csum2 = csum_partial_copy_nocheck(vaddr + p_off, + to + copied, + p_len, 0); + kunmap_atomic(vaddr); + csum = csum_block_add(csum, csum2, pos); + pos += p_len; + } + if (!(len -= copy)) return csum; offset += copy; to += copy; - pos += copy; } start = end; } -- cgit v1.2.3-55-g7522 From 3e3d647715d40173e359c91c69059b9cec7ba6ca Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Tue, 1 Aug 2017 12:14:36 +0200 Subject: ravb: add wake-on-lan support via magic packet WoL is enabled in the suspend callback by setting MagicPacket detection and disabling all interrupts expect MagicPacket. In the resume path the driver needs to reset the hardware to rearm the WoL logic, this prevents the driver from simply restoring the registers and to take advantage of that ravb was not suspended to reduce resume time. To reset the hardware the driver closes the device, sets it in reset mode and reopens the device just like it would do in a normal suspend/resume scenario without WoL enabled, but it both closes and opens the device in the resume callback since the device needs to be reset for WoL to work. One quirk needed for WoL is that the module clock needs to be prevented from being switched off by Runtime PM. To keep the clock alive the suspend callback need to call clk_enable() directly to increase the usage count of the clock. Then when Runtime PM decreases the clock usage count it won't reach 0 and be switched off. Signed-off-by: Niklas Söderlund Acked-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/ravb.h | 2 + drivers/net/ethernet/renesas/ravb_main.c | 108 +++++++++++++++++++++++++++++-- 2 files changed, 106 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 0525bd696d5d..96a27b00c90e 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -991,6 +991,7 @@ struct ravb_private { struct net_device *ndev; struct platform_device *pdev; void __iomem *addr; + struct clk *clk; struct mdiobb_ctrl mdiobb; u32 num_rx_ring[NUM_RX_QUEUE]; u32 num_tx_ring[NUM_TX_QUEUE]; @@ -1033,6 +1034,7 @@ struct ravb_private { unsigned no_avb_link:1; unsigned avb_link_active_low:1; + unsigned wol_enabled:1; }; static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 5931e859876c..6d10db1b5146 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -680,6 +680,9 @@ static void ravb_emac_interrupt_unlocked(struct net_device *ndev) ecsr = ravb_read(ndev, ECSR); ravb_write(ndev, ecsr, ECSR); /* clear interrupt */ + + if (ecsr & ECSR_MPD) + pm_wakeup_event(&priv->pdev->dev, 0); if (ecsr & ECSR_ICD) ndev->stats.tx_carrier_errors++; if (ecsr & ECSR_LCHNG) { @@ -1330,6 +1333,33 @@ static int ravb_get_ts_info(struct net_device *ndev, return 0; } +static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ravb_private *priv = netdev_priv(ndev); + + wol->supported = 0; + wol->wolopts = 0; + + if (priv->clk) { + wol->supported = WAKE_MAGIC; + wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0; + } +} + +static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) +{ + struct ravb_private *priv = netdev_priv(ndev); + + if (!priv->clk || wol->wolopts & ~WAKE_MAGIC) + return -EOPNOTSUPP; + + priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); + + device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled); + + return 0; +} + static const struct ethtool_ops ravb_ethtool_ops = { .nway_reset = ravb_nway_reset, .get_msglevel = ravb_get_msglevel, @@ -1343,6 +1373,8 @@ static const struct ethtool_ops ravb_ethtool_ops = { .get_ts_info = ravb_get_ts_info, .get_link_ksettings = ravb_get_link_ksettings, .set_link_ksettings = ravb_set_link_ksettings, + .get_wol = ravb_get_wol, + .set_wol = ravb_set_wol, }; static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, @@ -2041,6 +2073,11 @@ static int ravb_probe(struct platform_device *pdev) priv->chip_id = chip_id; + /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */ + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + priv->clk = NULL; + /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; ndev->ethtool_ops = &ravb_ethtool_ops; @@ -2107,6 +2144,9 @@ static int ravb_probe(struct platform_device *pdev) if (error) goto out_napi_del; + if (priv->clk) + device_set_wakeup_capable(&pdev->dev, 1); + /* Print device information */ netdev_info(ndev, "Base address at %#x, %pM, IRQ %d.\n", (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); @@ -2160,15 +2200,66 @@ static int ravb_remove(struct platform_device *pdev) return 0; } +static int ravb_wol_setup(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + + /* Disable interrupts by clearing the interrupt masks. 
*/ + ravb_write(ndev, 0, RIC0); + ravb_write(ndev, 0, RIC2); + ravb_write(ndev, 0, TIC); + + /* Only allow ECI interrupts */ + synchronize_irq(priv->emac_irq); + napi_disable(&priv->napi[RAVB_NC]); + napi_disable(&priv->napi[RAVB_BE]); + ravb_write(ndev, ECSIPR_MPDIP, ECSIPR); + + /* Enable MagicPacket */ + ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE); + + /* Increased clock usage so device won't be suspended */ + clk_enable(priv->clk); + + return enable_irq_wake(priv->emac_irq); +} + +static int ravb_wol_restore(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + int ret; + + napi_enable(&priv->napi[RAVB_NC]); + napi_enable(&priv->napi[RAVB_BE]); + + /* Disable MagicPacket */ + ravb_modify(ndev, ECMR, ECMR_MPDE, 0); + + ret = ravb_close(ndev); + if (ret < 0) + return ret; + + /* Restore clock usage count */ + clk_disable(priv->clk); + + return disable_irq_wake(priv->emac_irq); +} + static int __maybe_unused ravb_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); - int ret = 0; + struct ravb_private *priv = netdev_priv(ndev); + int ret; - if (netif_running(ndev)) { - netif_device_detach(ndev); + if (!netif_running(ndev)) + return 0; + + netif_device_detach(ndev); + + if (priv->wol_enabled) + ret = ravb_wol_setup(ndev); + else ret = ravb_close(ndev); - } return ret; } @@ -2179,6 +2270,10 @@ static int __maybe_unused ravb_resume(struct device *dev) struct ravb_private *priv = netdev_priv(ndev); int ret = 0; + /* If WoL is enabled set reset mode to rearm the WoL logic */ + if (priv->wol_enabled) + ravb_write(ndev, CCC_OPC_RESET, CCC); + /* All register have been reset to default values. * Restore all registers which where setup at probe time and * reopen device if it was running before system suspended. @@ -2202,6 +2297,11 @@ static int __maybe_unused ravb_resume(struct device *dev) ravb_write(ndev, priv->desc_bat_dma, DBAT); if (netif_running(ndev)) { + if (priv->wol_enabled) { + ret = ravb_wol_restore(ndev); + if (ret) + return ret; + } ret = ravb_open(ndev); if (ret < 0) return ret; -- cgit v1.2.3-55-g7522 From fbf3d034f2ff6264183cfa6845770e8cc2a986c8 Mon Sep 17 00:00:00 2001 From: Niklas Söderlund Date: Tue, 1 Aug 2017 12:14:37 +0200 Subject: ravb: add workaround for clock when resuming with WoL enabled The renesas-cpg-mssr clock driver are not yet aware of PSCI sleep where power is cut to the SoC. When resuming from this state with WoL enabled the enable count of the ravb clock is 1 and the clock driver thinks the clock is already on when PM core enables the clock and increments the enable count to 2. This will result in the ravb driver failing to talk to the hardware since the module clock is off. Work around this by forcing the enable count to 0 and then back to 2 when resuming with WoL enabled. This workaround should be reverted once the renesas-cpg-mssr clock driver becomes aware of this PSCI sleep behavior. Signed-off-by: Niklas Söderlund Acked-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/ravb_main.c | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 6d10db1b5146..fdf30bfa403b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2270,9 +2270,32 @@ static int __maybe_unused ravb_resume(struct device *dev) struct ravb_private *priv = netdev_priv(ndev); int ret = 0; - /* If WoL is enabled set reset mode to rearm the WoL logic */ - if (priv->wol_enabled) + if (priv->wol_enabled) { + /* Reduce the usecount of the clock to zero and then + * restore it to its original value. This is done to force + * the clock to be re-enabled which is a workaround + * for renesas-cpg-mssr driver which do not enable clocks + * when resuming from PSCI suspend/resume. + * + * Without this workaround the driver fails to communicate + * with the hardware if WoL was enabled when the system + * entered PSCI suspend. This is due to that if WoL is enabled + * we explicitly keep the clock from being turned off when + * suspending, but in PSCI sleep power is cut so the clock + * is disabled anyhow, the clock driver is not aware of this + * so the clock is not turned back on when resuming. + * + * TODO: once the renesas-cpg-mssr suspend/resume is working + * this clock dance should be removed. + */ + clk_disable(priv->clk); + clk_disable(priv->clk); + clk_enable(priv->clk); + clk_enable(priv->clk); + + /* Set reset mode to rearm the WoL logic */ ravb_write(ndev, CCC_OPC_RESET, CCC); + } /* All register have been reset to default values. * Restore all registers which where setup at probe time and -- cgit v1.2.3-55-g7522 From 7b9cc73843e89ea1ac215511c13c259a70d1b4f7 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:31 -0400 Subject: net: dsa: PHY device is mandatory for EEE The port's PHY and MAC are both implied in EEE. The current code does not call the PHY operations if the related device is NULL. Change that by returning -ENODEV if there's no PHY device attached to the interface. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/slave.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 9507bd38cf04..7df55d597740 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -648,6 +648,10 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) struct dsa_switch *ds = p->dp->ds; int ret; + /* Port's PHY and MAC both need to be EEE capable */ + if (!p->phy) + return -ENODEV; + if (!ds->ops->set_eee) return -EOPNOTSUPP; @@ -655,10 +659,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) if (ret) return ret; - if (p->phy) - ret = phy_ethtool_set_eee(p->phy, e); - - return ret; + return phy_ethtool_set_eee(p->phy, e); } static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) @@ -667,6 +668,10 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) struct dsa_switch *ds = p->dp->ds; int ret; + /* Port's PHY and MAC both need to be EEE capable */ + if (!p->phy) + return -ENODEV; + if (!ds->ops->get_eee) return -EOPNOTSUPP; @@ -674,10 +679,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) if (ret) return ret; - if (p->phy) - ret = phy_ethtool_get_eee(p->phy, e); - - return ret; + return phy_ethtool_get_eee(p->phy, e); } #ifdef CONFIG_NET_POLL_CONTROLLER -- cgit v1.2.3-55-g7522 From 06d7b5c3acee7b42cd9aa47165587b9c1fb992d3 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:32 -0400 Subject: net: dsa: qca8k: fix EEE init The qca8k obviously copied code from the sf2 driver as how to set EEE: if (e->eee_enabled) { p->eee_enabled = qca8k_eee_init(ds, port, phydev); if (!p->eee_enabled) ret = -EOPNOTSUPP; } But it did not use the same logic for the EEE init routine, which is "Returns 0 if EEE was not enabled, or 1 otherwise". This results in returning -EOPNOTSUPP on success and caching EEE enabled on failure. This patch fixes the returned value of qca8k_eee_init. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index b3bee7eab45f..e076ab23d4df 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -666,11 +666,11 @@ qca8k_eee_init(struct dsa_switch *ds, int port, ret = phy_init_eee(phy, 0); if (ret) - return ret; + return 0; qca8k_eee_enable_set(ds, port, true); - return 0; + return 1; } static int -- cgit v1.2.3-55-g7522 From dca20989313aba3288e3a1d3a88007ccafb37e25 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:33 -0400 Subject: net: dsa: qca8k: enable EEE once If EEE is queried enabled, qca8k_set_eee calls qca8k_eee_enable_set twice (because it is already called in qca8k_eee_init). Fix that. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/qca8k.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index e076ab23d4df..9d6b5d2f7a4a 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -684,12 +684,13 @@ qca8k_set_eee(struct dsa_switch *ds, int port, p->eee_enabled = e->eee_enabled; - if (e->eee_enabled) { + if (!p->eee_enabled) { + qca8k_eee_enable_set(ds, port, false); + } else { p->eee_enabled = qca8k_eee_init(ds, port, phydev); if (!p->eee_enabled) ret = -EOPNOTSUPP; } - qca8k_eee_enable_set(ds, port, p->eee_enabled); return ret; } -- cgit v1.2.3-55-g7522 From a2444ab83eb4ada01a3b35749cfc581130027816 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:34 -0400 Subject: net: dsa: qca8k: do not cache unneeded EEE fields The qca8k driver is currently caching a bitfield of the supported member of a ethtool_eee private structure, which is unused. Only the eee_enabled field of the private ethtool_eee copy is updated, thus using p->advertised and p->lp_advertised is also erroneous. Remove the usage of these private ethtool_eee members and only rely on phy_ethtool_get_eee to assign the eee_active member. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/qca8k.c | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 9d6b5d2f7a4a..c316c55aabc6 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -658,12 +658,8 @@ static int qca8k_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - struct ethtool_eee *p = &priv->port_sts[port].eee; int ret; - p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full); - ret = phy_init_eee(phy, 0); if (ret) return 0; @@ -705,12 +701,7 @@ qca8k_get_eee(struct dsa_switch *ds, int port, int ret; ret = phy_ethtool_get_eee(netdev->phydev, p); - if (!ret) - e->eee_active = - !!(p->supported & p->advertised & p->lp_advertised); - else - e->eee_active = 0; - + e->eee_active = p->eee_active; e->eee_enabled = p->eee_enabled; return ret; -- cgit v1.2.3-55-g7522 From 193da90e6075fceb48dc22d9cab0cea557fb0713 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:35 -0400 Subject: net: dsa: qca8k: empty qca8k_get_eee phy_ethtool_get_eee is already called by the DSA layer, thus remove the duplicated call in the qca8k driver. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/qca8k.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index c316c55aabc6..8cd4634c6985 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -695,16 +695,8 @@ static int qca8k_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - struct ethtool_eee *p = &priv->port_sts[port].eee; - struct net_device *netdev = ds->ports[port].netdev; - int ret; - - ret = phy_ethtool_get_eee(netdev->phydev, p); - e->eee_active = p->eee_active; - e->eee_enabled = p->eee_enabled; - - return ret; + /* Nothing to do on the port's MAC */ + return 0; } static void -- cgit v1.2.3-55-g7522 From b11af0ce11d1ca3d6292cda01fc2609309247f7a Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:36 -0400 Subject: net: dsa: bcm_sf2: remove unneeded supported flags The SF2 driver is masking the supported bitfield of its private copy of the ports' ethtool_eee structures. It is used nowhere, thus remove it. Signed-off-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 648f91b58d1e..aef475f1ce06 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -327,12 +327,8 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port, static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, struct phy_device *phy) { - struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); - struct ethtool_eee *p = &priv->port_sts[port].eee; int ret; - p->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full); - ret = phy_init_eee(phy, 0); if (ret) return 0; -- cgit v1.2.3-55-g7522 From 7f5c6d7cdda7b67b11659cb5d2cfbaf5210d2fa8 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:37 -0400 Subject: net: dsa: mv88e6xxx: call phy_init_eee It is safer to init the EEE before the DSA layer call phy_ethtool_set_eee, as sf2 and qca8k are doing. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 647d5d45c1d6..b531d4a3bab5 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -855,6 +855,12 @@ static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; int err; + if (e->eee_enabled) { + err = phy_init_eee(phydev, 0); + if (err) + return err; + } + mutex_lock(&chip->reg_lock); err = mv88e6xxx_energy_detect_write(chip, port, e); mutex_unlock(&chip->reg_lock); -- cgit v1.2.3-55-g7522 From c48f7eb3021c993cfb15ee4963df2fd6683c2f0d Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:38 -0400 Subject: net: dsa: call phy_init_eee in DSA layer All DSA drivers are calling phy_init_eee if eee_enabled is true. Move up this statement in the DSA layer to simplify the DSA drivers. qca8k does not require to cache the ethtool_eee structures from now on. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 9 +-------- drivers/net/dsa/mv88e6xxx/chip.c | 6 ------ drivers/net/dsa/qca8k.c | 31 ++----------------------------- drivers/net/dsa/qca8k.h | 1 - net/dsa/slave.c | 6 ++++++ 5 files changed, 9 insertions(+), 44 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index aef475f1ce06..9d10aac8f241 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -360,14 +360,7 @@ static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, struct ethtool_eee *p = &priv->port_sts[port].eee; p->eee_enabled = e->eee_enabled; - - if (!p->eee_enabled) { - bcm_sf2_eee_enable_set(ds, port, false); - } else { - p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); - if (!p->eee_enabled) - return -EOPNOTSUPP; - } + bcm_sf2_eee_enable_set(ds, port, e->eee_enabled); return 0; } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index b531d4a3bab5..647d5d45c1d6 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -855,12 +855,6 @@ static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct mv88e6xxx_chip *chip = ds->priv; int err; - if (e->eee_enabled) { - err = phy_init_eee(phydev, 0); - if (err) - return err; - } - mutex_lock(&chip->reg_lock); err = mv88e6xxx_energy_detect_write(chip, port, e); mutex_unlock(&chip->reg_lock); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 8cd4634c6985..bfe0172ae6cc 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -654,41 +654,14 @@ qca8k_eee_enable_set(struct dsa_switch *ds, int port, bool enable) mutex_unlock(&priv->reg_mutex); } -static int -qca8k_eee_init(struct dsa_switch *ds, int port, - struct phy_device *phy) -{ - int ret; - - ret = phy_init_eee(phy, 0); - if (ret) - return 0; - - qca8k_eee_enable_set(ds, port, true); - - return 1; -} - static int qca8k_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e) { - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - struct ethtool_eee *p = &priv->port_sts[port].eee; - int ret = 0; + qca8k_eee_enable_set(ds, port, e->eee_enabled); - p->eee_enabled = e->eee_enabled; - - if (!p->eee_enabled) { - qca8k_eee_enable_set(ds, port, false); - } else { - p->eee_enabled = qca8k_eee_init(ds, port, phydev); - if (!p->eee_enabled) - ret = -EOPNOTSUPP; - } - - return ret; + return 0; } static int diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 1ed4fac6cd6d..1cf8a920d4ff 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -156,7 +156,6 @@ enum qca8k_fdb_cmd { }; struct ar8xxx_port_status { - struct ethtool_eee eee; int enabled; }; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 7df55d597740..ad5caaf384d7 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -659,6 +659,12 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) if (ret) return ret; + if (e->eee_enabled) { + ret = phy_init_eee(p->phy, 0); + if (ret) + return ret; + } + return phy_ethtool_set_eee(p->phy, e); } -- cgit v1.2.3-55-g7522 From 46587e4a312780a63132483bc8678c397eca6aff Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:39 -0400 Subject: net: dsa: remove PHY device argument from .set_eee The DSA switch operations for EEE are only meant to configure a port's MAC EEE settings. The port's PHY EEE settings are accessed by the DSA layer and must be made available via a proper PHY driver. 
In order to reduce this confusion, remove the phy_device argument from the .set_eee operation. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.c | 1 - drivers/net/dsa/mv88e6xxx/chip.c | 2 +- drivers/net/dsa/qca8k.c | 14 +++----------- include/net/dsa.h | 1 - net/dsa/slave.c | 2 +- 5 files changed, 5 insertions(+), 15 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 9d10aac8f241..ce886345d8d2 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -353,7 +353,6 @@ static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, } static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, - struct phy_device *phydev, struct ethtool_eee *e) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 647d5d45c1d6..aaa96487f21f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -850,7 +850,7 @@ static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, } static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, - struct phy_device *phydev, struct ethtool_eee *e) + struct ethtool_eee *e) { struct mv88e6xxx_chip *chip = ds->priv; int err; diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index bfe0172ae6cc..e209e229ed4c 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -637,8 +637,8 @@ qca8k_get_sset_count(struct dsa_switch *ds) return ARRAY_SIZE(ar8327_mib); } -static void -qca8k_eee_enable_set(struct dsa_switch *ds, int port, bool enable) +static int +qca8k_set_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); @@ -646,20 +646,12 @@ qca8k_eee_enable_set(struct dsa_switch *ds, int port, bool enable) mutex_lock(&priv->reg_mutex); reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL); - if (enable) + if (eee->eee_enabled) reg |= lpi_en; else reg &= ~lpi_en; qca8k_write(priv, QCA8K_REG_EEE_CTRL, reg); mutex_unlock(&priv->reg_mutex); -} - -static int -qca8k_set_eee(struct dsa_switch *ds, int port, - struct phy_device *phydev, - struct ethtool_eee *e) -{ - qca8k_eee_enable_set(ds, port, e->eee_enabled); return 0; } diff --git a/include/net/dsa.h b/include/net/dsa.h index 88da272d20d0..ce46db323394 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -335,7 +335,6 @@ struct dsa_switch_ops { * EEE setttings */ int (*set_eee)(struct dsa_switch *ds, int port, - struct phy_device *phydev, struct ethtool_eee *e); int (*get_eee)(struct dsa_switch *ds, int port, struct ethtool_eee *e); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index ad5caaf384d7..9ddc584e70b0 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -655,7 +655,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) if (!ds->ops->set_eee) return -EOPNOTSUPP; - ret = ds->ops->set_eee(ds, p->dp->index, p->phy, e); + ret = ds->ops->set_eee(ds, p->dp->index, e); if (ret) return ret; -- cgit v1.2.3-55-g7522 From 5480db6985640a44ff904d4b6ef7ec668b785ec2 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:40 -0400 Subject: net: dsa: mv88e6xxx: remove EEE support The PHY's EEE settings are already accessed by the DSA layer through the Marvell PHY driver and there is nothing to be done for switch's MACs. Remove all EEE support from the mv88e6xxx driver and simply return 0 from the EEE ops. 
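[Editor's note, not part of the commit above: the end state of this EEE series is that the DSA core owns the PHY side of EEE (phy_init_eee() and phy_ethtool_set_eee()/get_eee() in net/dsa/slave.c), so a switch driver's .set_eee/.get_eee ops only configure the MAC, or return 0 when the MAC needs no setup. Below is a minimal sketch of a driver following that convention; only the op signatures come from the patches in this log, while the mysw_* names, MYSW_* register macro and per-port LPI bit are hypothetical, loosely modeled on the qca8k hunk earlier in this section.]

#include <net/dsa.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/mutex.h>

/* Hypothetical driver scaffolding for illustration only */
#define MYSW_REG_EEE_CTRL	0x100
#define MYSW_EEE_LPI_EN(port)	BIT(4 + (port))

struct mysw_priv {
	struct mutex reg_mutex;
};

u32 mysw_read(struct mysw_priv *priv, u32 reg);		/* hypothetical register accessors */
void mysw_write(struct mysw_priv *priv, u32 reg, u32 val);

static int mysw_set_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct mysw_priv *priv = ds->priv;
	u32 reg;

	/* MAC side only: enable or disable LPI signalling for this port.
	 * The PHY side (phy_init_eee, advertisement) is handled by the
	 * DSA core before this op is called.
	 */
	mutex_lock(&priv->reg_mutex);
	reg = mysw_read(priv, MYSW_REG_EEE_CTRL);
	if (e->eee_enabled)
		reg |= MYSW_EEE_LPI_EN(port);
	else
		reg &= ~MYSW_EEE_LPI_EN(port);
	mysw_write(priv, MYSW_REG_EEE_CTRL, reg);
	mutex_unlock(&priv->reg_mutex);

	return 0;
}

static int mysw_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	/* Nothing to report for the MAC; the DSA core fills in the PHY state */
	return 0;
}

[A driver whose MAC needs no EEE configuration at all, as mv88e6xxx after this patch, can simply return 0 from both ops.]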
Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 74 ++----------------------------- drivers/net/dsa/mv88e6xxx/chip.h | 6 --- drivers/net/dsa/mv88e6xxx/phy.c | 96 ---------------------------------------- drivers/net/dsa/mv88e6xxx/phy.h | 22 --------- drivers/net/dsa/mv88e6xxx/port.c | 17 ------- drivers/net/dsa/mv88e6xxx/port.h | 3 -- 6 files changed, 4 insertions(+), 214 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index aaa96487f21f..aa0c5493fb9d 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -810,56 +810,18 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, mutex_unlock(&chip->reg_lock); } -static int mv88e6xxx_energy_detect_read(struct mv88e6xxx_chip *chip, int port, - struct ethtool_eee *eee) -{ - int err; - - if (!chip->info->ops->phy_energy_detect_read) - return -EOPNOTSUPP; - - /* assign eee->eee_enabled and eee->tx_lpi_enabled */ - err = chip->info->ops->phy_energy_detect_read(chip, port, eee); - if (err) - return err; - - /* assign eee->eee_active */ - return mv88e6xxx_port_status_eee(chip, port, eee); -} - -static int mv88e6xxx_energy_detect_write(struct mv88e6xxx_chip *chip, int port, - struct ethtool_eee *eee) -{ - if (!chip->info->ops->phy_energy_detect_write) - return -EOPNOTSUPP; - - return chip->info->ops->phy_energy_detect_write(chip, port, eee); -} - static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { - struct mv88e6xxx_chip *chip = ds->priv; - int err; - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_energy_detect_read(chip, port, e); - mutex_unlock(&chip->reg_lock); - - return err; + /* Nothing to do on the port's MAC */ + return 0; } static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { - struct mv88e6xxx_chip *chip = ds->priv; - int err; - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_energy_detect_write(chip, port, e); - mutex_unlock(&chip->reg_lock); - - return err; + /* Nothing to do on the port's MAC */ + return 0; } static u16 mv88e6xxx_port_vlan(struct mv88e6xxx_chip *chip, int dev, int port) @@ -2521,8 +2483,6 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -2648,8 +2608,6 @@ static const struct mv88e6xxx_ops mv88e6172_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -2719,8 +2677,6 @@ static const struct mv88e6xxx_ops mv88e6176_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = 
mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -2784,8 +2740,6 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -2821,8 +2775,6 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -2858,8 +2810,6 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -2895,8 +2845,6 @@ static const struct mv88e6xxx_ops mv88e6240_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -2933,8 +2881,6 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -2971,8 +2917,6 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3006,8 +2950,6 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_speed = mv88e6185_port_set_speed, @@ -3039,8 +2981,6 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = 
mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -3142,8 +3082,6 @@ static const struct mv88e6xxx_ops mv88e6352_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6352_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6352_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay, @@ -3180,8 +3118,6 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, @@ -3220,8 +3156,6 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .set_switch_mac = mv88e6xxx_g2_set_switch_mac, .phy_read = mv88e6xxx_g2_smi_phy_read, .phy_write = mv88e6xxx_g2_smi_phy_write, - .phy_energy_detect_read = mv88e6390_phy_energy_detect_read, - .phy_energy_detect_write = mv88e6390_phy_energy_detect_write, .port_set_link = mv88e6xxx_port_set_link, .port_set_duplex = mv88e6xxx_port_set_duplex, .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay, diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index 9111e1316250..334f6f7544ba 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -239,12 +239,6 @@ struct mv88e6xxx_ops { struct mii_bus *bus, int addr, int reg, u16 val); - /* Copper Energy Detect operations */ - int (*phy_energy_detect_read)(struct mv88e6xxx_chip *chip, int phy, - struct ethtool_eee *eee); - int (*phy_energy_detect_write)(struct mv88e6xxx_chip *chip, int phy, - struct ethtool_eee *eee); - /* Priority Override Table operations */ int (*pot_clear)(struct mv88e6xxx_chip *chip); diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 317ae89cfa68..436668bd50dc 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -246,99 +246,3 @@ int mv88e6xxx_phy_setup(struct mv88e6xxx_chip *chip) { return mv88e6xxx_phy_ppu_enable(chip); } - -/* Page 0, Register 16: Copper Specific Control Register 1 */ - -int mv88e6352_phy_energy_detect_read(struct mv88e6xxx_chip *chip, int phy, - struct ethtool_eee *eee) -{ - u16 val; - int err; - - err = mv88e6xxx_phy_read(chip, phy, MV88E6XXX_PHY_CSCTL1, &val); - if (err) - return err; - - val &= MV88E6352_PHY_CSCTL1_ENERGY_DETECT_MASK; - - eee->eee_enabled = false; - eee->tx_lpi_enabled = false; - - switch (val) { - case MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP: - eee->tx_lpi_enabled = true; - /* fall through... 
*/ - case MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV: - eee->eee_enabled = true; - } - - return 0; -} - -int mv88e6352_phy_energy_detect_write(struct mv88e6xxx_chip *chip, int phy, - struct ethtool_eee *eee) -{ - u16 val; - int err; - - err = mv88e6xxx_phy_read(chip, phy, MV88E6XXX_PHY_CSCTL1, &val); - if (err) - return err; - - val &= ~MV88E6352_PHY_CSCTL1_ENERGY_DETECT_MASK; - - if (eee->eee_enabled) - val |= MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV; - if (eee->tx_lpi_enabled) - val |= MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP; - - return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_CSCTL1, val); -} - -int mv88e6390_phy_energy_detect_read(struct mv88e6xxx_chip *chip, int phy, - struct ethtool_eee *eee) -{ - u16 val; - int err; - - err = mv88e6xxx_phy_read(chip, phy, MV88E6XXX_PHY_CSCTL1, &val); - if (err) - return err; - - val &= MV88E6390_PHY_CSCTL1_ENERGY_DETECT_MASK; - - eee->eee_enabled = false; - eee->tx_lpi_enabled = false; - - switch (val) { - case MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP_AUTO: - case MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP_SW: - eee->tx_lpi_enabled = true; - /* fall through... */ - case MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV_AUTO: - case MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV_SW: - eee->eee_enabled = true; - } - - return 0; -} - -int mv88e6390_phy_energy_detect_write(struct mv88e6xxx_chip *chip, int phy, - struct ethtool_eee *eee) -{ - u16 val; - int err; - - err = mv88e6xxx_phy_read(chip, phy, MV88E6XXX_PHY_CSCTL1, &val); - if (err) - return err; - - val &= ~MV88E6390_PHY_CSCTL1_ENERGY_DETECT_MASK; - - if (eee->eee_enabled) - val |= MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV_AUTO; - if (eee->tx_lpi_enabled) - val |= MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP_AUTO; - - return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_CSCTL1, val); -} diff --git a/drivers/net/dsa/mv88e6xxx/phy.h b/drivers/net/dsa/mv88e6xxx/phy.h index 988802799ad6..556b74a0502a 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.h +++ b/drivers/net/dsa/mv88e6xxx/phy.h @@ -17,19 +17,6 @@ #define MV88E6XXX_PHY_PAGE 0x16 #define MV88E6XXX_PHY_PAGE_COPPER 0x00 -/* Page 0, Register 16: Copper Specific Control Register 1 */ -#define MV88E6XXX_PHY_CSCTL1 16 -#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_MASK 0x0300 -#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_OFF_MASK 0x0100 /* 0x */ -#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV 0x0200 -#define MV88E6352_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP 0x0300 -#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_MASK 0x0380 -#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_OFF_MASK 0x0180 /* 0xx */ -#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV_AUTO 0x0200 -#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_RCV_SW 0x0280 -#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP_AUTO 0x0300 -#define MV88E6390_PHY_CSCTL1_ENERGY_DETECT_SENSE_NLP_SW 0x0380 - /* PHY Registers accesses implementations */ int mv88e6165_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus, int addr, int reg, u16 *val); @@ -53,13 +40,4 @@ void mv88e6xxx_phy_init(struct mv88e6xxx_chip *chip); void mv88e6xxx_phy_destroy(struct mv88e6xxx_chip *chip); int mv88e6xxx_phy_setup(struct mv88e6xxx_chip *chip); -int mv88e6352_phy_energy_detect_read(struct mv88e6xxx_chip *chip, int phy, - struct ethtool_eee *eee); -int mv88e6352_phy_energy_detect_write(struct mv88e6xxx_chip *chip, int phy, - struct ethtool_eee *eee); -int mv88e6390_phy_energy_detect_read(struct mv88e6xxx_chip *chip, int phy, - struct ethtool_eee *eee); -int mv88e6390_phy_energy_detect_write(struct mv88e6xxx_chip 
*chip, int phy, - struct ethtool_eee *eee); - #endif /*_MV88E6XXX_PHY_H */ diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index 2837a9128557..a7801f6668a5 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -35,23 +35,6 @@ int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, return mv88e6xxx_write(chip, addr, reg, val); } -/* Offset 0x00: Port Status Register */ - -int mv88e6xxx_port_status_eee(struct mv88e6xxx_chip *chip, int port, - struct ethtool_eee *eee) -{ - u16 val; - int err; - - err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &val); - if (err) - return err; - - eee->eee_active = !!(val & MV88E6352_PORT_STS_EEE); - - return 0; -} - /* Offset 0x01: MAC (or PCS or Physical) Control Register * * Link, Duplex and Flow Control have one force bit, one value bit. diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 6fcab309cd85..b16d5f0e6e9c 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -241,9 +241,6 @@ int mv88e6xxx_port_read(struct mv88e6xxx_chip *chip, int port, int reg, int mv88e6xxx_port_write(struct mv88e6xxx_chip *chip, int port, int reg, u16 val); -int mv88e6xxx_port_status_eee(struct mv88e6xxx_chip *chip, int port, - struct ethtool_eee *eee); - int mv88e6352_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6390_port_set_rgmii_delay(struct mv88e6xxx_chip *chip, int port, -- cgit v1.2.3-55-g7522 From 08f500610f39809c107f206cba1f799c98c38054 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 1 Aug 2017 16:32:41 -0400 Subject: net: dsa: rename switch EEE ops To avoid confusion with the PHY EEE settings, rename the .set_eee and .get_eee ops to respectively .set_mac_eee and .get_mac_eee. Signed-off-by: Vivien Didelot Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 12 ++++++------ drivers/net/dsa/mv88e6xxx/chip.c | 12 ++++++------ drivers/net/dsa/qca8k.c | 9 ++++----- include/net/dsa.h | 10 +++++----- net/dsa/slave.c | 8 ++++---- 5 files changed, 25 insertions(+), 26 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index ce886345d8d2..6bbfa6ea1efb 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -338,8 +338,8 @@ static int bcm_sf2_eee_init(struct dsa_switch *ds, int port, return 1; } -static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) +static int bcm_sf2_sw_get_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; @@ -352,8 +352,8 @@ static int bcm_sf2_sw_get_eee(struct dsa_switch *ds, int port, return 0; } -static int bcm_sf2_sw_set_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) +static int bcm_sf2_sw_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_eee *p = &priv->port_sts[port].eee; @@ -1011,8 +1011,8 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .set_wol = bcm_sf2_sw_set_wol, .port_enable = bcm_sf2_port_setup, .port_disable = bcm_sf2_port_disable, - .get_eee = bcm_sf2_sw_get_eee, - .set_eee = bcm_sf2_sw_set_eee, + .get_mac_eee = bcm_sf2_sw_get_mac_eee, + .set_mac_eee = bcm_sf2_sw_set_mac_eee, .port_bridge_join = b53_br_join, .port_bridge_leave = b53_br_leave, .port_stp_state_set = b53_br_set_stp_state, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index aa0c5493fb9d..521738c4cd17 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -810,15 +810,15 @@ static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port, mutex_unlock(&chip->reg_lock); } -static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) +static int mv88e6xxx_get_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { /* Nothing to do on the port's MAC */ return 0; } -static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) +static int mv88e6xxx_set_mac_eee(struct dsa_switch *ds, int port, + struct ethtool_eee *e) { /* Nothing to do on the port's MAC */ return 0; @@ -3890,8 +3890,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .get_sset_count = mv88e6xxx_get_sset_count, .port_enable = mv88e6xxx_port_enable, .port_disable = mv88e6xxx_port_disable, - .set_eee = mv88e6xxx_set_eee, - .get_eee = mv88e6xxx_get_eee, + .get_mac_eee = mv88e6xxx_get_mac_eee, + .set_mac_eee = mv88e6xxx_set_mac_eee, .get_eeprom_len = mv88e6xxx_get_eeprom_len, .get_eeprom = mv88e6xxx_get_eeprom, .set_eeprom = mv88e6xxx_set_eeprom, diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index e209e229ed4c..36c169b0c705 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -638,7 +638,7 @@ qca8k_get_sset_count(struct dsa_switch *ds) } static int -qca8k_set_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) +qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u32 lpi_en = QCA8K_REG_EEE_CTRL_LPI_EN(port); @@ -657,8 +657,7 @@ qca8k_set_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee) } static int -qca8k_get_eee(struct dsa_switch *ds, int port, - struct ethtool_eee *e) 
+qca8k_get_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e) { /* Nothing to do on the port's MAC */ return 0; @@ -863,8 +862,8 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .phy_write = qca8k_phy_write, .get_ethtool_stats = qca8k_get_ethtool_stats, .get_sset_count = qca8k_get_sset_count, - .get_eee = qca8k_get_eee, - .set_eee = qca8k_set_eee, + .get_mac_eee = qca8k_get_mac_eee, + .set_mac_eee = qca8k_set_mac_eee, .port_enable = qca8k_port_enable, .port_disable = qca8k_port_disable, .port_stp_state_set = qca8k_port_stp_state_set, diff --git a/include/net/dsa.h b/include/net/dsa.h index ce46db323394..0b1a0622b33c 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -332,12 +332,12 @@ struct dsa_switch_ops { struct phy_device *phy); /* - * EEE setttings + * Port's MAC EEE settings */ - int (*set_eee)(struct dsa_switch *ds, int port, - struct ethtool_eee *e); - int (*get_eee)(struct dsa_switch *ds, int port, - struct ethtool_eee *e); + int (*set_mac_eee)(struct dsa_switch *ds, int port, + struct ethtool_eee *e); + int (*get_mac_eee)(struct dsa_switch *ds, int port, + struct ethtool_eee *e); /* EEPROM access */ int (*get_eeprom_len)(struct dsa_switch *ds); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 9ddc584e70b0..cc4bad3dadb4 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -652,10 +652,10 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) if (!p->phy) return -ENODEV; - if (!ds->ops->set_eee) + if (!ds->ops->set_mac_eee) return -EOPNOTSUPP; - ret = ds->ops->set_eee(ds, p->dp->index, e); + ret = ds->ops->set_mac_eee(ds, p->dp->index, e); if (ret) return ret; @@ -678,10 +678,10 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) if (!p->phy) return -ENODEV; - if (!ds->ops->get_eee) + if (!ds->ops->get_mac_eee) return -EOPNOTSUPP; - ret = ds->ops->get_eee(ds, p->dp->index, e); + ret = ds->ops->get_mac_eee(ds, p->dp->index, e); if (ret) return ret; -- cgit v1.2.3-55-g7522 From ec9567a9e008d1248e4d88f7ff1026ba68133621 Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Tue, 1 Aug 2017 12:49:04 +0300 Subject: esp4: Support RX checksum with crypto offload Keep the device's reported ip_summed indication in case crypto was offloaded by the device. Subtract the csum values of the stripped parts (esp header+iv, esp trailer+auth_data) to keep value correct. Note: CHECKSUM_COMPLETE should be indicated only if skb->csum has the post-decryption offload csum value. 
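The underlying idiom is worth stating once: before stripping bytes from an skb whose ip_summed is CHECKSUM_COMPLETE, fold those bytes' checksum contribution out of skb->csum, then trim or pull. A sketch distilled from the hunks that follow (esp_input_done2() open-codes this inline; the helper name below is illustrative only, not a kernel API):

        #include <linux/skbuff.h>
        #include <net/checksum.h>

        /* Trim 'trimlen' trailer bytes from the tail and pull 'hlen' header
         * bytes (ESP header + IV) from the head while keeping skb->csum
         * consistent for CHECKSUM_COMPLETE.
         */
        static void example_trim_and_pull_rcsum(struct sk_buff *skb,
                                                int trimlen, int hlen)
        {
                if (skb->ip_summed == CHECKSUM_COMPLETE) {
                        __wsum csumdiff = skb_checksum(skb, skb->len - trimlen,
                                                       trimlen, 0);

                        /* remove the tail bytes' contribution */
                        skb->csum = csum_block_sub(skb->csum, csumdiff,
                                                   skb->len - trimlen);
                }
                pskb_trim(skb, skb->len - trimlen);

                /* skb_pull_rcsum() updates skb->csum for the pulled bytes */
                skb_pull_rcsum(skb, hlen);
        }
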
Signed-off-by: Ariel Levkovich Signed-off-by: Ilan Tayari Signed-off-by: Steffen Klassert --- net/ipv4/esp4.c | 14 +++++++++++--- net/ipv4/esp4_offload.c | 4 +++- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 0cbee0a666ff..741acd7b9646 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -510,7 +510,8 @@ int esp_input_done2(struct sk_buff *skb, int err) int elen = skb->len - hlen; int ihl; u8 nexthdr[2]; - int padlen; + int padlen, trimlen; + __wsum csumdiff; if (!xo || (xo && !(xo->flags & CRYPTO_DONE))) kfree(ESP_SKB_CB(skb)->tmp); @@ -568,8 +569,15 @@ int esp_input_done2(struct sk_buff *skb, int err) skb->ip_summed = CHECKSUM_UNNECESSARY; } - pskb_trim(skb, skb->len - alen - padlen - 2); - __skb_pull(skb, hlen); + trimlen = alen + padlen + 2; + if (skb->ip_summed == CHECKSUM_COMPLETE) { + csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0); + skb->csum = csum_block_sub(skb->csum, csumdiff, + skb->len - trimlen); + } + pskb_trim(skb, skb->len - trimlen); + + skb_pull_rcsum(skb, hlen); if (x->props.mode == XFRM_MODE_TUNNEL) skb_reset_transport_header(skb); else diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index e0666016a764..05831dea00f4 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -182,11 +182,13 @@ out: static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb) { struct crypto_aead *aead = x->data; + struct xfrm_offload *xo = xfrm_offload(skb); if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead))) return -EINVAL; - skb->ip_summed = CHECKSUM_NONE; + if (!(xo->flags & CRYPTO_DONE)) + skb->ip_summed = CHECKSUM_NONE; return esp_input_done2(skb, 0); } -- cgit v1.2.3-55-g7522 From e51a64727079f46fc3a99f380de384d5ab01fffa Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Tue, 1 Aug 2017 12:49:05 +0300 Subject: esp6: Support RX checksum with crypto offload Keep the device's reported ip_summed indication in case crypto was offloaded by the device. Subtract the csum values of the stripped parts (esp header+iv, esp trailer+auth_data) to keep value correct. Note: CHECKSUM_COMPLETE should be indicated only if skb->csum has the post-decryption offload csum value. Signed-off-by: Ariel Levkovich Signed-off-by: Ilan Tayari Signed-off-by: Steffen Klassert --- net/ipv6/esp6.c | 14 +++++++++++--- net/ipv6/esp6_offload.c | 4 +++- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 9ed35473dcb5..0ca1db62e381 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -470,7 +470,8 @@ int esp6_input_done2(struct sk_buff *skb, int err) int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead); int elen = skb->len - hlen; int hdr_len = skb_network_header_len(skb); - int padlen; + int padlen, trimlen; + __wsum csumdiff; u8 nexthdr[2]; if (!xo || (xo && !(xo->flags & CRYPTO_DONE))) @@ -492,8 +493,15 @@ int esp6_input_done2(struct sk_buff *skb, int err) /* ... check padding bits here. Silly. 
:-) */ - pskb_trim(skb, skb->len - alen - padlen - 2); - __skb_pull(skb, hlen); + trimlen = alen + padlen + 2; + if (skb->ip_summed == CHECKSUM_COMPLETE) { + csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0); + skb->csum = csum_block_sub(skb->csum, csumdiff, + skb->len - trimlen); + } + pskb_trim(skb, skb->len - trimlen); + + skb_pull_rcsum(skb, hlen); if (x->props.mode == XFRM_MODE_TUNNEL) skb_reset_transport_header(skb); else diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index f02f131f6435..eec3add177fe 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -209,11 +209,13 @@ out: static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb) { struct crypto_aead *aead = x->data; + struct xfrm_offload *xo = xfrm_offload(skb); if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead))) return -EINVAL; - skb->ip_summed = CHECKSUM_NONE; + if (!(xo->flags & CRYPTO_DONE)) + skb->ip_summed = CHECKSUM_NONE; return esp6_input_done2(skb, 0); } -- cgit v1.2.3-55-g7522 From e9cba69448df7686e1c35e74be48fc715d41ac45 Mon Sep 17 00:00:00 2001 From: Yossi Kuperman Date: Tue, 1 Aug 2017 12:49:06 +0300 Subject: xfrm6: Fix CHECKSUM_COMPLETE after IPv6 header push xfrm6_transport_finish rebuilds the IPv6 header based on the original one and pushes it back without fixing skb->csum. Therefore, CHECKSUM_COMPLETE is no longer valid and the packet gets dropped. Fix skb->csum by calling skb_postpush_rcsum. Note: A valid IPv4 header has checksum 0, unlike IPv6. Thus, the change is not needed in the sibling xfrm4_transport_finish function. Signed-off-by: Yossi Kuperman Signed-off-by: Ilan Tayari Signed-off-by: Steffen Klassert --- net/ipv6/xfrm6_input.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index 3ef5d913e7a3..f95943a13abc 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -34,6 +34,7 @@ EXPORT_SYMBOL(xfrm6_rcv_spi); int xfrm6_transport_finish(struct sk_buff *skb, int async) { struct xfrm_offload *xo = xfrm_offload(skb); + int nhlen = skb->data - skb_network_header(skb); skb_network_header(skb)[IP6CB(skb)->nhoff] = XFRM_MODE_SKB_CB(skb)->protocol; @@ -43,8 +44,9 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async) return 1; #endif - __skb_push(skb, skb->data - skb_network_header(skb)); + __skb_push(skb, nhlen); ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); + skb_postpush_rcsum(skb, skb_network_header(skb), nhlen); if (xo && (xo->flags & XFRM_GRO)) { skb_mac_header_rebuild(skb); -- cgit v1.2.3-55-g7522 From a9b28c2bf05d9d9998d5d3c6453fd75bc4cf8a6d Mon Sep 17 00:00:00 2001 From: Yossi Kuperman Date: Tue, 1 Aug 2017 12:49:07 +0300 Subject: esp6: Fix RX checksum after header pull Both ip6_input_finish (non-GRO) and esp6_gro_receive (GRO) strip the IPv6 header without adjusting skb->csum accordingly. As a result CHECKSUM_COMPLETE breaks and "hw csum failure" is written to the kernel log by netdev_rx_csum_fault (dev.c). Fix skb->csum by substracting the checksum value of the pulled IPv6 header using a call to skb_postpull_rcsum. This affects both transport and tunnel modes. Note that the fix occurs far from the place that the header was pulled. 
This is based on existing code, see: ipv6_srh_rcv() in exthdrs.c and rawv6_rcv() in raw.c Signed-off-by: Yossi Kuperman Signed-off-by: Ilan Tayari Signed-off-by: Steffen Klassert --- net/ipv6/esp6.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 0ca1db62e381..74bde202eb9a 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -495,6 +495,8 @@ int esp6_input_done2(struct sk_buff *skb, int err) trimlen = alen + padlen + 2; if (skb->ip_summed == CHECKSUM_COMPLETE) { + skb_postpull_rcsum(skb, skb_network_header(skb), + skb_network_header_len(skb)); csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0); skb->csum = csum_block_sub(skb->csum, csumdiff, skb->len - trimlen); -- cgit v1.2.3-55-g7522 From ffdb5211da1c20354f1b40c204b6cf6c29c68161 Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Tue, 1 Aug 2017 12:49:08 +0300 Subject: xfrm: Auto-load xfrm offload modules IPSec crypto offload depends on the protocol-specific offload module (such as esp_offload.ko). When the user installs an SA with crypto-offload, load the offload module automatically, in the same way that the protocol module is loaded (such as esp.ko) Signed-off-by: Ilan Tayari Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 4 +++- net/ipv4/esp4_offload.c | 1 + net/ipv6/esp6_offload.c | 1 + net/xfrm/xfrm_device.c | 2 +- net/xfrm/xfrm_state.c | 16 ++++++++++++---- net/xfrm/xfrm_user.c | 2 +- 6 files changed, 19 insertions(+), 7 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index afb4929d7232..5a360100136c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -43,6 +43,8 @@ MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap)) #define MODULE_ALIAS_XFRM_TYPE(family, proto) \ MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto)) +#define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \ + MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto)) #ifdef CONFIG_XFRM_STATISTICS #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field) @@ -1558,7 +1560,7 @@ void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); int xfrm_init_replay(struct xfrm_state *x); int xfrm_state_mtu(struct xfrm_state *x, int mtu); -int __xfrm_init_state(struct xfrm_state *x, bool init_replay); +int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload); int xfrm_init_state(struct xfrm_state *x); int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb); int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index 05831dea00f4..aca1c85f0795 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -305,3 +305,4 @@ module_init(esp4_offload_init); module_exit(esp4_offload_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert "); +MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP); diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index eec3add177fe..8d4e2ba9163d 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -334,3 +334,4 @@ module_init(esp6_offload_init); module_exit(esp6_offload_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert "); +MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP); diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 5cd7a244e88d..1904127f5fb8 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -63,7 +63,7 @@ 
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, xfrm_address_t *daddr; if (!x->type_offload) - return 0; + return -EINVAL; /* We don't yet support UDP encapsulation, TFC padding and ESN. */ if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN)) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 82cbbce69b79..a41e2ef789c0 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -296,12 +296,14 @@ int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, } EXPORT_SYMBOL(xfrm_unregister_type_offload); -static const struct xfrm_type_offload *xfrm_get_type_offload(u8 proto, unsigned short family) +static const struct xfrm_type_offload * +xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load) { struct xfrm_state_afinfo *afinfo; const struct xfrm_type_offload **typemap; const struct xfrm_type_offload *type; +retry: afinfo = xfrm_state_get_afinfo(family); if (unlikely(afinfo == NULL)) return NULL; @@ -311,6 +313,12 @@ static const struct xfrm_type_offload *xfrm_get_type_offload(u8 proto, unsigned if ((type && !try_module_get(type->owner))) type = NULL; + if (!type && try_load) { + request_module("xfrm-offload-%d-%d", family, proto); + try_load = 0; + goto retry; + } + rcu_read_unlock(); return type; } @@ -2165,7 +2173,7 @@ int xfrm_state_mtu(struct xfrm_state *x, int mtu) return mtu - x->props.header_len; } -int __xfrm_init_state(struct xfrm_state *x, bool init_replay) +int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload) { struct xfrm_state_afinfo *afinfo; struct xfrm_mode *inner_mode; @@ -2230,7 +2238,7 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay) if (x->type == NULL) goto error; - x->type_offload = xfrm_get_type_offload(x->id.proto, family); + x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload); err = x->type->init_state(x); if (err) @@ -2258,7 +2266,7 @@ EXPORT_SYMBOL(__xfrm_init_state); int xfrm_init_state(struct xfrm_state *x) { - return __xfrm_init_state(x, true); + return __xfrm_init_state(x, true, false); } EXPORT_SYMBOL(xfrm_init_state); diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 1b539b7dcfab..ffe8d5ef09eb 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -584,7 +584,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, xfrm_mark_get(attrs, &x->mark); - err = __xfrm_init_state(x, false); + err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]); if (err) goto error; -- cgit v1.2.3-55-g7522 From 7e9e9202bccc3a8224ae10ad5d69cac8627f9c7b Mon Sep 17 00:00:00 2001 From: Ilan Tayari Date: Tue, 1 Aug 2017 12:49:09 +0300 Subject: xfrm: Clear RX SKB secpath xfrm_offload If an incoming packet undergoes XFRM crypto-offload, its secpath is filled with xfrm_offload struct denoting offload information. If the SKB is then forwarded to a device which supports crypto- offload, the stack wrongfully attempts to offload it (even though the output SA may not exist on the device) due to the leftover secpath xo. Clear the ingress xo by zeroizing secpath->olen just before delivering the decapsulated packet to the network stack. 
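A side note on the auto-loading added by the previous commit in this series ("xfrm: Auto-load xfrm offload modules"): the modalias string and the request_module() format string line up because MODULE_ALIAS_XFRM_OFFLOAD_TYPE() stringifies the numeric values of its arguments. For the esp4_offload case, assuming the usual definitions AF_INET == 2 and XFRM_PROTO_ESP == 50:

        MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
                /* expands via __stringify() to */
        MODULE_ALIAS("xfrm-offload-2-50");

        /* ...which is exactly what the lookup-miss path above requests: */
        request_module("xfrm-offload-%d-%d", family, proto);   /* "xfrm-offload-2-50" */

So installing an SA with the XFRMA_OFFLOAD_DEV attribute now pulls in esp4_offload.ko (or esp6_offload.ko) on demand, in the same way the base protocol module (esp.ko) has long been auto-loaded via its "xfrm-type-..." alias.
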
Fixes: d77e38e612a0 ("xfrm: Add an IPsec hardware offloading API") Signed-off-by: Ilan Tayari Signed-off-by: Steffen Klassert --- net/xfrm/xfrm_input.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 923205e279f7..f07eec59dcae 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -424,6 +424,7 @@ resume: nf_reset(skb); if (decaps) { + skb->sp->olen = 0; skb_dst_drop(skb); gro_cells_receive(&gro_cells, skb); return 0; @@ -434,6 +435,7 @@ resume: err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async); if (xfrm_gro) { + skb->sp->olen = 0; skb_dst_drop(skb); gro_cells_receive(&gro_cells, skb); return err; -- cgit v1.2.3-55-g7522 From f70f250a77313b542531e1ff7a449cd0ccd83ec0 Mon Sep 17 00:00:00 2001 From: Steffen Klassert Date: Tue, 1 Aug 2017 12:49:10 +0300 Subject: net: Allow IPsec GSO for local sockets This patch allows local sockets to make use of XFRM GSO code path. Signed-off-by: Steffen Klassert Signed-off-by: Ilan Tayari --- include/net/xfrm.h | 19 +++++++++++++++++++ net/core/sock.c | 2 +- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 5a360100136c..18d7de34a5c3 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1858,6 +1858,20 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo); bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x); +static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) +{ + struct xfrm_state *x = dst->xfrm; + + if (!x || !x->type_offload) + return false; + + if (x->xso.offload_handle && (x->xso.dev == dst->path->dev) && + !dst->child->xfrm) + return true; + + return false; +} + static inline void xfrm_dev_state_delete(struct xfrm_state *x) { struct xfrm_state_offload *xso = &x->xso; @@ -1900,6 +1914,11 @@ static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x { return false; } + +static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) +{ + return false; +} #endif static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m) diff --git a/net/core/sock.c b/net/core/sock.c index 742f68c9c84a..564f835f408a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1757,7 +1757,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; sk->sk_route_caps &= ~sk->sk_route_nocaps; if (sk_can_gso(sk)) { - if (dst->header_len) { + if (dst->header_len && !xfrm_dst_offload_ok(dst)) { sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; -- cgit v1.2.3-55-g7522 From 5b9ccdcb98429b7e5c814772e3d9448c76441d87 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Sat, 29 Jul 2017 20:31:00 +0900 Subject: netfilter: xtables: Remove unused variable in compat_copy_entry_from_user() The target variable is not used in the compat_copy_entry_from_user(). So It can be removed. 
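The removed local is a classic write-only variable: assigned from t->u.kernel.target and never read afterwards. gcc's -Wunused-but-set-variable diagnoses exactly this pattern, but default kernel builds disable that warning, which is how such leftovers tend to linger. A standalone user-space illustration, using a simplified stand-in for struct xt_entry_target (hypothetical file and type layout, for demonstration only):

        /* dead_store.c: build with
         *     gcc -Wunused-but-set-variable -c dead_store.c
         * gcc warns: variable 'target' set but not used
         */
        struct xt_target;

        struct xt_entry_target {                /* simplified stand-in */
                struct {
                        struct {
                                struct xt_target *target;
                        } kernel;
                } u;
        };

        void compat_copy_example(struct xt_entry_target *t)
        {
                struct xt_target *target;

                target = t->u.kernel.target;    /* written, never read: dead store */
        }
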
Signed-off-by: Taehee Yoo Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/arp_tables.c | 2 -- net/ipv4/netfilter/ip_tables.c | 2 -- 2 files changed, 4 deletions(-) diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 0bc3c3d73e61..cf520d30cb94 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -1117,7 +1117,6 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; - struct xt_target *target; struct arpt_entry *de; unsigned int origsize; int h; @@ -1132,7 +1131,6 @@ compat_copy_entry_from_user(struct compat_arpt_entry *e, void **dstptr, de->target_offset = e->target_offset - (origsize - *size); t = compat_arpt_get_target(e); - target = t->u.kernel.target; xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 2a55a40211cb..f47e8dad5e95 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1355,7 +1355,6 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; - struct xt_target *target; struct ipt_entry *de; unsigned int origsize; int h; @@ -1374,7 +1373,6 @@ compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr, de->target_offset = e->target_offset - (origsize - *size); t = compat_ipt_get_target(e); - target = t->u.kernel.target; xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); -- cgit v1.2.3-55-g7522 From 2a04aabf5c96c9e25df488949b21223bcc623815 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 1 Aug 2017 12:25:01 +0200 Subject: netfilter: constify nf_conntrack_l3/4proto parameters When a nf_conntrack_l3/4proto parameter is not on the left hand side of an assignment, its address is not taken, and it is not passed to a function that may modify its fields, then it can be declared as const. This change is useful from a documentation point of view, and can possibly facilitate making some nf_conntrack_l3/4proto structures const subsequently. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_l4proto.h | 14 +++++++------- include/net/netfilter/nf_conntrack_timeout.h | 2 +- net/netfilter/nf_conntrack_core.c | 8 ++++---- net/netfilter/nf_conntrack_netlink.c | 6 +++--- net/netfilter/nf_conntrack_proto.c | 24 ++++++++++++------------ net/netfilter/nfnetlink_cttimeout.c | 5 +++-- 6 files changed, 30 insertions(+), 29 deletions(-) diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h index 7032e044bbe2..b6e27cafb1d9 100644 --- a/include/net/netfilter/nf_conntrack_l4proto.h +++ b/include/net/netfilter/nf_conntrack_l4proto.h @@ -125,23 +125,23 @@ struct nf_conntrack_l4proto *__nf_ct_l4proto_find(u_int16_t l3proto, struct nf_conntrack_l4proto *nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto); -void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p); +void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p); /* Protocol pernet registration. 
*/ int nf_ct_l4proto_pernet_register_one(struct net *net, - struct nf_conntrack_l4proto *proto); + const struct nf_conntrack_l4proto *proto); void nf_ct_l4proto_pernet_unregister_one(struct net *net, - struct nf_conntrack_l4proto *proto); + const struct nf_conntrack_l4proto *proto); int nf_ct_l4proto_pernet_register(struct net *net, - struct nf_conntrack_l4proto *proto[], + struct nf_conntrack_l4proto *const proto[], unsigned int num_proto); void nf_ct_l4proto_pernet_unregister(struct net *net, - struct nf_conntrack_l4proto *proto[], - unsigned int num_proto); + struct nf_conntrack_l4proto *const proto[], + unsigned int num_proto); /* Protocol global registration. */ int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *proto); -void nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *proto); +void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto); int nf_ct_l4proto_register(struct nf_conntrack_l4proto *proto[], unsigned int num_proto); void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *proto[], diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index d40b89355fdd..b222957062b5 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -68,7 +68,7 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct, static inline unsigned int * nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct, - struct nf_conntrack_l4proto *l4proto) + const struct nf_conntrack_l4proto *l4proto) { #ifdef CONFIG_NF_CONNTRACK_TIMEOUT struct nf_conn_timeout *timeout_ext; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 2bc499186186..f2f00eaf217d 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1176,8 +1176,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free); static noinline struct nf_conntrack_tuple_hash * init_conntrack(struct net *net, struct nf_conn *tmpl, const struct nf_conntrack_tuple *tuple, - struct nf_conntrack_l3proto *l3proto, - struct nf_conntrack_l4proto *l4proto, + const struct nf_conntrack_l3proto *l3proto, + const struct nf_conntrack_l4proto *l4proto, struct sk_buff *skb, unsigned int dataoff, u32 hash) { @@ -1288,8 +1288,8 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl, unsigned int dataoff, u_int16_t l3num, u_int8_t protonum, - struct nf_conntrack_l3proto *l3proto, - struct nf_conntrack_l4proto *l4proto) + const struct nf_conntrack_l3proto *l3proto, + const struct nf_conntrack_l4proto *l4proto) { const struct nf_conntrack_zone *zone; struct nf_conntrack_tuple tuple; diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 4922c8aefb2a..f4ca48817f66 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -61,8 +61,8 @@ MODULE_LICENSE("GPL"); static char __initdata version[] = "0.93"; static int ctnetlink_dump_tuples_proto(struct sk_buff *skb, - const struct nf_conntrack_tuple *tuple, - struct nf_conntrack_l4proto *l4proto) + const struct nf_conntrack_tuple *tuple, + const struct nf_conntrack_l4proto *l4proto) { int ret = 0; struct nlattr *nest_parms; @@ -86,7 +86,7 @@ nla_put_failure: static int ctnetlink_dump_tuples_ip(struct sk_buff *skb, const struct nf_conntrack_tuple *tuple, - struct nf_conntrack_l3proto *l3proto) + const struct nf_conntrack_l3proto *l3proto) { int ret = 0; struct nlattr *nest_parms; diff --git a/net/netfilter/nf_conntrack_proto.c 
b/net/netfilter/nf_conntrack_proto.c index 7c89dade6fd3..27810cf816a6 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -188,7 +188,7 @@ nf_ct_l4proto_find_get(u_int16_t l3num, u_int8_t l4num) } EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get); -void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p) +void nf_ct_l4proto_put(const struct nf_conntrack_l4proto *p) { module_put(p->me); } @@ -257,7 +257,7 @@ void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto) EXPORT_SYMBOL_GPL(nf_ct_l3proto_unregister); static struct nf_proto_net *nf_ct_l4proto_net(struct net *net, - struct nf_conntrack_l4proto *l4proto) + const struct nf_conntrack_l4proto *l4proto) { if (l4proto->get_net_proto) { /* statically built-in protocols use static per-net */ @@ -272,7 +272,7 @@ static struct nf_proto_net *nf_ct_l4proto_net(struct net *net, static int nf_ct_l4proto_register_sysctl(struct net *net, struct nf_proto_net *pn, - struct nf_conntrack_l4proto *l4proto) + const struct nf_conntrack_l4proto *l4proto) { int err = 0; @@ -295,8 +295,8 @@ int nf_ct_l4proto_register_sysctl(struct net *net, static void nf_ct_l4proto_unregister_sysctl(struct net *net, - struct nf_proto_net *pn, - struct nf_conntrack_l4proto *l4proto) + struct nf_proto_net *pn, + const struct nf_conntrack_l4proto *l4proto) { #ifdef CONFIG_SYSCTL if (pn->ctl_table_header != NULL) @@ -366,7 +366,7 @@ out_unlock: EXPORT_SYMBOL_GPL(nf_ct_l4proto_register_one); int nf_ct_l4proto_pernet_register_one(struct net *net, - struct nf_conntrack_l4proto *l4proto) + const struct nf_conntrack_l4proto *l4proto) { int ret = 0; struct nf_proto_net *pn = NULL; @@ -391,7 +391,7 @@ out: } EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register_one); -static void __nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *l4proto) +static void __nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto) { BUG_ON(l4proto->l3proto >= ARRAY_SIZE(nf_ct_protos)); @@ -404,7 +404,7 @@ static void __nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *l4proto) &nf_conntrack_l4proto_generic); } -void nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *l4proto) +void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *l4proto) { mutex_lock(&nf_ct_proto_mutex); __nf_ct_l4proto_unregister_one(l4proto); @@ -415,7 +415,7 @@ void nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *l4proto) EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister_one); void nf_ct_l4proto_pernet_unregister_one(struct net *net, - struct nf_conntrack_l4proto *l4proto) + const struct nf_conntrack_l4proto *l4proto) { struct nf_proto_net *pn = nf_ct_l4proto_net(net, l4proto); @@ -449,7 +449,7 @@ int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto[], EXPORT_SYMBOL_GPL(nf_ct_l4proto_register); int nf_ct_l4proto_pernet_register(struct net *net, - struct nf_conntrack_l4proto *l4proto[], + struct nf_conntrack_l4proto *const l4proto[], unsigned int num_proto) { int ret = -EINVAL; @@ -485,8 +485,8 @@ void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[], EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister); void nf_ct_l4proto_pernet_unregister(struct net *net, - struct nf_conntrack_l4proto *l4proto[], - unsigned int num_proto) + struct nf_conntrack_l4proto *const l4proto[], + unsigned int num_proto) { while (num_proto-- != 0) nf_ct_l4proto_pernet_unregister_one(net, l4proto[num_proto]); diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index f4fb6d4dd0b9..fcabccc99f0d 100644 --- 
a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -47,7 +47,8 @@ static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = { }; static int -ctnl_timeout_parse_policy(void *timeouts, struct nf_conntrack_l4proto *l4proto, +ctnl_timeout_parse_policy(void *timeouts, + const struct nf_conntrack_l4proto *l4proto, struct net *net, const struct nlattr *attr) { int ret = 0; @@ -401,7 +402,7 @@ err: static int cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid, u32 seq, u32 type, int event, - struct nf_conntrack_l4proto *l4proto) + const struct nf_conntrack_l4proto *l4proto) { struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; -- cgit v1.2.3-55-g7522 From 549d2d41c1a448380872858302ee91be5a3ed499 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 1 Aug 2017 12:48:03 +0200 Subject: netfilter: constify nf_loginfo structures The nf_loginfo structures are only passed as the seventh argument to nf_log_trace, which is declared as const or stored in a local const variable. Thus the nf_loginfo structures themselves can be const. Done with the help of Coccinelle. // @r disable optional_qualifier@ identifier i; position p; @@ static struct nf_loginfo i@p = { ... }; @ok1@ identifier r.i; expression list[6] es; position p; @@ nf_log_trace(es,&i@p,...) @ok2@ identifier r.i; const struct nf_loginfo *e; position p; @@ e = &i@p @bad@ position p != {r.p,ok1.p,ok2.p}; identifier r.i; struct nf_loginfo e; @@ e@i@p @depends on !bad disable optional_qualifier@ identifier r.i; @@ static +const struct nf_loginfo i = { ... }; // Signed-off-by: Julia Lawall Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/ip_tables.c | 2 +- net/ipv4/netfilter/nf_log_arp.c | 2 +- net/ipv4/netfilter/nf_log_ipv4.c | 2 +- net/ipv6/netfilter/ip6_tables.c | 2 +- net/ipv6/netfilter/nf_log_ipv6.c | 2 +- net/netfilter/nf_tables_core.c | 2 +- net/netfilter/nfnetlink_log.c | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index f47e8dad5e95..2aea896f5708 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -151,7 +151,7 @@ static const char *const comments[] = { [NF_IP_TRACE_COMMENT_POLICY] = "policy", }; -static struct nf_loginfo trace_loginfo = { +static const struct nf_loginfo trace_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c index 2f3895ddc275..df5c2a2061a4 100644 --- a/net/ipv4/netfilter/nf_log_arp.c +++ b/net/ipv4/netfilter/nf_log_arp.c @@ -25,7 +25,7 @@ #include #include -static struct nf_loginfo default_loginfo = { +static const struct nf_loginfo default_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c index c83a9963269b..4388de0e5380 100644 --- a/net/ipv4/netfilter/nf_log_ipv4.c +++ b/net/ipv4/netfilter/nf_log_ipv4.c @@ -24,7 +24,7 @@ #include #include -static struct nf_loginfo default_loginfo = { +static const struct nf_loginfo default_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 1f90644056ac..9f6644958e5e 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -176,7 +176,7 @@ static const char *const comments[] = { [NF_IP6_TRACE_COMMENT_POLICY] = "policy", }; -static struct nf_loginfo trace_loginfo = { +static const struct nf_loginfo trace_loginfo = { .type = 
NF_LOG_TYPE_LOG, .u = { .log = { diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c index 97c724224da7..b397a8fe88b9 100644 --- a/net/ipv6/netfilter/nf_log_ipv6.c +++ b/net/ipv6/netfilter/nf_log_ipv6.c @@ -25,7 +25,7 @@ #include #include -static struct nf_loginfo default_loginfo = { +static const struct nf_loginfo default_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index c5bab08b0d73..dfd0bf3810d2 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -29,7 +29,7 @@ static const char *const comments[__NFT_TRACETYPE_MAX] = { [NFT_TRACETYPE_RULE] = "rule", }; -static struct nf_loginfo trace_loginfo = { +static const struct nf_loginfo trace_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index c684ba95dbb4..cad6498f10b0 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -606,7 +606,7 @@ nla_put_failure: return -1; } -static struct nf_loginfo default_loginfo = { +static const struct nf_loginfo default_loginfo = { .type = NF_LOG_TYPE_ULOG, .u = { .ulog = { -- cgit v1.2.3-55-g7522 From 956a25c9f120343a9b6ab6564539158ec7237181 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 31 Jul 2017 10:30:54 -0700 Subject: hyperv: netvsc: Neaten netvsc_send_pkt by using a temporary Repeated dereference of nvmsg.msg.v1_msg.send_rndis_pkt can be shortened by using a temporary. Do so. No change in object code. Miscellanea: o Use * const for rpkt and nvchan Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index c64934c64dca..9598220b3bcc 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -747,8 +747,10 @@ static inline int netvsc_send_pkt( struct sk_buff *skb) { struct nvsp_message nvmsg; - struct netvsc_channel *nvchan - = &net_device->chan_table[packet->q_idx]; + struct nvsp_1_message_send_rndis_packet * const rpkt = + &nvmsg.msg.v1_msg.send_rndis_pkt; + struct netvsc_channel * const nvchan = + &net_device->chan_table[packet->q_idx]; struct vmbus_channel *out_channel = nvchan->channel; struct net_device *ndev = hv_get_drvdata(device); struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx); @@ -757,21 +759,16 @@ static inline int netvsc_send_pkt( u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound); nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT; - if (skb != NULL) { - /* 0 is RMC_DATA; */ - nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 0; - } else { - /* 1 is RMC_CONTROL; */ - nvmsg.msg.v1_msg.send_rndis_pkt.channel_type = 1; - } + if (skb) + rpkt->channel_type = 0; /* 0 is RMC_DATA */ + else + rpkt->channel_type = 1; /* 1 is RMC_CONTROL */ - nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_index = - packet->send_buf_index; + rpkt->send_buf_section_index = packet->send_buf_index; if (packet->send_buf_index == NETVSC_INVALID_INDEX) - nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0; + rpkt->send_buf_section_size = 0; else - nvmsg.msg.v1_msg.send_rndis_pkt.send_buf_section_size = - packet->total_data_buflen; + rpkt->send_buf_section_size = packet->total_data_buflen; req_id = (ulong)skb; -- cgit v1.2.3-55-g7522 From aa6c16c43b73b0e4042d84fece85111da1a8087f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann 
Date: Tue, 1 Aug 2017 13:50:56 +0200 Subject: net: bcmgenet: drop COMPILE_TEST dependency The last patch added the dependency on 'OF && HAS_IOMEM' but left COMPILE_TEST as an alternative, which kind of defeats the purpose of adding the dependency, we still get randconfig build warnings: warning: (NET_DSA_BCM_SF2 && BCMGENET) selects MDIO_BCM_UNIMAC which has unmet direct dependencies (NETDEVICES && MDIO_BUS && HAS_IOMEM && OF_MDIO) For compile-testing purposes, we don't really need this anyway, as CONFIG_OF can be enabled on all architectures, and HAS_IOMEM is present on all architectures we do meaningful compile-testing on (the exception being arch/um). This makes both OF and HAS_IOMEM hard dependencies. Fixes: 5af74bb4fcf8 ("net: bcmgenet: Add dependency on HAS_IOMEM && OF") Signed-off-by: Arnd Bergmann Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 45775399cab6..1456cb18f830 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -61,7 +61,7 @@ config BCM63XX_ENET config BCMGENET tristate "Broadcom GENET internal MAC support" - depends on (OF && HAS_IOMEM) || COMPILE_TEST + depends on OF && HAS_IOMEM select MII select PHYLIB select FIXED_PHY -- cgit v1.2.3-55-g7522 From 5357f0bd4edf0b351f2cd98a57386b0324734f67 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 1 Aug 2017 07:02:44 -0700 Subject: tcp: tcp_data_queue() cleanup Commit c13ee2a4f03f ("tcp: reindent two spots after prequeue removal") removed code in tcp_data_queue(). We can go a little farther, removing an always true test, and removing initializers for fragstolen and eaten variables. Signed-off-by: Eric Dumazet Cc: Florian Westphal Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index af0a98d54b62..df670d7ed98d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4564,8 +4564,8 @@ err: static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); - bool fragstolen = false; - int eaten = -1; + bool fragstolen; + int eaten; if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) { __kfree_skb(skb); @@ -4588,12 +4588,11 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) /* Ok. In sequence. In window. */ queue_and_out: - if (eaten < 0) { - if (skb_queue_len(&sk->sk_receive_queue) == 0) - sk_forced_mem_schedule(sk, skb->truesize); - else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) - goto drop; - } + if (skb_queue_len(&sk->sk_receive_queue) == 0) + sk_forced_mem_schedule(sk, skb->truesize); + else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) + goto drop; + eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); if (skb->len) -- cgit v1.2.3-55-g7522 From b2f9d432deebab5096aad5942c2f2b1ec2865f5a Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Tue, 1 Aug 2017 13:18:09 -0700 Subject: flow_dissector: remove unused functions They are introduced by commit f70ea018da06 ("net: Add functions to get skb->hash based on flow structures") but never gets used in tree. Signed-off-by: Cong Wang Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 16 ---------------- net/core/flow_dissector.c | 45 --------------------------------------------- 2 files changed, 61 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 6f9f1b2715ec..be76082f48aa 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1158,8 +1158,6 @@ static inline __u32 skb_get_hash(struct sk_buff *skb) return skb->hash; } -__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6); - static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6) { if (!skb->l4_hash && !skb->sw_hash) { @@ -1172,20 +1170,6 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 return skb->hash; } -__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl); - -static inline __u32 skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4) -{ - if (!skb->l4_hash && !skb->sw_hash) { - struct flow_keys keys; - __u32 hash = __get_hash_from_flowi4(fl4, &keys); - - __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); - } - - return skb->hash; -} - __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb); static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index fc5fc4594c90..0cc672aba1f0 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -998,51 +998,6 @@ __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb) } EXPORT_SYMBOL(skb_get_hash_perturb); -__u32 __skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6 *fl6) -{ - struct flow_keys keys; - - memset(&keys, 0, sizeof(keys)); - - memcpy(&keys.addrs.v6addrs.src, &fl6->saddr, - sizeof(keys.addrs.v6addrs.src)); - memcpy(&keys.addrs.v6addrs.dst, &fl6->daddr, - sizeof(keys.addrs.v6addrs.dst)); - keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; - keys.ports.src = fl6->fl6_sport; - keys.ports.dst = fl6->fl6_dport; - keys.keyid.keyid = fl6->fl6_gre_key; - keys.tags.flow_label = (__force u32)fl6->flowlabel; - keys.basic.ip_proto = fl6->flowi6_proto; - - __skb_set_sw_hash(skb, flow_hash_from_keys(&keys), - flow_keys_have_l4(&keys)); - - return skb->hash; -} -EXPORT_SYMBOL(__skb_get_hash_flowi6); - -__u32 __skb_get_hash_flowi4(struct sk_buff *skb, const struct flowi4 *fl4) -{ - struct flow_keys keys; - - memset(&keys, 0, sizeof(keys)); - - keys.addrs.v4addrs.src = fl4->saddr; - keys.addrs.v4addrs.dst = fl4->daddr; - keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS; - keys.ports.src = fl4->fl4_sport; - keys.ports.dst = fl4->fl4_dport; - keys.keyid.keyid = fl4->fl4_gre_key; - keys.basic.ip_proto = fl4->flowi4_proto; - - __skb_set_sw_hash(skb, flow_hash_from_keys(&keys), - flow_keys_have_l4(&keys)); - - return skb->hash; -} -EXPORT_SYMBOL(__skb_get_hash_flowi4); - u32 __skb_get_poff(const struct sk_buff *skb, void *data, const struct flow_keys *keys, int hlen) { -- cgit v1.2.3-55-g7522 From f613ed665bb3ec49edc4907bd8799e3a2de47df5 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 1 Aug 2017 15:00:36 -0700 Subject: net: dsa: Add support for 64-bit statistics DSA slave network devices maintain a pair of bytes and packets counters for each directions, but these are not 64-bit capable. Re-use pcpu_sw_netstats which contains exactly what we need for that purpose and update the code path to report 64-bit capable statistics. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/dsa.c | 8 ++++++-- net/dsa/dsa_priv.h | 2 ++ net/dsa/slave.c | 38 +++++++++++++++++++++++++++++++------- 3 files changed, 39 insertions(+), 9 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index a55e2e4087a4..0ba842c08dd3 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -190,6 +190,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, { struct dsa_switch_tree *dst = dev->dsa_ptr; struct sk_buff *nskb = NULL; + struct dsa_slave_priv *p; if (unlikely(dst == NULL)) { kfree_skb(skb); @@ -207,12 +208,15 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, } skb = nskb; + p = netdev_priv(skb->dev); skb_push(skb, ETH_HLEN); skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); - skb->dev->stats.rx_packets++; - skb->dev->stats.rx_bytes += skb->len; + u64_stats_update_begin(&p->stats64.syncp); + p->stats64.rx_packets++; + p->stats64.rx_bytes += skb->len; + u64_stats_update_end(&p->stats64.syncp); netif_receive_skb(skb); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 55982cc39b24..7aa0656296c2 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -77,6 +77,8 @@ struct dsa_slave_priv { struct sk_buff * (*xmit)(struct sk_buff *skb, struct net_device *dev); + struct pcpu_sw_netstats stats64; + /* DSA port data, such as switch, port index, etc. */ struct dsa_port *dp; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index cc4bad3dadb4..e196562035b1 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -354,8 +354,10 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev) struct dsa_slave_priv *p = netdev_priv(dev); struct sk_buff *nskb; - dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + u64_stats_update_begin(&p->stats64.syncp); + p->stats64.tx_packets++; + p->stats64.tx_bytes += skb->len; + u64_stats_update_end(&p->stats64.syncp); /* Transmit function may have to reallocate the original SKB, * in which case it must have freed it. Only free it here on error. 
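(The counter updates above rely on the kernel's u64_stats_sync seqcount helpers, which the later hunks of this patch also use on the read side. A minimal sketch of that writer/reader discipline follows; the structure and function names are illustrative, not the driver's own.)

#include <linux/u64_stats_sync.h>

struct example_counters {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;	/* must be set up with u64_stats_init() before use */
};

/* Writer side, e.g. the hot xmit/rx path: bracket the 64-bit updates. */
static void example_counters_add(struct example_counters *c, unsigned int len)
{
	u64_stats_update_begin(&c->syncp);
	c->packets++;
	c->bytes += len;
	u64_stats_update_end(&c->syncp);
}

/* Reader side: retry until a consistent snapshot is observed (matters on 32-bit). */
static void example_counters_read(struct example_counters *c,
				  u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&c->syncp);
		*packets = c->packets;
		*bytes = c->bytes;
	} while (u64_stats_fetch_retry_irq(&c->syncp, start));
}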
@@ -594,11 +596,15 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev, { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->dp->ds; - - data[0] = dev->stats.tx_packets; - data[1] = dev->stats.tx_bytes; - data[2] = dev->stats.rx_packets; - data[3] = dev->stats.rx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&p->stats64.syncp); + data[0] = p->stats64.tx_packets; + data[1] = p->stats64.tx_bytes; + data[2] = p->stats64.rx_packets; + data[3] = p->stats64.rx_bytes; + } while (u64_stats_fetch_retry_irq(&p->stats64.syncp, start)); if (ds->ops->get_ethtool_stats) ds->ops->get_ethtool_stats(ds, p->dp->index, data + 4); } @@ -869,6 +875,22 @@ static int dsa_slave_setup_tc(struct net_device *dev, u32 handle, } } +static void dsa_slave_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + unsigned int start; + + netdev_stats_to_stats64(stats, &dev->stats); + do { + start = u64_stats_fetch_begin_irq(&p->stats64.syncp); + stats->tx_packets = p->stats64.tx_packets; + stats->tx_bytes = p->stats64.tx_bytes; + stats->rx_packets = p->stats64.rx_packets; + stats->rx_bytes = p->stats64.rx_bytes; + } while (u64_stats_fetch_retry_irq(&p->stats64.syncp, start)); +} + void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops) { ops->get_sset_count = dsa_cpu_port_get_sset_count; @@ -944,6 +966,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_bridge_dellink = switchdev_port_bridge_dellink, .ndo_get_phys_port_name = dsa_slave_get_phys_port_name, .ndo_setup_tc = dsa_slave_setup_tc, + .ndo_get_stats64 = dsa_slave_get_stats64, }; static const struct switchdev_ops dsa_slave_switchdev_ops = { @@ -1179,6 +1202,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, slave_dev->vlan_features = master->vlan_features; p = netdev_priv(slave_dev); + u64_stats_init(&p->stats64.syncp); p->dp = &ds->ports[port]; INIT_LIST_HEAD(&p->mall_tc_list); p->xmit = dst->tag_ops->xmit; -- cgit v1.2.3-55-g7522 From cf19a8c3d515d8c9d88d804437c6ac291eeaa2aa Mon Sep 17 00:00:00 2001 From: Derek Chickles Date: Tue, 1 Aug 2017 15:05:07 -0700 Subject: liquidio: set sriov_totalvfs correctly The file /sys/devices/pci000.../sriov_totalvfs is showing a wrong value. Fix it by calling pci_sriov_set_totalvfs() to set the total number of VFs available after calculations for the number of PF and VF queues are made. Signed-off-by: Derek Chickles Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 1d8fefa9ce64..39a8dca35ffa 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1825,6 +1825,11 @@ static int octeon_chip_specific_setup(struct octeon_device *oct) case OCTEON_CN23XX_PCIID_PF: oct->chip_id = OCTEON_CN23XX_PF_VID; ret = setup_cn23xx_octeon_pf_device(oct); +#ifdef CONFIG_PCI_IOV + if (!ret) + pci_sriov_set_totalvfs(oct->pci_dev, + oct->sriov_info.max_vfs); +#endif s = "CN23XX"; break; -- cgit v1.2.3-55-g7522 From 444826a99f2cc339d53581e4e5f014282e4d1147 Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Tue, 1 Aug 2017 19:57:38 -0400 Subject: atm: adummy: constify attribute_group structure Functions working with attribute_groups provided by work with const attribute_group. 
These attribute_group structures do not change at runtime so mark them as const. File size before: text data bss dec hex filename 2033 1448 0 3481 d99 drivers/atm/adummy.o File size after: text data bss dec hex filename 2129 1352 0 3481 d99 drivers/atm/adummy.o This change was made with the help of Coccinelle. Signed-off-by: Amitoj Kaur Chawla Signed-off-by: David S. Miller --- drivers/atm/adummy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c index 1fd25e872ece..da27ddfa75a7 100644 --- a/drivers/atm/adummy.c +++ b/drivers/atm/adummy.c @@ -71,7 +71,7 @@ static struct attribute *adummy_attrs[] = { NULL }; -static struct attribute_group adummy_group_attrs = { +static const struct attribute_group adummy_group_attrs = { .name = NULL, /* We want them in dev's root folder */ .attrs = adummy_attrs }; -- cgit v1.2.3-55-g7522 From 638ce0fc1eeca9b243a20bb64a0d7edd11d92da7 Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Tue, 1 Aug 2017 19:57:47 -0400 Subject: atm: solos-pci: constify attribute_group structures Functions working with attribute_groups provided by work with const attribute_group. These attribute_group structures do not change at runtime so mark them as const. File size before: text data bss dec hex filename 35740 28424 832 64996 fde4 drivers/atm/solos-pci.o File size after: text data bss dec hex filename 35932 28232 832 64996 fde4 drivers/atm/solos-pci.o This change was made with the help of Coccinelle. Signed-off-by: Amitoj Kaur Chawla Signed-off-by: David S. Miller --- drivers/atm/solos-pci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 585984ee7dbd..8754793223cd 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -611,7 +611,7 @@ static struct attribute *solos_attrs[] = { NULL }; -static struct attribute_group solos_attr_group = { +static const struct attribute_group solos_attr_group = { .attrs = solos_attrs, .name = "parameters", }; @@ -628,7 +628,7 @@ static struct attribute *gpio_attrs[] = { NULL }; -static struct attribute_group gpio_attr_group = { +static const struct attribute_group gpio_attr_group = { .attrs = gpio_attrs, .name = "gpio", }; -- cgit v1.2.3-55-g7522 From 0c195567a8f6e82ea5535cd9f1d54a1626dd233e Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 1 Aug 2017 19:58:53 -0700 Subject: netvsc: transparent VF management This patch implements transparent fail over from synthetic NIC to SR-IOV virtual function NIC in Hyper-V environment. It is a better alternative to using bonding as is done now. Instead, the receive and transmit fail over is done internally inside the driver. Using bonding driver has lots of issues because it depends on the script being run early enough in the boot process and with sufficient information to make the association. This patch moves all that functionality into the kernel. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 12 ++ drivers/net/hyperv/netvsc_drv.c | 419 +++++++++++++++++++++++++++++++--------- 2 files changed, 342 insertions(+), 89 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index f2cef5aaed1f..c701b059c5ac 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -680,6 +680,15 @@ struct netvsc_ethtool_stats { unsigned long tx_busy; }; +struct netvsc_vf_pcpu_stats { + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + struct u64_stats_sync syncp; + u32 tx_dropped; +}; + struct netvsc_reconfig { struct list_head list; u32 event; @@ -713,6 +722,9 @@ struct net_device_context { /* State to manage the associated VF interface. */ struct net_device __rcu *vf_netdev; + struct netvsc_vf_pcpu_stats __percpu *vf_stats; + struct work_struct vf_takeover; + struct work_struct vf_notify; /* 1: allocated, serial number is valid. 0: not allocated */ u32 vf_alloc; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 9453eef6d09f..c71728d82049 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -71,6 +72,7 @@ static void netvsc_set_multicast_list(struct net_device *net) static int netvsc_open(struct net_device *net) { struct net_device_context *ndev_ctx = netdev_priv(net); + struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); struct rndis_device *rdev; int ret = 0; @@ -87,15 +89,29 @@ static int netvsc_open(struct net_device *net) netif_tx_wake_all_queues(net); rdev = nvdev->extension; - if (!rdev->link_state && !ndev_ctx->datapath) + + if (!rdev->link_state) netif_carrier_on(net); - return ret; + if (vf_netdev) { + /* Setting synthetic device up transparently sets + * slave as up. If open fails, then slave will be + * still be offline (and not used). + */ + ret = dev_open(vf_netdev); + if (ret) + netdev_warn(net, + "unable to open slave: %s: %d\n", + vf_netdev->name, ret); + } + return 0; } static int netvsc_close(struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); + struct net_device *vf_netdev + = rtnl_dereference(net_device_ctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); int ret; u32 aread, i, msec = 10, retry = 0, retry_max = 20; @@ -141,6 +157,9 @@ static int netvsc_close(struct net_device *net) ret = -ETIMEDOUT; } + if (vf_netdev) + dev_close(vf_netdev); + return ret; } @@ -224,13 +243,11 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev, * * TODO support XPS - but get_xps_queue not exported */ -static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) +static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb) { - unsigned int num_tx_queues = ndev->real_num_tx_queues; int q_idx = sk_tx_queue_get(skb->sk); - if (q_idx < 0 || skb->ooo_okay) { + if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) { /* If forwarding a packet, we use the recorded queue when * available for better cache locality. 
*/ @@ -240,12 +257,33 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, q_idx = netvsc_get_tx_queue(ndev, skb, q_idx); } - while (unlikely(q_idx >= num_tx_queues)) - q_idx -= num_tx_queues; - return q_idx; } +static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, + void *accel_priv, + select_queue_fallback_t fallback) +{ + struct net_device_context *ndc = netdev_priv(ndev); + struct net_device *vf_netdev; + u16 txq; + + rcu_read_lock(); + vf_netdev = rcu_dereference(ndc->vf_netdev); + if (vf_netdev) { + txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; + qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping; + } else { + txq = netvsc_pick_tx(ndev, skb); + } + rcu_read_unlock(); + + while (unlikely(txq >= ndev->real_num_tx_queues)) + txq -= ndev->real_num_tx_queues; + + return txq; +} + static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, struct hv_page_buffer *pb) { @@ -367,6 +405,33 @@ static u32 net_checksum_info(struct sk_buff *skb) return TRANSPORT_INFO_NOT_IP; } +/* Send skb on the slave VF device. */ +static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev, + struct sk_buff *skb) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + unsigned int len = skb->len; + int rc; + + skb->dev = vf_netdev; + skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; + + rc = dev_queue_xmit(skb); + if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) { + struct netvsc_vf_pcpu_stats *pcpu_stats + = this_cpu_ptr(ndev_ctx->vf_stats); + + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->tx_packets++; + pcpu_stats->tx_bytes += len; + u64_stats_update_end(&pcpu_stats->syncp); + } else { + this_cpu_inc(ndev_ctx->vf_stats->tx_dropped); + } + + return rc; +} + static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) { struct net_device_context *net_device_ctx = netdev_priv(net); @@ -375,11 +440,20 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) unsigned int num_data_pgs; struct rndis_message *rndis_msg; struct rndis_packet *rndis_pkt; + struct net_device *vf_netdev; u32 rndis_msg_size; struct rndis_per_packet_info *ppi; u32 hash; struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; + /* if VF is present and up then redirect packets + * already called with rcu_read_lock_bh + */ + vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev); + if (vf_netdev && netif_running(vf_netdev) && + !netpoll_tx_running(net)) + return netvsc_vf_xmit(net, vf_netdev, skb); + /* We will atmost need two pages to describe the rndis * header. We can only transmit MAX_PAGE_BUFFER_COUNT number * of pages in a single packet. If skb is scattered around @@ -658,29 +732,18 @@ int netvsc_recv_callback(struct net_device *net, struct netvsc_device *net_device; u16 q_idx = channel->offermsg.offer.sub_channel_index; struct netvsc_channel *nvchan; - struct net_device *vf_netdev; struct sk_buff *skb; struct netvsc_stats *rx_stats; if (net->reg_state != NETREG_REGISTERED) return NVSP_STAT_FAIL; - /* - * If necessary, inject this packet into the VF interface. - * On Hyper-V, multicast and brodcast packets are only delivered - * to the synthetic interface (after subjecting these to - * policy filters on the host). Deliver these via the VF - * interface in the guest. 
- */ rcu_read_lock(); net_device = rcu_dereference(net_device_ctx->nvdev); if (unlikely(!net_device)) goto drop; nvchan = &net_device->chan_table[q_idx]; - vf_netdev = rcu_dereference(net_device_ctx->vf_netdev); - if (vf_netdev && (vf_netdev->flags & IFF_UP)) - net = vf_netdev; /* Allocate a skb - TODO direct I/O to pages? */ skb = netvsc_alloc_recv_skb(net, &nvchan->napi, @@ -692,8 +755,7 @@ drop: return NVSP_STAT_FAIL; } - if (net != vf_netdev) - skb_record_rx_queue(skb, q_idx); + skb_record_rx_queue(skb, q_idx); /* * Even if injecting the packet, record the statistics @@ -853,6 +915,7 @@ static int netvsc_set_link_ksettings(struct net_device *dev, static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); + struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); struct hv_device *hdev = ndevctx->device_ctx; int orig_mtu = ndev->mtu; @@ -863,6 +926,13 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) if (!nvdev || nvdev->destroy) return -ENODEV; + /* Change MTU of underlying VF netdev first. */ + if (vf_netdev) { + ret = dev_set_mtu(vf_netdev, mtu); + if (ret) + return ret; + } + netif_device_detach(ndev); was_opened = rndis_filter_opened(nvdev); if (was_opened) @@ -883,6 +953,9 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) /* Attempt rollback to original MTU */ ndev->mtu = orig_mtu; rndis_filter_device_add(hdev, &device_info); + + if (vf_netdev) + dev_set_mtu(vf_netdev, orig_mtu); } if (was_opened) @@ -896,16 +969,56 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) return ret; } +static void netvsc_get_vf_stats(struct net_device *net, + struct netvsc_vf_pcpu_stats *tot) +{ + struct net_device_context *ndev_ctx = netdev_priv(net); + int i; + + memset(tot, 0, sizeof(*tot)); + + for_each_possible_cpu(i) { + const struct netvsc_vf_pcpu_stats *stats + = per_cpu_ptr(ndev_ctx->vf_stats, i); + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + rx_packets = stats->rx_packets; + tx_packets = stats->tx_packets; + rx_bytes = stats->rx_bytes; + tx_bytes = stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + tot->rx_packets += rx_packets; + tot->tx_packets += tx_packets; + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; + tot->tx_dropped += stats->tx_dropped; + } +} + static void netvsc_get_stats64(struct net_device *net, struct rtnl_link_stats64 *t) { struct net_device_context *ndev_ctx = netdev_priv(net); struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); - int i; + struct netvsc_vf_pcpu_stats vf_tot; + int i; if (!nvdev) return; + netdev_stats_to_stats64(t, &net->stats); + + netvsc_get_vf_stats(net, &vf_tot); + t->rx_packets += vf_tot.rx_packets; + t->tx_packets += vf_tot.tx_packets; + t->rx_bytes += vf_tot.rx_bytes; + t->tx_bytes += vf_tot.tx_bytes; + t->tx_dropped += vf_tot.tx_dropped; + for (i = 0; i < nvdev->num_chn; i++) { const struct netvsc_channel *nvchan = &nvdev->chan_table[i]; const struct netvsc_stats *stats; @@ -934,12 +1047,6 @@ static void netvsc_get_stats64(struct net_device *net, t->rx_packets += packets; t->multicast += multicast; } - - t->tx_dropped = net->stats.tx_dropped; - t->tx_errors = net->stats.tx_errors; - - t->rx_dropped = net->stats.rx_dropped; - t->rx_errors = net->stats.rx_errors; } static int netvsc_set_mac_addr(struct net_device *ndev, void 
*p) @@ -980,9 +1087,16 @@ static const struct { { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, +}, vf_stats[] = { + { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, + { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, + { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) }, + { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) }, + { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) }, }; #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats) +#define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats) /* 4 statistics per queue (rx/tx packets/bytes) */ #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4) @@ -997,7 +1111,9 @@ static int netvsc_get_sset_count(struct net_device *dev, int string_set) switch (string_set) { case ETH_SS_STATS: - return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev); + return NETVSC_GLOBAL_STATS_LEN + + NETVSC_VF_STATS_LEN + + NETVSC_QUEUE_STATS_LEN(nvdev); default: return -EINVAL; } @@ -1010,6 +1126,7 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); const void *nds = &ndc->eth_stats; const struct netvsc_stats *qstats; + struct netvsc_vf_pcpu_stats sum; unsigned int start; u64 packets, bytes; int i, j; @@ -1020,6 +1137,10 @@ static void netvsc_get_ethtool_stats(struct net_device *dev, for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++) data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset); + netvsc_get_vf_stats(dev, &sum); + for (j = 0; j < NETVSC_VF_STATS_LEN; j++) + data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset); + for (j = 0; j < nvdev->num_chn; j++) { qstats = &nvdev->chan_table[j].tx_stats; @@ -1054,11 +1175,16 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) - memcpy(p + i * ETH_GSTRING_LEN, - netvsc_stats[i].name, ETH_GSTRING_LEN); + for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) { + memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < ARRAY_SIZE(vf_stats); i++) { + memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } - p += i * ETH_GSTRING_LEN; for (i = 0; i < nvdev->num_chn; i++) { sprintf(p, "tx_queue_%u_packets", i); p += ETH_GSTRING_LEN; @@ -1298,8 +1424,7 @@ static void netvsc_link_change(struct work_struct *w) case RNDIS_STATUS_MEDIA_CONNECT: if (rdev->link_state) { rdev->link_state = false; - if (!ndev_ctx->datapath) - netif_carrier_on(net); + netif_carrier_on(net); netif_tx_wake_all_queues(net); } else { notify = true; @@ -1386,6 +1511,104 @@ static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) return NULL; } +/* Called when VF is injecting data into network stack. + * Change the associated network device from VF to netvsc. 
+ * note: already called with rcu_read_lock + */ +static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data); + struct net_device_context *ndev_ctx = netdev_priv(ndev); + struct netvsc_vf_pcpu_stats *pcpu_stats + = this_cpu_ptr(ndev_ctx->vf_stats); + + skb->dev = ndev; + + u64_stats_update_begin(&pcpu_stats->syncp); + pcpu_stats->rx_packets++; + pcpu_stats->rx_bytes += skb->len; + u64_stats_update_end(&pcpu_stats->syncp); + + return RX_HANDLER_ANOTHER; +} + +static int netvsc_vf_join(struct net_device *vf_netdev, + struct net_device *ndev) +{ + struct net_device_context *ndev_ctx = netdev_priv(ndev); + int ret; + + ret = netdev_rx_handler_register(vf_netdev, + netvsc_vf_handle_frame, ndev); + if (ret != 0) { + netdev_err(vf_netdev, + "can not register netvsc VF receive handler (err = %d)\n", + ret); + goto rx_handler_failed; + } + + ret = netdev_upper_dev_link(vf_netdev, ndev); + if (ret != 0) { + netdev_err(vf_netdev, + "can not set master device %s (err = %d)\n", + ndev->name, ret); + goto upper_link_failed; + } + + /* set slave flag before open to prevent IPv6 addrconf */ + vf_netdev->flags |= IFF_SLAVE; + + schedule_work(&ndev_ctx->vf_takeover); + + netdev_info(vf_netdev, "joined to %s\n", ndev->name); + return 0; + +upper_link_failed: + netdev_rx_handler_unregister(vf_netdev); +rx_handler_failed: + return ret; +} + +static void __netvsc_vf_setup(struct net_device *ndev, + struct net_device *vf_netdev) +{ + int ret; + + call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); + + /* Align MTU of VF with master */ + ret = dev_set_mtu(vf_netdev, ndev->mtu); + if (ret) + netdev_warn(vf_netdev, + "unable to change mtu to %u\n", ndev->mtu); + + if (netif_running(ndev)) { + ret = dev_open(vf_netdev); + if (ret) + netdev_warn(vf_netdev, + "unable to open: %d\n", ret); + } +} + +/* Setup VF as slave of the synthetic device. + * Runs in workqueue to avoid recursion in netlink callbacks. + */ +static void netvsc_vf_setup(struct work_struct *w) +{ + struct net_device_context *ndev_ctx + = container_of(w, struct net_device_context, vf_takeover); + struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); + struct net_device *vf_netdev; + + rtnl_lock(); + vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + if (vf_netdev) + __netvsc_vf_setup(ndev, vf_netdev); + + rtnl_unlock(); +} + static int netvsc_register_vf(struct net_device *vf_netdev) { struct net_device *ndev; @@ -1409,10 +1632,12 @@ static int netvsc_register_vf(struct net_device *vf_netdev) if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev)) return NOTIFY_DONE; + if (netvsc_vf_join(vf_netdev, ndev) != 0) + return NOTIFY_DONE; + netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); - /* - * Take a reference on the module. 
- */ + + /* Prevent this module from being unloaded while VF is registered */ try_module_get(THIS_MODULE); dev_hold(vf_netdev); @@ -1420,61 +1645,59 @@ static int netvsc_register_vf(struct net_device *vf_netdev) return NOTIFY_OK; } -static int netvsc_vf_up(struct net_device *vf_netdev) +/* Change datapath */ +static void netvsc_vf_update(struct work_struct *w) { - struct net_device *ndev; + struct net_device_context *ndev_ctx + = container_of(w, struct net_device_context, vf_notify); + struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); struct netvsc_device *netvsc_dev; - struct net_device_context *net_device_ctx; - - ndev = get_netvsc_byref(vf_netdev); - if (!ndev) - return NOTIFY_DONE; - - net_device_ctx = netdev_priv(ndev); - netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); - - netdev_info(ndev, "VF up: %s\n", vf_netdev->name); - - /* - * Open the device before switching data path. - */ - rndis_filter_open(netvsc_dev); - - /* - * notify the host to switch the data path. - */ - netvsc_switch_datapath(ndev, true); - netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name); - - netif_carrier_off(ndev); + struct net_device *vf_netdev; + bool vf_is_up; - /* Now notify peers through VF device. */ - call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev); + rtnl_lock(); + vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); + if (!vf_netdev) + goto unlock; + + netvsc_dev = rtnl_dereference(ndev_ctx->nvdev); + if (!netvsc_dev) + goto unlock; + + vf_is_up = netif_running(vf_netdev); + if (vf_is_up != ndev_ctx->datapath) { + if (vf_is_up) { + netdev_info(ndev, "VF up: %s\n", vf_netdev->name); + rndis_filter_open(netvsc_dev); + netvsc_switch_datapath(ndev, true); + netdev_info(ndev, "Data path switched to VF: %s\n", + vf_netdev->name); + } else { + netdev_info(ndev, "VF down: %s\n", vf_netdev->name); + netvsc_switch_datapath(ndev, false); + rndis_filter_close(netvsc_dev); + netdev_info(ndev, "Data path switched from VF: %s\n", + vf_netdev->name); + } - return NOTIFY_OK; + /* Now notify peers through VF device. */ + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev); + } +unlock: + rtnl_unlock(); } -static int netvsc_vf_down(struct net_device *vf_netdev) +static int netvsc_vf_notify(struct net_device *vf_netdev) { - struct net_device *ndev; - struct netvsc_device *netvsc_dev; struct net_device_context *net_device_ctx; + struct net_device *ndev; ndev = get_netvsc_byref(vf_netdev); if (!ndev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); - netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); - - netdev_info(ndev, "VF down: %s\n", vf_netdev->name); - netvsc_switch_datapath(ndev, false); - netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); - rndis_filter_close(netvsc_dev); - netif_carrier_on(ndev); - - /* Now notify peers through netvsc device. 
*/ - call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev); + schedule_work(&net_device_ctx->vf_notify); return NOTIFY_OK; } @@ -1489,9 +1712,12 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); + cancel_work_sync(&net_device_ctx->vf_takeover); + cancel_work_sync(&net_device_ctx->vf_notify); netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); + netdev_upper_dev_unlink(vf_netdev, ndev); RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL); dev_put(vf_netdev); module_put(THIS_MODULE); @@ -1505,12 +1731,12 @@ static int netvsc_probe(struct hv_device *dev, struct net_device_context *net_device_ctx; struct netvsc_device_info device_info; struct netvsc_device *nvdev; - int ret; + int ret = -ENOMEM; net = alloc_etherdev_mq(sizeof(struct net_device_context), VRSS_CHANNEL_MAX); if (!net) - return -ENOMEM; + goto no_net; netif_carrier_off(net); @@ -1529,6 +1755,13 @@ static int netvsc_probe(struct hv_device *dev, spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); + INIT_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); + INIT_WORK(&net_device_ctx->vf_notify, netvsc_vf_update); + + net_device_ctx->vf_stats + = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); + if (!net_device_ctx->vf_stats) + goto no_stats; net->netdev_ops = &device_ops; net->ethtool_ops = ðtool_ops; @@ -1546,10 +1779,9 @@ static int netvsc_probe(struct hv_device *dev, if (IS_ERR(nvdev)) { ret = PTR_ERR(nvdev); netdev_err(net, "unable to add netvsc device (ret %d)\n", ret); - free_netdev(net); - hv_set_drvdata(dev, NULL); - return ret; + goto rndis_failed; } + memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); /* hw_features computed in rndis_filter_device_add */ @@ -1573,11 +1805,20 @@ static int netvsc_probe(struct hv_device *dev, ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); - rndis_filter_device_remove(dev, nvdev); - free_netdev(net); + goto register_failed; } return ret; + +register_failed: + rndis_filter_device_remove(dev, nvdev); +rndis_failed: + free_percpu(net_device_ctx->vf_stats); +no_stats: + hv_set_drvdata(dev, NULL); + free_netdev(net); +no_net: + return ret; } static int netvsc_remove(struct hv_device *dev) @@ -1611,6 +1852,7 @@ static int netvsc_remove(struct hv_device *dev) hv_set_drvdata(dev, NULL); + free_percpu(ndev_ctx->vf_stats); free_netdev(net); return 0; } @@ -1665,9 +1907,8 @@ static int netvsc_netdev_event(struct notifier_block *this, case NETDEV_UNREGISTER: return netvsc_unregister_vf(event_dev); case NETDEV_UP: - return netvsc_vf_up(event_dev); case NETDEV_DOWN: - return netvsc_vf_down(event_dev); + return netvsc_vf_notify(event_dev); default: return NOTIFY_DONE; } -- cgit v1.2.3-55-g7522 From a5050c61036859e6fd7924f25cc6a97e7462039d Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 1 Aug 2017 19:58:54 -0700 Subject: netvsc: add documentation Add some background documentation on netvsc device options and limitations. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- Documentation/networking/netvsc.txt | 63 +++++++++++++++++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 64 insertions(+) create mode 100644 Documentation/networking/netvsc.txt diff --git a/Documentation/networking/netvsc.txt b/Documentation/networking/netvsc.txt new file mode 100644 index 000000000000..4ddb4e4b0426 --- /dev/null +++ b/Documentation/networking/netvsc.txt @@ -0,0 +1,63 @@ +Hyper-V network driver +====================== + +Compatibility +============= + +This driver is compatible with Windows Server 2012 R2, 2016 and +Windows 10. + +Features +======== + + Checksum offload + ---------------- + The netvsc driver supports checksum offload as long as the + Hyper-V host version does. Windows Server 2016 and Azure + support checksum offload for TCP and UDP for both IPv4 and + IPv6. Windows Server 2012 only supports checksum offload for TCP. + + Receive Side Scaling + -------------------- + Hyper-V supports receive side scaling. For TCP, packets are + distributed among available queues based on IP address and port + number. Current versions of Hyper-V host, only distribute UDP + packets based on the IP source and destination address. + The port number is not used as part of the hash value for UDP. + Fragmented IP packets are not distributed between queues; + all fragmented packets arrive on the first channel. + + Generic Receive Offload, aka GRO + -------------------------------- + The driver supports GRO and it is enabled by default. GRO coalesces + like packets and significantly reduces CPU usage under heavy Rx + load. + + SR-IOV support + -------------- + Hyper-V supports SR-IOV as a hardware acceleration option. If SR-IOV + is enabled in both the vSwitch and the guest configuration, then the + Virtual Function (VF) device is passed to the guest as a PCI + device. In this case, both a synthetic (netvsc) and VF device are + visible in the guest OS and both NIC's have the same MAC address. + + The VF is enslaved by netvsc device. The netvsc driver will transparently + switch the data path to the VF when it is available and up. + Network state (addresses, firewall, etc) should be applied only to the + netvsc device; the slave device should not be accessed directly in + most cases. The exceptions are if some special queue discipline or + flow direction is desired, these should be applied directly to the + VF slave device. + + Receive Buffer + -------------- + Packets are received into a receive area which is created when device + is probed. The receive area is broken into MTU sized chunks and each may + contain one or more packets. The number of receive sections may be changed + via ethtool Rx ring parameters. + + There is a similar send buffer which is used to aggregate packets for sending. + The send area is broken into chunks of 6144 bytes, each of section may + contain one or more packets. The send buffer is an optimization, the driver + will use slower method to handle very large packets or if the send buffer + area is exhausted. 
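(Usage note for the Receive Buffer section above: where the driver wires the receive sections to the standard ethtool ring-parameter interface, the current sizes can be inspected with "ethtool -g <ifname>" and changed with "ethtool -G <ifname> rx <n>"; the interface name and value are placeholders, not values taken from this document.)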
diff --git a/MAINTAINERS b/MAINTAINERS index 207e45310620..448f2f67802f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6258,6 +6258,7 @@ M: Haiyang Zhang M: Stephen Hemminger L: devel@linuxdriverproject.org S: Maintained +F: Documentation/networking/netvsc.txt F: arch/x86/include/asm/mshyperv.h F: arch/x86/include/uapi/asm/hyperv.h F: arch/x86/kernel/cpu/mshyperv.c -- cgit v1.2.3-55-g7522 From 12aa7469d101e139b3728e540884bc7d72dca70a Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 1 Aug 2017 19:58:55 -0700 Subject: netvsc: remove bonding setup script No longer needed, now all managed by transparent VF logic. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- tools/hv/bondvf.sh | 255 ----------------------------------------------------- 1 file changed, 255 deletions(-) delete mode 100755 tools/hv/bondvf.sh diff --git a/tools/hv/bondvf.sh b/tools/hv/bondvf.sh deleted file mode 100755 index 80f102860cf8..000000000000 --- a/tools/hv/bondvf.sh +++ /dev/null @@ -1,255 +0,0 @@ -#!/bin/bash - -# This example script creates bonding network devices based on synthetic NIC -# (the virtual network adapter usually provided by Hyper-V) and the matching -# VF NIC (SRIOV virtual function). So the synthetic NIC and VF NIC can -# function as one network device, and fail over to the synthetic NIC if VF is -# down. -# -# Usage: -# - After configured vSwitch and vNIC with SRIOV, start Linux virtual -# machine (VM) -# - Run this scripts on the VM. It will create configuration files in -# distro specific directory. -# - Reboot the VM, so that the bonding config are enabled. -# -# The config files are DHCP by default. You may edit them if you need to change -# to Static IP or change other settings. -# - -sysdir=/sys/class/net -netvsc_cls={f8615163-df3e-46c5-913f-f2d2f965ed0e} -bondcnt=0 - -# Detect Distro -if [ -f /etc/redhat-release ]; -then - cfgdir=/etc/sysconfig/network-scripts - distro=redhat -elif grep -q 'Ubuntu' /etc/issue -then - cfgdir=/etc/network - distro=ubuntu -elif grep -q 'SUSE' /etc/issue -then - cfgdir=/etc/sysconfig/network - distro=suse -else - echo "Unsupported Distro" - exit 1 -fi - -echo Detected Distro: $distro, or compatible - -# Get a list of ethernet names -list_eth=(`cd $sysdir && ls -d */ | cut -d/ -f1 | grep -v bond`) -eth_cnt=${#list_eth[@]} - -echo List of net devices: - -# Get the MAC addresses -for (( i=0; i < $eth_cnt; i++ )) -do - list_mac[$i]=`cat $sysdir/${list_eth[$i]}/address` - echo ${list_eth[$i]}, ${list_mac[$i]} -done - -# Find NIC with matching MAC -for (( i=0; i < $eth_cnt-1; i++ )) -do - for (( j=i+1; j < $eth_cnt; j++ )) - do - if [ "${list_mac[$i]}" = "${list_mac[$j]}" ] - then - list_match[$i]=${list_eth[$j]} - break - fi - done -done - -function create_eth_cfg_redhat { - local fn=$cfgdir/ifcfg-$1 - - rm -f $fn - echo DEVICE=$1 >>$fn - echo TYPE=Ethernet >>$fn - echo BOOTPROTO=none >>$fn - echo UUID=`uuidgen` >>$fn - echo ONBOOT=yes >>$fn - echo PEERDNS=yes >>$fn - echo IPV6INIT=yes >>$fn - echo MASTER=$2 >>$fn - echo SLAVE=yes >>$fn -} - -function create_eth_cfg_pri_redhat { - create_eth_cfg_redhat $1 $2 -} - -function create_bond_cfg_redhat { - local fn=$cfgdir/ifcfg-$1 - - rm -f $fn - echo DEVICE=$1 >>$fn - echo TYPE=Bond >>$fn - echo BOOTPROTO=dhcp >>$fn - echo UUID=`uuidgen` >>$fn - echo ONBOOT=yes >>$fn - echo PEERDNS=yes >>$fn - echo IPV6INIT=yes >>$fn - echo BONDING_MASTER=yes >>$fn - echo BONDING_OPTS=\"mode=active-backup miimon=100 primary=$2\" >>$fn -} - -function del_eth_cfg_ubuntu { - local mainfn=$cfgdir/interfaces - local 
fnlist=( $mainfn ) - - local dirlist=(`awk '/^[ \t]*source/{print $2}' $mainfn`) - - local i - for i in "${dirlist[@]}" - do - fnlist+=(`ls $i 2>/dev/null`) - done - - local tmpfl=$(mktemp) - - local nic_start='^[ \t]*(auto|iface|mapping|allow-.*)[ \t]+'$1 - local nic_end='^[ \t]*(auto|iface|mapping|allow-.*|source)' - - local fn - for fn in "${fnlist[@]}" - do - awk "/$nic_end/{x=0} x{next} /$nic_start/{x=1;next} 1" \ - $fn >$tmpfl - - cp $tmpfl $fn - done - - rm $tmpfl -} - -function create_eth_cfg_ubuntu { - local fn=$cfgdir/interfaces - - del_eth_cfg_ubuntu $1 - echo $'\n'auto $1 >>$fn - echo iface $1 inet manual >>$fn - echo bond-master $2 >>$fn -} - -function create_eth_cfg_pri_ubuntu { - local fn=$cfgdir/interfaces - - del_eth_cfg_ubuntu $1 - echo $'\n'allow-hotplug $1 >>$fn - echo iface $1 inet manual >>$fn - echo bond-master $2 >>$fn - echo bond-primary $1 >>$fn -} - -function create_bond_cfg_ubuntu { - local fn=$cfgdir/interfaces - - del_eth_cfg_ubuntu $1 - - echo $'\n'auto $1 >>$fn - echo iface $1 inet dhcp >>$fn - echo bond-mode active-backup >>$fn - echo bond-miimon 100 >>$fn - echo bond-slaves none >>$fn -} - -function create_eth_cfg_suse { - local fn=$cfgdir/ifcfg-$1 - - rm -f $fn - echo BOOTPROTO=none >>$fn - echo STARTMODE=auto >>$fn -} - -function create_eth_cfg_pri_suse { - local fn=$cfgdir/ifcfg-$1 - - rm -f $fn - echo BOOTPROTO=none >>$fn - echo STARTMODE=hotplug >>$fn -} - -function create_bond_cfg_suse { - local fn=$cfgdir/ifcfg-$1 - - rm -f $fn - echo BOOTPROTO=dhcp >>$fn - echo STARTMODE=auto >>$fn - echo BONDING_MASTER=yes >>$fn - echo BONDING_SLAVE_0=$2 >>$fn - echo BONDING_SLAVE_1=$3 >>$fn - echo BONDING_MODULE_OPTS=\'mode=active-backup miimon=100 primary=$2\' >>$fn -} - -function create_bond { - local bondname=bond$bondcnt - local primary - local secondary - - local class_id1=`cat $sysdir/$1/device/class_id 2>/dev/null` - local class_id2=`cat $sysdir/$2/device/class_id 2>/dev/null` - - if [ "$class_id1" = "$netvsc_cls" ] - then - primary=$2 - secondary=$1 - elif [ "$class_id2" = "$netvsc_cls" ] - then - primary=$1 - secondary=$2 - else - return 0 - fi - - echo $'\nBond name:' $bondname - - if [ $distro == ubuntu ] - then - local mainfn=$cfgdir/interfaces - local s="^[ \t]*(auto|iface|mapping|allow-.*)[ \t]+${bondname}" - - grep -E "$s" $mainfn - if [ $? -eq 0 ] - then - echo "WARNING: ${bondname} has been configured already" - return - fi - elif [ $distro == redhat ] || [ $distro == suse ] - then - local fn=$cfgdir/ifcfg-$bondname - if [ -f $fn ] - then - echo "WARNING: ${bondname} has been configured already" - return - fi - else - echo "Unsupported Distro: ${distro}" - return - fi - - echo configuring $primary - create_eth_cfg_pri_$distro $primary $bondname - - echo configuring $secondary - create_eth_cfg_$distro $secondary $bondname - - echo creating: $bondname with primary slave: $primary - create_bond_cfg_$distro $bondname $primary $secondary -} - -for (( i=0; i < $eth_cnt-1; i++ )) -do - if [ -n "${list_match[$i]}" ] - then - create_bond ${list_eth[$i]} ${list_match[$i]} - let bondcnt=bondcnt+1 - fi -done -- cgit v1.2.3-55-g7522 From 9820355f6934ba7ba6218abd44df7ee12b65f029 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 2 Aug 2017 09:52:10 +0200 Subject: mlxsw: core: Use correct EMAD transaction ID in debug message 'trans->tid' is only assigned later in the function, resulting in a zero transaction ID. Use 'tid' instead. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index affe84eb4bff..9d5e7cf288be 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -667,7 +667,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core, int err; dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n", - trans->tid, reg->id, mlxsw_reg_id_str(reg->id), + tid, reg->id, mlxsw_reg_id_str(reg->id), mlxsw_core_reg_access_type_str(type)); skb = mlxsw_emad_alloc(mlxsw_core, reg->len); -- cgit v1.2.3-55-g7522 From 475abbf1ef67fdf35eed0720adcf59ed25ad924e Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 2 Aug 2017 09:56:01 +0200 Subject: ipv4: fib: Set offload indication according to nexthop flags We're going to have capable drivers indicate route offload using the nexthop flags, but for non-multipath routes these flags aren't dumped to user space. Instead, set the offload indication in the route message flags. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Acked-by: David Ahern Signed-off-by: David S. Miller --- net/ipv4/fib_semantics.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index b8d18171cca3..f62dc2463280 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1342,6 +1342,8 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) rtm->rtm_flags |= RTNH_F_DEAD; } + if (fi->fib_nh->nh_flags & RTNH_F_OFFLOAD) + rtm->rtm_flags |= RTNH_F_OFFLOAD; #ifdef CONFIG_IP_ROUTE_CLASSID if (fi->fib_nh[0].nh_tclassid && nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid)) -- cgit v1.2.3-55-g7522 From 63e701c15c98741fc8df50eb0315991e6d8040dc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 2 Aug 2017 09:56:02 +0200 Subject: rocker: Provide offload indication using nexthop flags We want to stop using the FIB info's flags to provide the offlaod indication and instead do that on a per-nexthop basis. Convert rocker to do just that. It only supports one nexthop per-route, so conversion is simple. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker_ofdpa.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 600e30e8f0be..da4e26b53a52 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2761,7 +2761,7 @@ static int ofdpa_fib4_add(struct rocker *rocker, fen_info->tb_id, 0); if (err) return err; - fib_info_offload_inc(fen_info->fi); + fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD; return 0; } @@ -2776,7 +2776,7 @@ static int ofdpa_fib4_del(struct rocker *rocker, ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker); if (!ofdpa_port) return 0; - fib_info_offload_dec(fen_info->fi); + fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst), fen_info->dst_len, fen_info->fi, fen_info->tb_id, OFDPA_OP_FLAG_REMOVE); @@ -2803,7 +2803,7 @@ static void ofdpa_fib4_abort(struct rocker *rocker) rocker); if (!ofdpa_port) continue; - fib_info_offload_dec(flow_entry->fi); + flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE, flow_entry); } -- cgit v1.2.3-55-g7522 From 3984d1a89fe7cba04cdcc6084dc1a2024f0190b6 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 2 Aug 2017 09:56:03 +0200 Subject: mlxsw: spectrum_router: Provide offload indication using nexthop flags In a similar fashion to previous patch, use the nexthop flags to provide offload indication instead of the FIB info's flags. In case a nexthop in a multipath route can't be offloaded (gateway's MAC can't be resolved, for example), then its offload flag isn't set. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Tested-by: David Ahern Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 38 ++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index add03fa34a2d..4cdeedf9c61f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2103,13 +2103,47 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) } } +static void +mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + int i; + + if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) { + nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; + return; + } + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + + if (nh->offloaded) + nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD; + else + nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; + } +} + +static void +mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + int i; + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + + nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD; + } +} + static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) { fib_entry->offloaded = true; switch (fib_entry->fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: - fib_info_offload_inc(fib_entry->nh_group->key.fi); + mlxsw_sp_fib4_entry_offload_set(fib_entry); break; case MLXSW_SP_L3_PROTO_IPV6: WARN_ON_ONCE(1); @@ -2121,7 +2155,7 @@ mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) { switch (fib_entry->fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: - fib_info_offload_dec(fib_entry->nh_group->key.fi); + mlxsw_sp_fib4_entry_offload_unset(fib_entry); break; case MLXSW_SP_L3_PROTO_IPV6: WARN_ON_ONCE(1); -- cgit v1.2.3-55-g7522 From 1353ee707362478527d54cd0b25d3ac17c47168d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 2 Aug 2017 09:56:04 +0200 Subject: mlxsw: spectrum_router: Don't check state when refreshing offload indication Previous patch removed the reliance on the counter in the FIB info to set the offload indication, so we no longer need to keep an offload state on each FIB entry and can just set or unset the RTNH_F_OFFLOAD flag in each nexthop. This is also necessary because we're going to need to refresh the offload indication whenever the nexthop group associated with the FIB entry is refreshed. Current check would prevent us from marking a newly resolved nexthop as offloaded if the FIB entry is already marked as offloaded. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Tested-by: David Ahern Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 4cdeedf9c61f..40aecbc116ac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -394,7 +394,6 @@ struct mlxsw_sp_fib_entry { enum mlxsw_sp_fib_entry_type type; struct list_head nexthop_group_node; struct mlxsw_sp_nexthop_group *nh_group; - bool offloaded; }; struct mlxsw_sp_fib4_entry { @@ -2139,8 +2138,6 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) { - fib_entry->offloaded = true; - switch (fib_entry->fib_node->fib->proto) { case MLXSW_SP_L3_PROTO_IPV4: mlxsw_sp_fib4_entry_offload_set(fib_entry); @@ -2160,8 +2157,6 @@ mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) case MLXSW_SP_L3_PROTO_IPV6: WARN_ON_ONCE(1); } - - fib_entry->offloaded = false; } static void @@ -2170,17 +2165,13 @@ mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, { switch (op) { case MLXSW_REG_RALUE_OP_WRITE_DELETE: - if (!fib_entry->offloaded) - return; return mlxsw_sp_fib_entry_offload_unset(fib_entry); case MLXSW_REG_RALUE_OP_WRITE_WRITE: if (err) return; - if (mlxsw_sp_fib_entry_should_offload(fib_entry) && - !fib_entry->offloaded) + if (mlxsw_sp_fib_entry_should_offload(fib_entry)) mlxsw_sp_fib_entry_offload_set(fib_entry); - else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) && - fib_entry->offloaded) + else if (!mlxsw_sp_fib_entry_should_offload(fib_entry)) mlxsw_sp_fib_entry_offload_unset(fib_entry); return; default: -- cgit v1.2.3-55-g7522 From 77d964e66cfad007db14077fc952ce13bdaa2733 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 2 Aug 2017 09:56:05 +0200 Subject: mlxsw: spectrum_router: Refresh offload indication upon group refresh Now that we provide offload indication using the nexthop's flags we must refresh the offload indication whenever the offload state within the group changes. This didn't matter until now, as offload indication was provided using the FIB info flags and multipath routes were marked as offloaded as long as one of the nexthops was offloaded. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Tested-by: David Ahern Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 40aecbc116ac..2f03c7e71584 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1652,6 +1652,24 @@ mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp, return 0; } +static void +mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry, + enum mlxsw_reg_ralue_op op, int err); + +static void +mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp) +{ + enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE; + struct mlxsw_sp_fib_entry *fib_entry; + + list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) { + if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node, + fib_entry)) + continue; + mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0); + } +} + static void mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) @@ -1739,6 +1757,10 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp, dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n"); goto set_trap; } + + /* Offload state within the group changed, so update the flags. */ + mlxsw_sp_nexthop_fib_entries_refresh(nh_grp); + return; set_trap: -- cgit v1.2.3-55-g7522 From 2202e35d47fff379fc744e6bd3c111b018cc77df Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 2 Aug 2017 09:56:06 +0200 Subject: ipv4: fib: Remove unused functions Previous patches converted users of these functions to provide offload indication using the nexthop's flags instead of the FIB info's. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Acked-by: David Ahern Signed-off-by: David S. Miller --- include/net/ip_fib.h | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 41d580c6185f..ef8992d49bc3 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -124,7 +124,6 @@ struct fib_info { #ifdef CONFIG_IP_ROUTE_MULTIPATH int fib_weight; #endif - unsigned int fib_offload_cnt; struct rcu_head rcu; struct fib_nh fib_nh[0]; #define fib_dev fib_nh[0].nh_dev @@ -177,18 +176,6 @@ struct fib_result_nl { __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); -static inline void fib_info_offload_inc(struct fib_info *fi) -{ - fi->fib_offload_cnt++; - fi->fib_flags |= RTNH_F_OFFLOAD; -} - -static inline void fib_info_offload_dec(struct fib_info *fi) -{ - if (--fi->fib_offload_cnt == 0) - fi->fib_flags &= ~RTNH_F_OFFLOAD; -} - #define FIB_RES_SADDR(net, res) \ ((FIB_RES_NH(res).nh_saddr_genid == \ atomic_read(&(net)->ipv4.dev_addr_genid)) ? \ -- cgit v1.2.3-55-g7522 From fc81bab5eeb103711925d7510157cf5cd2b153f4 Mon Sep 17 00:00:00 2001 From: Malcolm Priestley Date: Sun, 30 Jul 2017 09:02:19 +0100 Subject: rtlwifi: rtl_pci_probe: Fix fail path of _rtl_pci_find_adapter _rtl_pci_find_adapter fail path will jump to label fail3 for unsupported adapter types. However, on course for fail3 there will be call rtl_deinit_core before rtl_init_core. For the inclusion of checking pci_iounmap this fail can be moved to fail2. 
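(The fix above reorders which unwind label the adapter-detection failure jumps to; the general rule is that each failure label must undo only what was successfully set up before that point. A generic sketch of this probe unwind pattern follows; every example_* helper is hypothetical and stands in for the driver's real setup/teardown steps.)

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = example_map_bars(pdev);		/* hypothetical: step 1 */
	if (err)
		return err;

	err = example_find_adapter(pdev);	/* hypothetical: step 2 */
	if (err)
		goto fail_unmap;	/* core not initialised yet, so skip its teardown */

	err = example_init_core(pdev);		/* hypothetical: step 3 */
	if (err)
		goto fail_unmap;

	err = example_start_device(pdev);	/* hypothetical: step 4 */
	if (err)
		goto fail_deinit;

	return 0;

fail_deinit:
	example_deinit_core(pdev);	/* only valid once init_core succeeded */
fail_unmap:
	example_unmap_bars(pdev);
	return err;
}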
Fixes [ 4.492963] BUG: unable to handle kernel NULL pointer dereference at (null) [ 4.493067] IP: rtl_deinit_core+0x31/0x90 [rtlwifi] Signed-off-by: Malcolm Priestley Cc: Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/pci.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 032b6317690d..08dc8919ef60 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -2257,7 +2257,7 @@ int rtl_pci_probe(struct pci_dev *pdev, /* find adapter */ if (!_rtl_pci_find_adapter(pdev, hw)) { err = -ENODEV; - goto fail3; + goto fail2; } /* Init IO handler */ @@ -2318,10 +2318,10 @@ fail3: pci_set_drvdata(pdev, NULL); rtl_deinit_core(hw); +fail2: if (rtlpriv->io.pci_mem_start != 0) pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); -fail2: pci_release_regions(pdev); complete(&rtlpriv->firmware_loading_complete); -- cgit v1.2.3-55-g7522 From 9ddb378b237f0321ef0c25c407672591e4c86254 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Mon, 31 Jul 2017 09:49:24 +0000 Subject: mwifiex: correct IE parse during association It is observed that some IEs get missed during association. This patch correct the old IE parse code. sme->ie will be store as wpa ie, wps ie, wapi ie and gen ie accordingly. Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 115 +++++++++++------------ 1 file changed, 55 insertions(+), 60 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 43ecd621d1ef..a6077ab3efc3 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -811,8 +811,8 @@ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode) * is checked to determine WPA version. If buffer length is zero, the existing * WPA IE is reset. 
*/ -static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv, - u8 *ie_data_ptr, u16 ie_len) +static int mwifiex_set_wpa_ie(struct mwifiex_private *priv, + u8 *ie_data_ptr, u16 ie_len) { if (ie_len) { if (ie_len > sizeof(priv->wpa_ie)) { @@ -1351,101 +1351,96 @@ static int mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, u16 ie_len) { - int ret = 0; struct ieee_types_vendor_header *pvendor_ie; const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 }; const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 }; - u16 unparsed_len = ie_len; - int find_wpa_ie = 0; + u16 unparsed_len = ie_len, cur_ie_len; /* If the passed length is zero, reset the buffer */ if (!ie_len) { priv->gen_ie_buf_len = 0; priv->wps.session_enable = false; - return 0; - } else if (!ie_data_ptr) { + } else if (!ie_data_ptr || + ie_len <= sizeof(struct ieee_types_header)) { return -1; } pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr; while (pvendor_ie) { + cur_ie_len = pvendor_ie->len + sizeof(struct ieee_types_header); + + if (pvendor_ie->element_id == WLAN_EID_RSN) { + /* IE is a WPA/WPA2 IE so call set_wpa function */ + mwifiex_set_wpa_ie(priv, (u8 *)pvendor_ie, cur_ie_len); + priv->wps.session_enable = false; + goto next_ie; + } + + if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) { + /* IE is a WAPI IE so call set_wapi function */ + mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie, + cur_ie_len); + goto next_ie; + } + if (pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) { - /* Test to see if it is a WPA IE, if not, then it is a - * gen IE + /* Test to see if it is a WPA IE, if not, then + * it is a gen IE */ if (!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui))) { - find_wpa_ie = 1; - break; + /* IE is a WPA/WPA2 IE so call set_wpa function + */ + mwifiex_set_wpa_ie(priv, (u8 *)pvendor_ie, + cur_ie_len); + priv->wps.session_enable = false; + goto next_ie; } - /* Test to see if it is a WPS IE, if so, enable - * wps session flag - */ if (!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui))) { + /* Test to see if it is a WPS IE, + * if so, enable wps session flag + */ priv->wps.session_enable = true; mwifiex_dbg(priv->adapter, MSG, - "info: WPS Session Enabled.\n"); - ret = mwifiex_set_wps_ie(priv, - (u8 *)pvendor_ie, - unparsed_len); + "WPS Session Enabled.\n"); + mwifiex_set_wps_ie(priv, (u8 *)pvendor_ie, + cur_ie_len); + goto next_ie; } } - if (pvendor_ie->element_id == WLAN_EID_RSN) { - find_wpa_ie = 1; - break; - } + /* Saved in gen_ie, such as P2P IE.etc.*/ - if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) { - /* IE is a WAPI IE so call set_wapi function */ - ret = mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie, - unparsed_len); - return ret; + /* Verify that the passed length is not larger than the + * available space remaining in the buffer + */ + if (cur_ie_len < + (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) { + /* Append the passed data to the end + * of the genIeBuffer + */ + memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, + (u8 *)pvendor_ie, cur_ie_len); + /* Increment the stored buffer length by the + * size passed + */ + priv->gen_ie_buf_len += cur_ie_len; } - unparsed_len -= (pvendor_ie->len + - sizeof(struct ieee_types_header)); +next_ie: + unparsed_len -= cur_ie_len; if (unparsed_len <= sizeof(struct ieee_types_header)) pvendor_ie = NULL; else pvendor_ie = (struct ieee_types_vendor_header *) - (((u8 *)pvendor_ie) + pvendor_ie->len + - sizeof(struct ieee_types_header)); - } - - if (find_wpa_ie) { - /* IE is a WPA/WPA2 IE so call set_wpa function */ - ret = 
mwifiex_set_wpa_ie_helper(priv, (u8 *)pvendor_ie, - unparsed_len); - priv->wps.session_enable = false; - return ret; + (((u8 *)pvendor_ie) + cur_ie_len); } - /* - * Verify that the passed length is not larger than the - * available space remaining in the buffer - */ - if (ie_len < (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) { - - /* Append the passed data to the end of the - genIeBuffer */ - memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr, - ie_len); - /* Increment the stored buffer length by the - size passed */ - priv->gen_ie_buf_len += ie_len; - } else { - /* Passed data does not fit in the remaining - buffer space */ - ret = -1; - } - - /* Return 0, or -1 for error case */ - return ret; + return 0; } /* -- cgit v1.2.3-55-g7522 From 1d9b168d8ea9a0f51947d0e2f84856e77d2fe7ff Mon Sep 17 00:00:00 2001 From: Sven Joachim Date: Mon, 31 Jul 2017 18:10:45 +0200 Subject: rtlwifi: Fix fallback firmware loading Commit f70e4df2b384 ("rtlwifi: Add code to read new versions of firmware") added code to load an old firmware file if the new one is not available. Unfortunately that code is never reached because request_firmware_nowait() does not wait for the firmware to show up and returns 0 even if the file is not there. Use the existing fallback mechanism introduced by commit 62009b7f1279 ("rtlwifi: rtl8192cu: Add new firmware") instead. Fixes: f70e4df2b384 ("rtlwifi: Add code to read new versions of firmware") Cc: stable@vger.kernel.org Signed-off-by: Sven Joachim Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 17 +++++------------ drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 17 +++++------------ 2 files changed, 10 insertions(+), 24 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 56c05c4e1499..f47d839f388d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -187,18 +187,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - /* Failed to get firmware. Check if old version available */ - fw_name = "rtlwifi/rtl8723befw.bin"; - pr_info("Using firmware %s\n", fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, fw_name, - rtlpriv->io.dev, GFP_KERNEL, hw, - rtl_fw_cb); - if (err) { - pr_err("Failed to request firmware!\n"); - vfree(rtlpriv->rtlhal.pfirmware); - rtlpriv->rtlhal.pfirmware = NULL; - return 1; - } + pr_err("Failed to request firmware!\n"); + vfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + return 1; } return 0; } @@ -289,6 +281,7 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8723be_pci", + .alt_fw_name = "rtlwifi/rtl8723befw.bin", .ops = &rtl8723be_hal_ops, .mod_params = &rtl8723be_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index ec2d577ba85b..5925edf7877f 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -216,18 +216,10 @@ int rtl8821ae_init_sw_vars(struct ieee80211_hw *hw) rtlpriv->io.dev, GFP_KERNEL, hw, rtl_fw_cb); if (err) { - /* Failed to get firmware. 
Check if old version available */ - fw_name = "rtlwifi/rtl8821aefw.bin"; - pr_info("Using firmware %s\n", fw_name); - err = request_firmware_nowait(THIS_MODULE, 1, fw_name, - rtlpriv->io.dev, GFP_KERNEL, hw, - rtl_fw_cb); - if (err) { - pr_err("Failed to request normal firmware!\n"); - vfree(rtlpriv->rtlhal.wowlan_firmware); - vfree(rtlpriv->rtlhal.pfirmware); - return 1; - } + pr_err("Failed to request normal firmware!\n"); + vfree(rtlpriv->rtlhal.wowlan_firmware); + vfree(rtlpriv->rtlhal.pfirmware); + return 1; } /*load wowlan firmware*/ pr_info("Using firmware %s\n", wowlan_fw_name); @@ -331,6 +323,7 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = { .bar_id = 2, .write_readback = true, .name = "rtl8821ae_pci", + .alt_fw_name = "rtlwifi/rtl8821aefw.bin", .ops = &rtl8821ae_hal_ops, .mod_params = &rtl8821ae_mod_params, .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, -- cgit v1.2.3-55-g7522 From 4dd07d2b58d5f3c343ee6e4c8ba63edc643cc972 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:43 +0300 Subject: qtnfmac: updates for regulatory support On startup driver obtains regulatory rules from firmware and enables them during wiphy registration. Later on regulatory domain change can be requested by host. In this case firmware is notified about the upcoming changes. If the change is valid, then firmware updates hardware channel configuration and host driver receives updated channel info for each band. Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 81 ++----- drivers/net/wireless/quantenna/qtnfmac/commands.c | 278 ++++++++++++++++++---- drivers/net/wireless/quantenna/qtnfmac/commands.h | 1 + drivers/net/wireless/quantenna/qtnfmac/core.c | 3 + drivers/net/wireless/quantenna/qtnfmac/core.h | 7 +- drivers/net/wireless/quantenna/qtnfmac/qlink.h | 126 +++++++++- 6 files changed, 388 insertions(+), 108 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index e3c090008125..7f70b0aae069 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -700,66 +700,43 @@ static struct cfg80211_ops qtn_cfg80211_ops = { .disconnect = qtnf_disconnect }; -static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy, +static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, struct regulatory_request *req) { - struct qtnf_wmac *mac = wiphy_priv(wiphy); - struct qtnf_bus *bus; - struct qtnf_vif *vif; - struct qtnf_wmac *chan_mac; - int i; + struct qtnf_wmac *mac = wiphy_priv(wiphy_in); + struct qtnf_bus *bus = mac->bus; + struct wiphy *wiphy; + unsigned int mac_idx; enum nl80211_band band; - - bus = mac->bus; + int ret; pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator, req->alpha2[0], req->alpha2[1]); - vif = qtnf_mac_get_base_vif(mac); - if (!vif) { - pr_err("MAC%u: primary VIF is not configured\n", mac->macid); - return; - } - - /* ignore non-ISO3166 country codes */ - for (i = 0; i < sizeof(req->alpha2); i++) { - if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { - pr_err("MAC%u: not an ISO3166 code\n", mac->macid); - return; - } - } - if (!strncasecmp(req->alpha2, bus->hw_info.alpha2_code, - sizeof(req->alpha2))) { - pr_warn("MAC%u: unchanged country code\n", mac->macid); - return; - } - - if (qtnf_cmd_send_regulatory_config(mac, req->alpha2)) { - pr_err("MAC%u: failed to configure regulatory\n", 
mac->macid); + ret = qtnf_cmd_reg_notify(bus, req); + if (ret) { + if (ret != -EOPNOTSUPP && ret != -EALREADY) + pr_err("failed to update reg domain to %c%c\n", + req->alpha2[0], req->alpha2[1]); return; } - for (i = 0; i < bus->hw_info.num_mac; i++) { - chan_mac = bus->mac[i]; - - if (!chan_mac) + for (mac_idx = 0; mac_idx < QTNF_MAX_MAC; ++mac_idx) { + if (!(bus->hw_info.mac_bitmap & (1 << mac_idx))) continue; - if (!(bus->hw_info.mac_bitmap & BIT(i))) - continue; + mac = bus->mac[mac_idx]; + wiphy = priv_to_wiphy(mac); for (band = 0; band < NUM_NL80211_BANDS; ++band) { if (!wiphy->bands[band]) continue; - if (qtnf_cmd_get_mac_chan_info(chan_mac, - wiphy->bands[band])) { - pr_err("MAC%u: can't get channel info\n", - chan_mac->macid); - qtnf_core_detach(bus); - - return; - } + ret = qtnf_cmd_get_mac_chan_info(mac, + wiphy->bands[band]); + if (ret) + pr_err("failed to get chan info for mac %u band %u\n", + mac_idx, band); } } } @@ -844,10 +821,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) } iface_comb = kzalloc(sizeof(*iface_comb), GFP_KERNEL); - if (!iface_comb) { - ret = -ENOMEM; - goto out; - } + if (!iface_comb) + return -ENOMEM; ret = qtnf_wiphy_setup_if_comb(wiphy, iface_comb, &mac->macinfo); if (ret) @@ -889,21 +864,17 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) ether_addr_copy(wiphy->perm_addr, mac->macaddr); if (hw_info->hw_capab & QLINK_HW_SUPPORTS_REG_UPDATE) { - pr_debug("device supports REG_UPDATE\n"); + wiphy->regulatory_flags |= REGULATORY_STRICT_REG | + REGULATORY_CUSTOM_REG; wiphy->reg_notifier = qtnf_cfg80211_reg_notifier; - pr_debug("hint regulatory about EP region: %c%c\n", - hw_info->alpha2_code[0], - hw_info->alpha2_code[1]); - regulatory_hint(wiphy, hw_info->alpha2_code); + wiphy_apply_custom_regulatory(wiphy, hw_info->rd); } else { - pr_debug("device doesn't support REG_UPDATE\n"); wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; } ret = wiphy_register(wiphy); - out: - if (ret < 0) { + if (ret) { kfree(iface_comb); return ret; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index b39dbc3d3c1f..8be96f02888f 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -181,38 +181,6 @@ out: return ret; } -int qtnf_cmd_send_regulatory_config(struct qtnf_wmac *mac, const char *alpha2) -{ - struct sk_buff *cmd_skb; - u16 res_code = QLINK_CMD_RESULT_OK; - int ret; - - cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, - QLINK_CMD_REG_REGION, - sizeof(struct qlink_cmd)); - if (unlikely(!cmd_skb)) - return -ENOMEM; - - qtnf_cmd_skb_put_tlv_arr(cmd_skb, WLAN_EID_COUNTRY, alpha2, - QTNF_MAX_ALPHA_LEN); - - ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); - - if (unlikely(ret)) - goto out; - - if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { - pr_err("MAC%u: CMD failed: %u\n", mac->macid, res_code); - ret = -EFAULT; - goto out; - } - - memcpy(mac->bus->hw_info.alpha2_code, alpha2, - sizeof(mac->bus->hw_info.alpha2_code)); -out: - return ret; -} - int qtnf_cmd_send_config_ap(struct qtnf_vif *vif) { struct sk_buff *cmd_skb; @@ -848,25 +816,168 @@ out: return ret; } +static u32 qtnf_cmd_resp_reg_rule_flags_parse(u32 qflags) +{ + u32 flags = 0; + + if (qflags & QLINK_RRF_NO_OFDM) + flags |= NL80211_RRF_NO_OFDM; + + if (qflags & QLINK_RRF_NO_CCK) + flags |= NL80211_RRF_NO_CCK; + + if (qflags & QLINK_RRF_NO_INDOOR) + flags |= NL80211_RRF_NO_INDOOR; + + if 
(qflags & QLINK_RRF_NO_OUTDOOR) + flags |= NL80211_RRF_NO_OUTDOOR; + + if (qflags & QLINK_RRF_DFS) + flags |= NL80211_RRF_DFS; + + if (qflags & QLINK_RRF_PTP_ONLY) + flags |= NL80211_RRF_PTP_ONLY; + + if (qflags & QLINK_RRF_PTMP_ONLY) + flags |= NL80211_RRF_PTMP_ONLY; + + if (qflags & QLINK_RRF_NO_IR) + flags |= NL80211_RRF_NO_IR; + + if (qflags & QLINK_RRF_AUTO_BW) + flags |= NL80211_RRF_AUTO_BW; + + if (qflags & QLINK_RRF_IR_CONCURRENT) + flags |= NL80211_RRF_IR_CONCURRENT; + + if (qflags & QLINK_RRF_NO_HT40MINUS) + flags |= NL80211_RRF_NO_HT40MINUS; + + if (qflags & QLINK_RRF_NO_HT40PLUS) + flags |= NL80211_RRF_NO_HT40PLUS; + + if (qflags & QLINK_RRF_NO_80MHZ) + flags |= NL80211_RRF_NO_80MHZ; + + if (qflags & QLINK_RRF_NO_160MHZ) + flags |= NL80211_RRF_NO_160MHZ; + + return flags; +} + static int qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus, - const struct qlink_resp_get_hw_info *resp) + const struct qlink_resp_get_hw_info *resp, + size_t info_len) { struct qtnf_hw_info *hwinfo = &bus->hw_info; + const struct qlink_tlv_hdr *tlv; + const struct qlink_tlv_reg_rule *tlv_rule; + struct ieee80211_reg_rule *rule; + u16 tlv_type; + u16 tlv_value_len; + unsigned int rule_idx = 0; + + if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) + return -E2BIG; + + hwinfo->rd = kzalloc(sizeof(*hwinfo->rd) + + sizeof(struct ieee80211_reg_rule) + * resp->n_reg_rules, GFP_KERNEL); + + if (!hwinfo->rd) + return -ENOMEM; hwinfo->num_mac = resp->num_mac; hwinfo->mac_bitmap = resp->mac_bitmap; hwinfo->fw_ver = le32_to_cpu(resp->fw_ver); hwinfo->ql_proto_ver = le16_to_cpu(resp->ql_proto_ver); - memcpy(hwinfo->alpha2_code, resp->alpha2_code, - sizeof(hwinfo->alpha2_code)); hwinfo->total_tx_chain = resp->total_tx_chain; hwinfo->total_rx_chain = resp->total_rx_chain; hwinfo->hw_capab = le32_to_cpu(resp->hw_capab); + hwinfo->rd->n_reg_rules = resp->n_reg_rules; + hwinfo->rd->alpha2[0] = resp->alpha2[0]; + hwinfo->rd->alpha2[1] = resp->alpha2[1]; + + switch (resp->dfs_region) { + case QLINK_DFS_FCC: + hwinfo->rd->dfs_region = NL80211_DFS_FCC; + break; + case QLINK_DFS_ETSI: + hwinfo->rd->dfs_region = NL80211_DFS_ETSI; + break; + case QLINK_DFS_JP: + hwinfo->rd->dfs_region = NL80211_DFS_JP; + break; + case QLINK_DFS_UNSET: + default: + hwinfo->rd->dfs_region = NL80211_DFS_UNSET; + break; + } + + tlv = (const struct qlink_tlv_hdr *)resp->info; + + while (info_len >= sizeof(*tlv)) { + tlv_type = le16_to_cpu(tlv->type); + tlv_value_len = le16_to_cpu(tlv->len); + + if (tlv_value_len + sizeof(*tlv) > info_len) { + pr_warn("malformed TLV 0x%.2X; LEN: %u\n", + tlv_type, tlv_value_len); + return -EINVAL; + } + + switch (tlv_type) { + case QTN_TLV_ID_REG_RULE: + if (rule_idx >= resp->n_reg_rules) { + pr_warn("unexpected number of rules: %u\n", + resp->n_reg_rules); + return -EINVAL; + } + + if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) { + pr_warn("malformed TLV 0x%.2X; LEN: %u\n", + tlv_type, tlv_value_len); + return -EINVAL; + } + + tlv_rule = (const struct qlink_tlv_reg_rule *)tlv; + rule = &hwinfo->rd->reg_rules[rule_idx++]; + + rule->freq_range.start_freq_khz = + le32_to_cpu(tlv_rule->start_freq_khz); + rule->freq_range.end_freq_khz = + le32_to_cpu(tlv_rule->end_freq_khz); + rule->freq_range.max_bandwidth_khz = + le32_to_cpu(tlv_rule->max_bandwidth_khz); + rule->power_rule.max_antenna_gain = + le32_to_cpu(tlv_rule->max_antenna_gain); + rule->power_rule.max_eirp = + le32_to_cpu(tlv_rule->max_eirp); + rule->dfs_cac_ms = + le32_to_cpu(tlv_rule->dfs_cac_ms); + rule->flags = 
qtnf_cmd_resp_reg_rule_flags_parse( + le32_to_cpu(tlv_rule->flags)); + break; + default: + break; + } + + info_len -= tlv_value_len + sizeof(*tlv); + tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); + } + + if (rule_idx != resp->n_reg_rules) { + pr_warn("unexpected number of rules: expected %u got %u\n", + resp->n_reg_rules, rule_idx); + kfree(hwinfo->rd); + hwinfo->rd = NULL; + return -EINVAL; + } pr_info("fw_version=%d, MACs map %#x, alpha2=\"%c%c\", chains Tx=%u Rx=%u\n", hwinfo->fw_ver, hwinfo->mac_bitmap, - hwinfo->alpha2_code[0], hwinfo->alpha2_code[1], + hwinfo->rd->alpha2[0], hwinfo->rd->alpha2[1], hwinfo->total_tx_chain, hwinfo->total_rx_chain); return 0; @@ -1013,14 +1124,24 @@ qtnf_cmd_resp_fill_channels_info(struct ieee80211_supported_band *band, unsigned int chidx = 0; u32 qflags; - kfree(band->channels); - band->channels = NULL; + if (band->channels) { + if (band->n_channels == resp->num_chans) { + memset(band->channels, 0, + sizeof(*band->channels) * band->n_channels); + } else { + kfree(band->channels); + band->n_channels = 0; + band->channels = NULL; + } + } band->n_channels = resp->num_chans; if (band->n_channels == 0) return 0; - band->channels = kcalloc(band->n_channels, sizeof(*chan), GFP_KERNEL); + if (!band->channels) + band->channels = kcalloc(band->n_channels, sizeof(*chan), + GFP_KERNEL); if (!band->channels) { band->n_channels = 0; return -ENOMEM; @@ -1256,6 +1377,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) const struct qlink_resp_get_hw_info *resp; u16 res_code = QLINK_CMD_RESULT_OK; int ret = 0; + size_t info_len; cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, QLINK_CMD_GET_HW_INFO, @@ -1266,7 +1388,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) qtnf_bus_lock(bus); ret = qtnf_cmd_send_with_reply(bus, cmd_skb, &resp_skb, &res_code, - sizeof(*resp), NULL); + sizeof(*resp), &info_len); if (unlikely(ret)) goto out; @@ -1278,7 +1400,7 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) } resp = (const struct qlink_resp_get_hw_info *)resp_skb->data; - ret = qtnf_cmd_resp_proc_hw_info(bus, resp); + ret = qtnf_cmd_resp_proc_hw_info(bus, resp, info_len); out: qtnf_bus_unlock(bus); @@ -1976,3 +2098,77 @@ out: qtnf_bus_unlock(vif->mac->bus); return ret; } + +int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req) +{ + struct sk_buff *cmd_skb; + int ret; + u16 res_code; + struct qlink_cmd_reg_notify *cmd; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, + QLINK_CMD_REG_NOTIFY, + sizeof(*cmd)); + if (!cmd_skb) + return -ENOMEM; + + cmd = (struct qlink_cmd_reg_notify *)cmd_skb->data; + cmd->alpha2[0] = req->alpha2[0]; + cmd->alpha2[1] = req->alpha2[1]; + + switch (req->initiator) { + case NL80211_REGDOM_SET_BY_CORE: + cmd->initiator = QLINK_REGDOM_SET_BY_CORE; + break; + case NL80211_REGDOM_SET_BY_USER: + cmd->initiator = QLINK_REGDOM_SET_BY_USER; + break; + case NL80211_REGDOM_SET_BY_DRIVER: + cmd->initiator = QLINK_REGDOM_SET_BY_DRIVER; + break; + case NL80211_REGDOM_SET_BY_COUNTRY_IE: + cmd->initiator = QLINK_REGDOM_SET_BY_COUNTRY_IE; + break; + } + + switch (req->user_reg_hint_type) { + case NL80211_USER_REG_HINT_USER: + cmd->user_reg_hint_type = QLINK_USER_REG_HINT_USER; + break; + case NL80211_USER_REG_HINT_CELL_BASE: + cmd->user_reg_hint_type = QLINK_USER_REG_HINT_CELL_BASE; + break; + case NL80211_USER_REG_HINT_INDOOR: + cmd->user_reg_hint_type = QLINK_USER_REG_HINT_INDOOR; + break; + } + + qtnf_bus_lock(bus); + + ret = qtnf_cmd_send(bus, cmd_skb, &res_code); + if (ret) + 
goto out; + + switch (res_code) { + case QLINK_CMD_RESULT_ENOTSUPP: + pr_warn("reg update not supported\n"); + ret = -EOPNOTSUPP; + break; + case QLINK_CMD_RESULT_EALREADY: + pr_info("regulatory domain is already set to %c%c", + req->alpha2[0], req->alpha2[1]); + ret = -EALREADY; + break; + case QLINK_CMD_RESULT_OK: + ret = 0; + break; + default: + ret = -EFAULT; + break; + } + +out: + qtnf_bus_unlock(bus); + + return ret; +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 6c51854ef5e7..155b265d42bf 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -70,5 +70,6 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, u16 reason_code); int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up); +int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req); #endif /* QLINK_COMMANDS_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index f053532c0e87..17d17e332a8b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -549,6 +549,9 @@ void qtnf_core_detach(struct qtnf_bus *bus) destroy_workqueue(bus->workqueue); } + kfree(bus->hw_info.rd); + bus->hw_info.rd = NULL; + qtnf_trans_free(bus); } EXPORT_SYMBOL_GPL(qtnf_core_detach); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index a616434281cf..31b7ec2bfd3e 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -42,7 +42,6 @@ #define QTNF_MAX_SSID_LIST_LENGTH 2 #define QTNF_MAX_VSIE_LEN 255 -#define QTNF_MAX_ALPHA_LEN 2 #define QTNF_MAX_INTF 8 #define QTNF_MAX_EVENT_QUEUE_LEN 255 #define QTNF_DEFAULT_BG_SCAN_PERIOD 300 @@ -136,14 +135,14 @@ struct qtnf_wmac { }; struct qtnf_hw_info { + u16 ql_proto_ver; u8 num_mac; u8 mac_bitmap; - u8 alpha2_code[QTNF_MAX_ALPHA_LEN]; u32 fw_ver; - u16 ql_proto_ver; + u32 hw_capab; + struct ieee80211_regdomain *rd; u8 total_tx_chain; u8 total_rx_chain; - u32 hw_capab; }; struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac); diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 6eafc15e0065..e27833b78940 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -19,7 +19,7 @@ #include -#define QLINK_PROTO_VER 3 +#define QLINK_PROTO_VER 4 #define QLINK_MACID_RSVD 0xFF #define QLINK_VIFID_RSVD 0xFF @@ -133,6 +133,9 @@ enum qlink_channel_width { * number of operational channels and information on each of the channel. * This command is generic to a specified MAC, interface index must be set * to QLINK_VIFID_RSVD in command header. + * @QLINK_CMD_REG_NOTIFY: notify device about regulatory domain change. This + * command is supported only if device reports QLINK_HW_SUPPORTS_REG_UPDATE + * capability. 
*/ enum qlink_cmd_type { QLINK_CMD_FW_INIT = 0x0001, @@ -148,7 +151,7 @@ enum qlink_cmd_type { QLINK_CMD_DEL_INTF = 0x0016, QLINK_CMD_CHANGE_INTF = 0x0017, QLINK_CMD_UPDOWN_INTF = 0x0018, - QLINK_CMD_REG_REGION = 0x0019, + QLINK_CMD_REG_NOTIFY = 0x0019, QLINK_CMD_CHANS_INFO_GET = 0x001A, QLINK_CMD_CONFIG_AP = 0x0020, QLINK_CMD_START_AP = 0x0021, @@ -430,6 +433,44 @@ struct qlink_cmd_chans_info_get { u8 band; } __packed; +/** + * enum qlink_reg_initiator - Indicates the initiator of a reg domain request + * + * See &enum nl80211_reg_initiator for more info. + */ +enum qlink_reg_initiator { + QLINK_REGDOM_SET_BY_CORE, + QLINK_REGDOM_SET_BY_USER, + QLINK_REGDOM_SET_BY_DRIVER, + QLINK_REGDOM_SET_BY_COUNTRY_IE, +}; + +/** + * enum qlink_user_reg_hint_type - type of user regulatory hint + * + * See &enum nl80211_user_reg_hint_type for more info. + */ +enum qlink_user_reg_hint_type { + QLINK_USER_REG_HINT_USER = 0, + QLINK_USER_REG_HINT_CELL_BASE = 1, + QLINK_USER_REG_HINT_INDOOR = 2, +}; + +/** + * struct qlink_cmd_reg_notify - data for QLINK_CMD_REG_NOTIFY command + * + * @alpha2: the ISO / IEC 3166 alpha2 country code. + * @initiator: which entity sent the request, one of &enum qlink_reg_initiator. + * @user_reg_hint_type: type of hint for QLINK_REGDOM_SET_BY_USER request, one + * of &enum qlink_user_reg_hint_type. + */ +struct qlink_cmd_reg_notify { + struct qlink_cmd chdr; + u8 alpha2[2]; + u8 initiator; + u8 user_reg_hint_type; +} __packed; + /* QLINK Command Responses messages related definitions */ @@ -438,6 +479,7 @@ enum qlink_cmd_result { QLINK_CMD_RESULT_INVALID, QLINK_CMD_RESULT_ENOTSUPP, QLINK_CMD_RESULT_ENOTFOUND, + QLINK_CMD_RESULT_EALREADY, }; /** @@ -496,6 +538,18 @@ struct qlink_resp_get_mac_info { u8 var_info[0]; } __packed; +/** + * enum qlink_dfs_regions - regulatory DFS regions + * + * Corresponds to &enum nl80211_dfs_regions. + */ +enum qlink_dfs_regions { + QLINK_DFS_UNSET = 0, + QLINK_DFS_FCC = 1, + QLINK_DFS_ETSI = 2, + QLINK_DFS_JP = 3, +}; + /** * struct qlink_resp_get_hw_info - response for QLINK_CMD_GET_HW_INFO command * @@ -504,22 +558,29 @@ struct qlink_resp_get_mac_info { * @fw_ver: wireless hardware firmware version. * @hw_capab: Bitmap of capabilities supported by firmware. * @ql_proto_ver: Version of QLINK protocol used by firmware. - * @country_code: country code ID firmware is configured to. * @num_mac: Number of separate physical radio devices provided by hardware. * @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware. * @total_tx_chains: total number of transmit chains used by device. * @total_rx_chains: total number of receive chains. + * @alpha2: country code ID firmware is configured to. + * @n_reg_rules: number of regulatory rules TLVs in variable portion of the + * message. + * @dfs_region: regulatory DFS region, one of @enum qlink_dfs_region. + * @info: variable-length HW info, can contain QTN_TLV_ID_REG_RULE. 
*/ struct qlink_resp_get_hw_info { struct qlink_resp rhdr; __le32 fw_ver; __le32 hw_capab; __le16 ql_proto_ver; - u8 alpha2_code[2]; u8 num_mac; u8 mac_bitmap; u8 total_tx_chain; u8 total_rx_chain; + u8 alpha2[2]; + u8 n_reg_rules; + u8 dfs_region; + u8 info[0]; } __packed; /** @@ -741,6 +802,7 @@ enum qlink_tlv_id { QTN_TLV_ID_LRETRY_LIMIT = 0x0204, QTN_TLV_ID_BCN_PERIOD = 0x0205, QTN_TLV_ID_DTIM = 0x0206, + QTN_TLV_ID_REG_RULE = 0x0207, QTN_TLV_ID_CHANNEL = 0x020F, QTN_TLV_ID_COVERAGE_CLASS = 0x0213, QTN_TLV_ID_IFACE_LIMIT = 0x0214, @@ -844,12 +906,54 @@ struct qlink_tlv_cclass { u8 cclass; } __packed; -enum qlink_dfs_state { - QLINK_DFS_USABLE, - QLINK_DFS_UNAVAILABLE, - QLINK_DFS_AVAILABLE, +/** + * enum qlink_reg_rule_flags - regulatory rule flags + * + * See description of &enum nl80211_reg_rule_flags + */ +enum qlink_reg_rule_flags { + QLINK_RRF_NO_OFDM = BIT(0), + QLINK_RRF_NO_CCK = BIT(1), + QLINK_RRF_NO_INDOOR = BIT(2), + QLINK_RRF_NO_OUTDOOR = BIT(3), + QLINK_RRF_DFS = BIT(4), + QLINK_RRF_PTP_ONLY = BIT(5), + QLINK_RRF_PTMP_ONLY = BIT(6), + QLINK_RRF_NO_IR = BIT(7), + QLINK_RRF_AUTO_BW = BIT(8), + QLINK_RRF_IR_CONCURRENT = BIT(9), + QLINK_RRF_NO_HT40MINUS = BIT(10), + QLINK_RRF_NO_HT40PLUS = BIT(11), + QLINK_RRF_NO_80MHZ = BIT(12), + QLINK_RRF_NO_160MHZ = BIT(13), }; +/** + * struct qlink_tlv_reg_rule - data for QTN_TLV_ID_REG_RULE TLV + * + * Regulatory rule description. + * + * @start_freq_khz: start frequency of the range the rule is attributed to. + * @end_freq_khz: end frequency of the range the rule is attributed to. + * @max_bandwidth_khz: max bandwidth that channels in specified range can be + * configured to. + * @max_antenna_gain: max antenna gain that can be used in the specified + * frequency range, dBi. + * @max_eirp: maximum EIRP. + * @flags: regulatory rule flags in &enum qlink_reg_rule_flags. + * @dfs_cac_ms: DFS CAC period. + */ +struct qlink_tlv_reg_rule { + struct qlink_tlv_hdr hdr; + __le32 start_freq_khz; + __le32 end_freq_khz; + __le32 max_bandwidth_khz; + __le32 max_antenna_gain; + __le32 max_eirp; + __le32 flags; + __le32 dfs_cac_ms; +} __packed; + enum qlink_channel_flags { QLINK_CHAN_DISABLED = BIT(0), QLINK_CHAN_NO_IR = BIT(1), @@ -865,6 +969,12 @@ enum qlink_channel_flags { QLINK_CHAN_NO_10MHZ = BIT(12), }; +enum qlink_dfs_state { + QLINK_DFS_USABLE, + QLINK_DFS_UNAVAILABLE, + QLINK_DFS_AVAILABLE, +}; + struct qlink_tlv_channel { struct qlink_tlv_hdr hdr; __le16 hw_value; -- cgit v1.2.3-55-g7522 From ea19479f68496f729ddf3f532ec94c6688279eec Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:44 +0300 Subject: qtnfmac: regulatory configuration for self-managed setup Regdomain information needs to be registered with cfg80211 for devices with REGULATORY_WIPHY_SELF_MANAGED flag set. 
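A minimal sketch of how a driver can hand such a regdomain to cfg80211 is shown below. The two regulatory rules and all example_* names are made up for illustration; they are not Quantenna firmware data.

#include <net/cfg80211.h>

static struct ieee80211_regdomain example_regd = {
	.n_reg_rules = 2,
	.alpha2 = "99",		/* conventional "built-in" country code */
	.reg_rules = {
		/* 2.4 GHz channels 1-11, up to 40 MHz, 20 dBm */
		REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
		/* 5 GHz UNII-1, up to 80 MHz, 23 dBm, no initiating radiation */
		REG_RULE(5170 - 10, 5250 + 10, 80, 0, 23, NL80211_RRF_NO_IR),
	},
};

static int example_register_regd(struct wiphy *wiphy)
{
	/* Self-managed wiphys must push the full regdomain themselves... */
	if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
		return regulatory_set_wiphy_regd(wiphy, &example_regd);

	/* ...otherwise a country hint is enough and the core resolves rules. */
	return regulatory_hint(wiphy, example_regd.alpha2);
}

A self-managed wiphy bypasses the core's regdomain intersection, which is why the complete rule set has to be registered rather than merely hinted.
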
Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 7f70b0aae069..e288b1d4432a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -873,6 +873,15 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) } ret = wiphy_register(wiphy); + if (ret < 0) + goto out; + + if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) + ret = regulatory_set_wiphy_regd(wiphy, hw_info->rd); + else if (isalpha(hw_info->rd->alpha2[0]) && + isalpha(hw_info->rd->alpha2[1])) + ret = regulatory_hint(wiphy, hw_info->rd->alpha2); + out: if (ret) { kfree(iface_comb); -- cgit v1.2.3-55-g7522 From 9ef7509505c880f80a99ccd4e593fa181836da2e Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:45 +0300 Subject: qtnfmac: add missing bus lock Add missing bus lock into get_mac_chan_info command. Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/commands.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 8be96f02888f..f5a294f3c2a4 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -1442,6 +1442,9 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac, cmd = (struct qlink_cmd_chans_info_get *)cmd_skb->data; cmd->band = qband; + + qtnf_bus_lock(mac->bus); + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, sizeof(*resp), &info_len); @@ -1465,6 +1468,7 @@ int qtnf_cmd_get_mac_chan_info(struct qtnf_wmac *mac, ret = qtnf_cmd_resp_fill_channels_info(band, resp, info_len); out: + qtnf_bus_unlock(mac->bus); consume_skb(resp_skb); return ret; -- cgit v1.2.3-55-g7522 From 7c04b43984a57bd820d8e646d089d9ae9984c77a Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:46 +0300 Subject: qtnfmac: implement cfg80211 dump_survey handler This patch implements cfg80211 dump_survey handler enabling per-channel survey data reports. 
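cfg80211 drives the dump by calling the handler with idx = 0, 1, 2, ... until it returns -ENOENT, and userspace reads the result through the survey netlink dump (e.g. iw dev <iface> survey dump). A minimal sketch of that contract, with hypothetical example_* names and a placeholder noise value:

#include <net/cfg80211.h>

static int example_dump_survey(struct wiphy *wiphy, struct net_device *dev,
			       int idx, struct survey_info *survey)
{
	struct ieee80211_supported_band *sband;

	/* Map the flat index onto 2.4 GHz channels first, then 5 GHz. */
	sband = wiphy->bands[NL80211_BAND_2GHZ];
	if (sband && idx >= sband->n_channels) {
		idx -= sband->n_channels;
		sband = NULL;
	}
	if (!sband)
		sband = wiphy->bands[NL80211_BAND_5GHZ];
	if (!sband || idx >= sband->n_channels)
		return -ENOENT;			/* dump complete */

	survey->channel = &sband->channels[idx];
	survey->filled = SURVEY_INFO_NOISE_DBM;	/* report only what is known */
	survey->noise = -92;			/* placeholder dBm figure */

	return 0;
}
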
Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 69 +++++++++++++- drivers/net/wireless/quantenna/qtnfmac/commands.c | 107 ++++++++++++++++++++++ drivers/net/wireless/quantenna/qtnfmac/commands.h | 2 + drivers/net/wireless/quantenna/qtnfmac/core.h | 9 ++ drivers/net/wireless/quantenna/qtnfmac/qlink.h | 31 +++++++ 5 files changed, 217 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index e288b1d4432a..23f180b7d43c 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -677,6 +677,72 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev, return 0; } +static int +qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, + int idx, struct survey_info *survey) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct ieee80211_supported_band *sband; + struct ieee80211_channel *chan; + struct qtnf_chan_stats stats; + int ret; + + sband = wiphy->bands[NL80211_BAND_2GHZ]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = NULL; + } + + if (!sband) + sband = wiphy->bands[NL80211_BAND_5GHZ]; + + if (!sband || idx >= sband->n_channels) + return -ENOENT; + + chan = &sband->channels[idx]; + memset(&stats, 0, sizeof(stats)); + + survey->channel = chan; + survey->filled = 0x0; + + ret = qtnf_cmd_get_chan_stats(mac, chan->hw_value, &stats); + switch (ret) { + case 0: + if (unlikely(stats.chan_num != chan->hw_value)) { + pr_err("received stats for channel %d instead of %d\n", + stats.chan_num, chan->hw_value); + ret = -EINVAL; + break; + } + + survey->filled = SURVEY_INFO_TIME | + SURVEY_INFO_TIME_SCAN | + SURVEY_INFO_TIME_BUSY | + SURVEY_INFO_TIME_RX | + SURVEY_INFO_TIME_TX | + SURVEY_INFO_NOISE_DBM; + + survey->time_scan = stats.cca_try; + survey->time = stats.cca_try; + survey->time_tx = stats.cca_tx; + survey->time_rx = stats.cca_rx; + survey->time_busy = stats.cca_busy; + survey->noise = stats.chan_noise; + break; + case -ENOENT: + pr_debug("no stats for channel %u\n", chan->hw_value); + ret = 0; + break; + default: + pr_debug("failed to get chan(%d) stats from card\n", + chan->hw_value); + ret = -EINVAL; + break; + } + + return ret; +} + static struct cfg80211_ops qtn_cfg80211_ops = { .add_virtual_intf = qtnf_add_virtual_intf, .change_virtual_intf = qtnf_change_virtual_intf, @@ -697,7 +763,8 @@ static struct cfg80211_ops qtn_cfg80211_ops = { .set_default_mgmt_key = qtnf_set_default_mgmt_key, .scan = qtnf_scan, .connect = qtnf_connect, - .disconnect = qtnf_disconnect + .disconnect = qtnf_disconnect, + .dump_survey = qtnf_dump_survey }; static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index f5a294f3c2a4..a1ce12082e10 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -1333,6 +1333,62 @@ static int qtnf_cmd_resp_proc_phy_params(struct qtnf_wmac *mac, return 0; } +static int +qtnf_cmd_resp_proc_chan_stat_info(struct qtnf_chan_stats *stats, + const u8 *payload, size_t payload_len) +{ + struct qlink_chan_stats *qlink_stats; + const struct qlink_tlv_hdr *tlv; + size_t tlv_full_len; + u16 tlv_value_len; + u16 tlv_type; + + tlv = (struct qlink_tlv_hdr *)payload; + while (payload_len >= 
sizeof(struct qlink_tlv_hdr)) { + tlv_type = le16_to_cpu(tlv->type); + tlv_value_len = le16_to_cpu(tlv->len); + tlv_full_len = tlv_value_len + sizeof(struct qlink_tlv_hdr); + if (tlv_full_len > payload_len) { + pr_warn("malformed TLV 0x%.2X; LEN: %u\n", + tlv_type, tlv_value_len); + return -EINVAL; + } + switch (tlv_type) { + case QTN_TLV_ID_CHANNEL_STATS: + if (unlikely(tlv_value_len != sizeof(*qlink_stats))) { + pr_err("invalid CHANNEL_STATS entry size\n"); + return -EINVAL; + } + + qlink_stats = (void *)tlv->val; + + stats->chan_num = le32_to_cpu(qlink_stats->chan_num); + stats->cca_tx = le32_to_cpu(qlink_stats->cca_tx); + stats->cca_rx = le32_to_cpu(qlink_stats->cca_rx); + stats->cca_busy = le32_to_cpu(qlink_stats->cca_busy); + stats->cca_try = le32_to_cpu(qlink_stats->cca_try); + stats->chan_noise = qlink_stats->chan_noise; + + pr_debug("chan(%u) try(%u) busy(%u) noise(%d)\n", + stats->chan_num, stats->cca_try, + stats->cca_busy, stats->chan_noise); + break; + default: + pr_warn("Unknown TLV type: %#x\n", + le16_to_cpu(tlv->type)); + } + payload_len -= tlv_full_len; + tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); + } + + if (payload_len) { + pr_warn("malformed TLV buf; bytes left: %zu\n", payload_len); + return -EINVAL; + } + + return 0; +} + int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac) { struct sk_buff *cmd_skb, *resp_skb = NULL; @@ -2176,3 +2232,54 @@ out: return ret; } + +int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, + struct qtnf_chan_stats *stats) +{ + struct sk_buff *cmd_skb, *resp_skb = NULL; + struct qlink_cmd_get_chan_stats *cmd; + struct qlink_resp_get_chan_stats *resp; + size_t var_data_len; + u16 res_code = QLINK_CMD_RESULT_OK; + int ret = 0; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, + QLINK_CMD_CHAN_STATS, + sizeof(*cmd)); + if (!cmd_skb) + return -ENOMEM; + + qtnf_bus_lock(mac->bus); + + cmd = (struct qlink_cmd_get_chan_stats *)cmd_skb->data; + cmd->channel = cpu_to_le16(channel); + + ret = qtnf_cmd_send_with_reply(mac->bus, cmd_skb, &resp_skb, &res_code, + sizeof(*resp), &var_data_len); + if (unlikely(ret)) { + qtnf_bus_unlock(mac->bus); + return ret; + } + + if (unlikely(res_code != QLINK_CMD_RESULT_OK)) { + switch (res_code) { + case QLINK_CMD_RESULT_ENOTFOUND: + ret = -ENOENT; + break; + default: + pr_err("cmd exec failed: 0x%.4X\n", res_code); + ret = -EFAULT; + break; + } + goto out; + } + + resp = (struct qlink_resp_get_chan_stats *)resp_skb->data; + ret = qtnf_cmd_resp_proc_chan_stat_info(stats, resp->info, + var_data_len); + +out: + qtnf_bus_unlock(mac->bus); + consume_skb(resp_skb); + return ret; +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 155b265d42bf..41e2d50988b7 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -71,5 +71,7 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif, int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, bool up); int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req); +int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, + struct qtnf_chan_stats *stats); #endif /* QLINK_COMMANDS_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index 31b7ec2bfd3e..b2d050c4f1e2 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -124,6 +124,15 @@ struct qtnf_mac_info { size_t n_limits; 
}; +struct qtnf_chan_stats { + u32 chan_num; + u32 cca_tx; + u32 cca_rx; + u32 cca_busy; + u32 cca_try; + s8 chan_noise; +}; + struct qtnf_wmac { u8 macid; u8 wiphy_registered; diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index e27833b78940..dd01c0b4f632 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -164,6 +164,7 @@ enum qlink_cmd_type { QLINK_CMD_CHANGE_STA = 0x0051, QLINK_CMD_DEL_STA = 0x0052, QLINK_CMD_SCAN = 0x0053, + QLINK_CMD_CHAN_STATS = 0x0054, QLINK_CMD_CONNECT = 0x0060, QLINK_CMD_DISCONNECT = 0x0061, }; @@ -433,6 +434,16 @@ struct qlink_cmd_chans_info_get { u8 band; } __packed; +/** + * struct qlink_cmd_get_chan_stats - data for QLINK_CMD_CHAN_STATS command + * + * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J + */ +struct qlink_cmd_get_chan_stats { + struct qlink_cmd chdr; + __le16 channel; +} __packed; + /** * enum qlink_reg_initiator - Indicates the initiator of a reg domain request * @@ -635,6 +646,16 @@ struct qlink_resp_phy_params { u8 info[0]; } __packed; +/** + * struct qlink_resp_get_chan_stats - response for QLINK_CMD_CHAN_STATS cmd + * + * @info: variable-length channel info. + */ +struct qlink_resp_get_chan_stats { + struct qlink_cmd rhdr; + u8 info[0]; +} __packed; + /* QLINK Events messages related definitions */ @@ -807,6 +828,7 @@ enum qlink_tlv_id { QTN_TLV_ID_COVERAGE_CLASS = 0x0213, QTN_TLV_ID_IFACE_LIMIT = 0x0214, QTN_TLV_ID_NUM_IFACE_COMB = 0x0215, + QTN_TLV_ID_CHANNEL_STATS = 0x0216, QTN_TLV_ID_STA_BASIC_COUNTERS = 0x0300, QTN_TLV_ID_STA_GENERIC_INFO = 0x0301, QTN_TLV_ID_KEY = 0x0302, @@ -1008,4 +1030,13 @@ struct qlink_auth_encr { u8 control_port_no_encrypt; } __packed; +struct qlink_chan_stats { + __le32 chan_num; + __le32 cca_tx; + __le32 cca_rx; + __le32 cca_busy; + __le32 cca_try; + s8 chan_noise; +} __packed; + #endif /* _QTN_QLINK_H_ */ -- cgit v1.2.3-55-g7522 From 278944482b2970b925d1f5b837e6618764a36125 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:47 +0300 Subject: qtnfmac: implement reporting current channel Implement current channel reporting functionality. Current operating channel can be obtained either directly using cfg80211 get_channel callback or from stats reported by cfg80211 survey_dump callback. 
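A minimal sketch of the .get_channel shape used here, with a hypothetical example_chan_priv structure standing in for the driver's cached state:

#include <net/cfg80211.h>

struct example_chan_priv {
	struct cfg80211_chan_def chandef;	/* cached operating channel */
	bool started;
};

static int example_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
			       struct cfg80211_chan_def *chandef)
{
	struct example_chan_priv *priv = wiphy_priv(wiphy);

	if (!priv->started)
		return -ENODATA;	/* no operating channel yet */

	if (!cfg80211_chandef_valid(&priv->chandef))
		return -ENODATA;	/* cached settings are inconsistent */

	*chandef = priv->chandef;	/* plain struct copy */
	return 0;
}

Once the callback is wired up, the reported channel surfaces in iw dev <iface> info and, together with SURVEY_INFO_IN_USE, in survey dumps.
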
Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 62 ++++++++++++++++++++++- drivers/net/wireless/quantenna/qtnfmac/commands.c | 2 +- drivers/net/wireless/quantenna/qtnfmac/qlink.h | 2 +- 3 files changed, 62 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 23f180b7d43c..ef8089c163bc 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -593,6 +593,7 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + struct cfg80211_chan_def chandef; struct qtnf_bss_config *bss_cfg; int ret; @@ -605,9 +606,20 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, bss_cfg = &vif->bss_cfg; memset(bss_cfg, 0, sizeof(*bss_cfg)); + if (sme->channel) { + /* FIXME: need to set proper nl80211_channel_type value */ + cfg80211_chandef_create(&chandef, sme->channel, + NL80211_CHAN_HT20); + /* fall-back to minimal safe chandef description */ + if (!cfg80211_chandef_valid(&chandef)) + cfg80211_chandef_create(&chandef, sme->channel, + NL80211_CHAN_HT20); + + memcpy(&bss_cfg->chandef, &chandef, sizeof(bss_cfg->chandef)); + } + bss_cfg->ssid_len = sme->ssid_len; memcpy(&bss_cfg->ssid, sme->ssid, bss_cfg->ssid_len); - bss_cfg->chandef.chan = sme->channel; bss_cfg->auth_type = sme->auth_type; bss_cfg->privacy = sme->privacy; bss_cfg->mfp = sme->mfp; @@ -683,10 +695,15 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, { struct qtnf_wmac *mac = wiphy_priv(wiphy); struct ieee80211_supported_band *sband; + struct cfg80211_chan_def *bss_chandef; struct ieee80211_channel *chan; struct qtnf_chan_stats stats; + struct qtnf_vif *vif; int ret; + vif = qtnf_netdev_get_priv(dev); + bss_chandef = &vif->bss_cfg.chandef; + sband = wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { idx -= sband->n_channels; @@ -705,6 +722,10 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, survey->channel = chan; survey->filled = 0x0; + if (bss_chandef->chan) + if (chan->hw_value == bss_chandef->chan->hw_value) + survey->filled |= SURVEY_INFO_IN_USE; + ret = qtnf_cmd_get_chan_stats(mac, chan->hw_value, &stats); switch (ret) { case 0: @@ -743,6 +764,42 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, return ret; } +static int +qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, + struct cfg80211_chan_def *chandef) +{ + struct net_device *ndev = wdev->netdev; + struct qtnf_bss_config *bss_cfg; + struct qtnf_vif *vif; + + if (!ndev) + return -ENODEV; + + vif = qtnf_netdev_get_priv(wdev->netdev); + bss_cfg = &vif->bss_cfg; + + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_STATION: + if (vif->sta_state == QTNF_STA_DISCONNECTED) { + pr_warn("%s: STA disconnected\n", ndev->name); + return -ENODATA; + } + break; + case NL80211_IFTYPE_AP: + if (!(vif->bss_status & QTNF_STATE_AP_START)) { + pr_warn("%s: AP not started\n", ndev->name); + return -ENODATA; + } + break; + default: + pr_err("unsupported vif type (%d)\n", vif->wdev.iftype); + return -ENODATA; + } + + memcpy(chandef, &bss_cfg->chandef, sizeof(*chandef)); + return 0; +} + static struct cfg80211_ops qtn_cfg80211_ops = { .add_virtual_intf = qtnf_add_virtual_intf, .change_virtual_intf = qtnf_change_virtual_intf, @@ -764,7 +821,8 @@ 
static struct cfg80211_ops qtn_cfg80211_ops = { .scan = qtnf_scan, .connect = qtnf_connect, .disconnect = qtnf_disconnect, - .dump_survey = qtnf_dump_survey + .dump_survey = qtnf_dump_survey, + .get_channel = qtnf_get_channel }; static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index a1ce12082e10..c8c0685f81b3 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -2036,7 +2036,7 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, ether_addr_copy(cmd->bssid, bss_cfg->bssid); if (bss_cfg->chandef.chan) - cmd->freq = cpu_to_le16(bss_cfg->chandef.chan->center_freq); + cmd->channel = cpu_to_le16(bss_cfg->chandef.chan->hw_value); cmd->bg_scan_period = cpu_to_le16(bss_cfg->bg_scan_period); diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index dd01c0b4f632..6c6940b503a4 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -384,7 +384,7 @@ enum qlink_sta_connect_flags { struct qlink_cmd_connect { struct qlink_cmd chdr; __le32 flags; - __le16 freq; + __le16 channel; __le16 bg_scan_period; u8 bssid[ETH_ALEN]; u8 payload[0]; -- cgit v1.2.3-55-g7522 From fd19ecebe03e7d7d468a246b8078d062b8e8db25 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:48 +0300 Subject: qtnfmac: fix station leave reason endianness Use proper endianness conversion for client station leave reason. Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/event.c | 4 ++-- drivers/net/wireless/quantenna/qtnfmac/qlink.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 9b61e9a83670..00570de918e6 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -211,8 +211,8 @@ qtnf_event_handle_bss_leave(struct qtnf_vif *vif, pr_debug("VIF%u.%u: disconnected\n", vif->mac->macid, vif->vifid); - cfg80211_disconnected(vif->netdev, leave_info->reason, NULL, 0, 0, - GFP_KERNEL); + cfg80211_disconnected(vif->netdev, le16_to_cpu(leave_info->reason), + NULL, 0, 0, GFP_KERNEL); vif->sta_state = QTNF_STA_DISCONNECTED; netif_carrier_off(vif->netdev); diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 6c6940b503a4..5c2d8f0abd7f 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -733,7 +733,7 @@ struct qlink_event_bss_join { */ struct qlink_event_bss_leave { struct qlink_event ehdr; - u16 reason; + __le16 reason; } __packed; enum qlink_rxmgmt_flags { -- cgit v1.2.3-55-g7522 From 34f1145b2c5ee7eac353c048e964510e252773d0 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:49 +0300 Subject: qtnfmac: move current channel info from vif to mac Wireless cfg80211 core supplies channel settings in cfg80211_ap_settings structure for each BSS in multiple BSS configuration. On the other hand all the virtual interfaces on one radio are using the same PHY settings including channel. 
Move chandef structure from vif to mac structure in order to mantain the only instance of cfg80211_chan_def structure in qtnf_wmac rather than its multiple copies in qtnf_vif. Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 38 +++++++++++++++-------- drivers/net/wireless/quantenna/qtnfmac/commands.c | 6 ++-- drivers/net/wireless/quantenna/qtnfmac/core.h | 2 +- 3 files changed, 29 insertions(+), 17 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index ef8089c163bc..d47050934f00 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -266,11 +266,19 @@ static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *settings) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + struct qtnf_wmac *mac = wiphy_priv(wiphy); struct qtnf_bss_config *bss_cfg; int ret; - bss_cfg = &vif->bss_cfg; + if (!cfg80211_chandef_identical(&mac->chandef, &settings->chandef)) { + memcpy(&mac->chandef, &settings->chandef, sizeof(mac->chandef)); + if (vif->vifid != 0) + pr_warn("%s: unexpected chan %u (%u MHz)\n", dev->name, + settings->chandef.chan->hw_value, + settings->chandef.chan->center_freq); + } + bss_cfg = &vif->bss_cfg; memset(bss_cfg, 0, sizeof(*bss_cfg)); bss_cfg->bcn_period = settings->beacon_interval; @@ -281,8 +289,6 @@ static int qtnf_start_ap(struct wiphy *wiphy, struct net_device *dev, bss_cfg->ssid_len = settings->ssid_len; memcpy(&bss_cfg->ssid, settings->ssid, bss_cfg->ssid_len); - memcpy(&bss_cfg->chandef, &settings->chandef, - sizeof(struct cfg80211_chan_def)); memcpy(&bss_cfg->crypto, &settings->crypto, sizeof(struct cfg80211_crypto_settings)); @@ -593,6 +599,7 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + struct qtnf_wmac *mac = wiphy_priv(wiphy); struct cfg80211_chan_def chandef; struct qtnf_bss_config *bss_cfg; int ret; @@ -615,7 +622,7 @@ qtnf_connect(struct wiphy *wiphy, struct net_device *dev, cfg80211_chandef_create(&chandef, sme->channel, NL80211_CHAN_HT20); - memcpy(&bss_cfg->chandef, &chandef, sizeof(bss_cfg->chandef)); + memcpy(&mac->chandef, &chandef, sizeof(mac->chandef)); } bss_cfg->ssid_len = sme->ssid_len; @@ -695,14 +702,14 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, { struct qtnf_wmac *mac = wiphy_priv(wiphy); struct ieee80211_supported_band *sband; - struct cfg80211_chan_def *bss_chandef; + struct cfg80211_chan_def *chandef; struct ieee80211_channel *chan; struct qtnf_chan_stats stats; struct qtnf_vif *vif; int ret; vif = qtnf_netdev_get_priv(dev); - bss_chandef = &vif->bss_cfg.chandef; + chandef = &mac->chandef; sband = wiphy->bands[NL80211_BAND_2GHZ]; if (sband && idx >= sband->n_channels) { @@ -722,9 +729,10 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, survey->channel = chan; survey->filled = 0x0; - if (bss_chandef->chan) - if (chan->hw_value == bss_chandef->chan->hw_value) - survey->filled |= SURVEY_INFO_IN_USE; + if (chandef->chan) { + if (chan->hw_value == chandef->chan->hw_value) + survey->filled = SURVEY_INFO_IN_USE; + } ret = qtnf_cmd_get_chan_stats(mac, chan->hw_value, &stats); switch (ret) { @@ -736,7 +744,7 @@ qtnf_dump_survey(struct wiphy *wiphy, struct net_device *dev, break; } - survey->filled = 
SURVEY_INFO_TIME | + survey->filled |= SURVEY_INFO_TIME | SURVEY_INFO_TIME_SCAN | SURVEY_INFO_TIME_BUSY | SURVEY_INFO_TIME_RX | @@ -768,15 +776,14 @@ static int qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_chan_def *chandef) { + struct qtnf_wmac *mac = wiphy_priv(wiphy); struct net_device *ndev = wdev->netdev; - struct qtnf_bss_config *bss_cfg; struct qtnf_vif *vif; if (!ndev) return -ENODEV; vif = qtnf_netdev_get_priv(wdev->netdev); - bss_cfg = &vif->bss_cfg; switch (vif->wdev.iftype) { case NL80211_IFTYPE_STATION: @@ -796,7 +803,12 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, return -ENODATA; } - memcpy(chandef, &bss_cfg->chandef, sizeof(*chandef)); + if (!cfg80211_chandef_valid(&mac->chandef)) { + pr_err("invalid channel settings on %s\n", ndev->name); + return -ENODATA; + } + + memcpy(chandef, &mac->chandef, sizeof(*chandef)); return 0; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index c8c0685f81b3..a3c3dddb194c 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -185,7 +185,7 @@ int qtnf_cmd_send_config_ap(struct qtnf_vif *vif) { struct sk_buff *cmd_skb; struct qtnf_bss_config *bss_cfg = &vif->bss_cfg; - struct cfg80211_chan_def *chandef = &bss_cfg->chandef; + struct cfg80211_chan_def *chandef = &vif->mac->chandef; struct qlink_tlv_channel *qchan; struct qlink_auth_encr aen; u16 res_code = QLINK_CMD_RESULT_OK; @@ -2035,8 +2035,8 @@ int qtnf_cmd_send_connect(struct qtnf_vif *vif, ether_addr_copy(cmd->bssid, bss_cfg->bssid); - if (bss_cfg->chandef.chan) - cmd->channel = cpu_to_le16(bss_cfg->chandef.chan->hw_value); + if (vif->mac->chandef.chan) + cmd->channel = cpu_to_le16(vif->mac->chandef.chan->hw_value); cmd->bg_scan_period = cpu_to_le16(bss_cfg->bg_scan_period); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index b2d050c4f1e2..6830ff45976d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -67,7 +67,6 @@ struct qtnf_bss_config { u16 auth_type; bool privacy; enum nl80211_mfp mfp; - struct cfg80211_chan_def chandef; struct cfg80211_crypto_settings crypto; u16 bg_scan_period; u32 connect_flags; @@ -141,6 +140,7 @@ struct qtnf_wmac { struct qtnf_mac_info macinfo; struct qtnf_vif iflist[QTNF_MAX_INTF]; struct cfg80211_scan_request *scan_req; + struct cfg80211_chan_def chandef; }; struct qtnf_hw_info { -- cgit v1.2.3-55-g7522 From 97883695d596e296bf327b65e1a4db32fa302d16 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:50 +0300 Subject: qtnfmac: implement cfg80211 channel_switch handler This patch implements cfg80211 channel_switch handler enabling CSA channel-switch procedure. Driver performs only basic validation of the requested new channel and then sends command to firmware. Beacon IEs are not sent since beacon update is handled by firmware. 
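A minimal sketch of that validation shape follows; fw_request_csa() and the example_* names are placeholders, not qtnfmac functions.

#include <net/cfg80211.h>

struct example_csa_priv {
	struct cfg80211_chan_def chandef;	/* current operating channel */
};

static int fw_request_csa(struct example_csa_priv *priv,
			  const struct cfg80211_chan_def *target,
			  u8 beacon_count, bool block_tx)
{
	/* stand-in for the channel-switch command sent to firmware */
	return 0;
}

static int example_channel_switch(struct wiphy *wiphy, struct net_device *dev,
				  struct cfg80211_csa_settings *params)
{
	struct example_csa_priv *priv = wiphy_priv(wiphy);

	if (!cfg80211_chandef_valid(&params->chandef))
		return -EINVAL;		/* malformed target channel */

	if (cfg80211_chandef_identical(&params->chandef, &priv->chandef))
		return -EALREADY;	/* already operating on that channel */

	/* Firmware counts down params->count beacons and performs the switch,
	 * so no beacon IEs are pushed from the host. */
	return fw_request_csa(priv, &params->chandef, params->count,
			      params->block_tx);
}
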
Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 60 +++++++++++++++++++++- drivers/net/wireless/quantenna/qtnfmac/commands.c | 55 ++++++++++++++++++++ drivers/net/wireless/quantenna/qtnfmac/commands.h | 2 + drivers/net/wireless/quantenna/qtnfmac/core.h | 6 +++ drivers/net/wireless/quantenna/qtnfmac/event.c | 61 +++++++++++++++++++++++ drivers/net/wireless/quantenna/qtnfmac/qlink.h | 28 +++++++++++ 6 files changed, 210 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index d47050934f00..ac8fdc1db482 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -812,6 +812,59 @@ qtnf_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev, return 0; } +static int qtnf_channel_switch(struct wiphy *wiphy, struct net_device *dev, + struct cfg80211_csa_settings *params) +{ + struct qtnf_wmac *mac = wiphy_priv(wiphy); + struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); + int ret; + + pr_debug("%s: chan(%u) count(%u) radar(%u) block_tx(%u)\n", dev->name, + params->chandef.chan->hw_value, params->count, + params->radar_required, params->block_tx); + + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_AP: + if (!(vif->bss_status & QTNF_STATE_AP_START)) { + pr_warn("AP not started on %s\n", dev->name); + return -ENOTCONN; + } + break; + default: + pr_err("unsupported vif type (%d) on %s\n", + vif->wdev.iftype, dev->name); + return -EOPNOTSUPP; + } + + if (vif->vifid != 0) { + if (!(mac->status & QTNF_MAC_CSA_ACTIVE)) + return -EOPNOTSUPP; + + if (!cfg80211_chandef_identical(¶ms->chandef, + &mac->csa_chandef)) + return -EINVAL; + + return 0; + } + + if (!cfg80211_chandef_valid(¶ms->chandef)) { + pr_err("%s: invalid channel\n", dev->name); + return -EINVAL; + } + + if (cfg80211_chandef_identical(¶ms->chandef, &mac->chandef)) { + pr_err("%s: switch request to the same channel\n", dev->name); + return -EALREADY; + } + + ret = qtnf_cmd_send_chan_switch(mac, params); + if (ret) + pr_warn("%s: failed to switch to channel (%u)\n", + dev->name, params->chandef.chan->hw_value); + + return ret; +} + static struct cfg80211_ops qtn_cfg80211_ops = { .add_virtual_intf = qtnf_add_virtual_intf, .change_virtual_intf = qtnf_change_virtual_intf, @@ -834,7 +887,8 @@ static struct cfg80211_ops qtn_cfg80211_ops = { .connect = qtnf_connect, .disconnect = qtnf_disconnect, .dump_survey = qtnf_dump_survey, - .get_channel = qtnf_get_channel + .get_channel = qtnf_get_channel, + .channel_switch = qtnf_channel_switch }; static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in, @@ -981,6 +1035,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->iface_combinations = iface_comb; wiphy->n_iface_combinations = 1; + wiphy->max_num_csa_counters = 2; /* Initialize cipher suits */ wiphy->cipher_suites = qtnf_cipher_suites; @@ -988,7 +1043,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac) wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | - WIPHY_FLAG_AP_UAPSD; + WIPHY_FLAG_AP_UAPSD | + WIPHY_FLAG_HAS_CHANNEL_SWITCH; wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2; diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c 
b/drivers/net/wireless/quantenna/qtnfmac/commands.c index a3c3dddb194c..524269d2c30c 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -2283,3 +2283,58 @@ out: consume_skb(resp_skb); return ret; } + +int qtnf_cmd_send_chan_switch(struct qtnf_wmac *mac, + struct cfg80211_csa_settings *params) +{ + struct qlink_cmd_chan_switch *cmd; + struct sk_buff *cmd_skb; + u16 res_code = QLINK_CMD_RESULT_OK; + int ret; + + cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0x0, + QLINK_CMD_CHAN_SWITCH, + sizeof(*cmd)); + + if (unlikely(!cmd_skb)) + return -ENOMEM; + + qtnf_bus_lock(mac->bus); + + cmd = (struct qlink_cmd_chan_switch *)cmd_skb->data; + cmd->channel = cpu_to_le16(params->chandef.chan->hw_value); + cmd->radar_required = params->radar_required; + cmd->block_tx = params->block_tx; + cmd->beacon_count = params->count; + + ret = qtnf_cmd_send(mac->bus, cmd_skb, &res_code); + + if (unlikely(ret)) + goto out; + + switch (res_code) { + case QLINK_CMD_RESULT_OK: + memcpy(&mac->csa_chandef, ¶ms->chandef, + sizeof(mac->csa_chandef)); + mac->status |= QTNF_MAC_CSA_ACTIVE; + ret = 0; + break; + case QLINK_CMD_RESULT_ENOTFOUND: + ret = -ENOENT; + break; + case QLINK_CMD_RESULT_ENOTSUPP: + ret = -EOPNOTSUPP; + break; + case QLINK_CMD_RESULT_EALREADY: + ret = -EALREADY; + break; + case QLINK_CMD_RESULT_INVALID: + default: + ret = -EFAULT; + break; + } + +out: + qtnf_bus_unlock(mac->bus); + return ret; +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h index 41e2d50988b7..783b20364296 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.h +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h @@ -73,5 +73,7 @@ int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif, int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req); int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, struct qtnf_chan_stats *stats); +int qtnf_cmd_send_chan_switch(struct qtnf_wmac *mac, + struct cfg80211_csa_settings *params); #endif /* QLINK_COMMANDS_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index 6830ff45976d..099aad76afeb 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -88,6 +88,10 @@ enum qtnf_sta_state { QTNF_STA_CONNECTED }; +enum qtnf_mac_status { + QTNF_MAC_CSA_ACTIVE = BIT(0) +}; + struct qtnf_vif { struct wireless_dev wdev; u8 vifid; @@ -136,11 +140,13 @@ struct qtnf_wmac { u8 macid; u8 wiphy_registered; u8 macaddr[ETH_ALEN]; + u32 status; struct qtnf_bus *bus; struct qtnf_mac_info macinfo; struct qtnf_vif iflist[QTNF_MAX_INTF]; struct cfg80211_scan_request *scan_req; struct cfg80211_chan_def chandef; + struct cfg80211_chan_def csa_chandef; }; struct qtnf_hw_info { diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 00570de918e6..43d2e7fd6e02 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -350,6 +350,63 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac, return 0; } +static int +qtnf_event_handle_freq_change(struct qtnf_wmac *mac, + const struct qlink_event_freq_change *data, + u16 len) +{ + struct wiphy *wiphy = priv_to_wiphy(mac); + struct cfg80211_chan_def chandef; + struct ieee80211_channel *chan; + struct qtnf_vif *vif; + int freq; + int i; + + if (len < sizeof(*data)) { + 
pr_err("payload is too short\n"); + return -EINVAL; + } + + freq = le32_to_cpu(data->freq); + chan = ieee80211_get_channel(wiphy, freq); + if (!chan) { + pr_err("channel at %d MHz not found\n", freq); + return -EINVAL; + } + + pr_debug("MAC%d switch to new channel %u MHz\n", mac->macid, freq); + + if (mac->status & QTNF_MAC_CSA_ACTIVE) { + mac->status &= ~QTNF_MAC_CSA_ACTIVE; + if (chan->hw_value != mac->csa_chandef.chan->hw_value) + pr_warn("unexpected switch to %u during CSA to %u\n", + chan->hw_value, + mac->csa_chandef.chan->hw_value); + } + + /* FIXME: need to figure out proper nl80211_channel_type value */ + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20); + /* fall-back to minimal safe chandef description */ + if (!cfg80211_chandef_valid(&chandef)) + cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20); + + memcpy(&mac->chandef, &chandef, sizeof(mac->chandef)); + + for (i = 0; i < QTNF_MAX_INTF; i++) { + vif = &mac->iflist[i]; + if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED) + continue; + + if (vif->netdev) { + mutex_lock(&vif->wdev.mtx); + cfg80211_ch_switch_notify(vif->netdev, &chandef); + mutex_unlock(&vif->wdev.mtx); + } + } + + return 0; +} + static int qtnf_event_parse(struct qtnf_wmac *mac, const struct sk_buff *event_skb) { @@ -400,6 +457,10 @@ static int qtnf_event_parse(struct qtnf_wmac *mac, ret = qtnf_event_handle_bss_leave(vif, (const void *)event, event_len); break; + case QLINK_EVENT_FREQ_CHANGE: + ret = qtnf_event_handle_freq_change(mac, (const void *)event, + event_len); + break; default: pr_warn("unknown event type: %x\n", event_id); break; diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index 5c2d8f0abd7f..c529cc1994b4 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -153,6 +153,7 @@ enum qlink_cmd_type { QLINK_CMD_UPDOWN_INTF = 0x0018, QLINK_CMD_REG_NOTIFY = 0x0019, QLINK_CMD_CHANS_INFO_GET = 0x001A, + QLINK_CMD_CHAN_SWITCH = 0x001B, QLINK_CMD_CONFIG_AP = 0x0020, QLINK_CMD_START_AP = 0x0021, QLINK_CMD_STOP_AP = 0x0022, @@ -482,6 +483,22 @@ struct qlink_cmd_reg_notify { u8 user_reg_hint_type; } __packed; +/** + * struct qlink_cmd_chan_switch - data for QLINK_CMD_CHAN_SWITCH command + * + * @channel: channel number according to 802.11 17.3.8.3.2 and Annex J + * @radar_required: whether radar detection is required on the new channel + * @block_tx: whether transmissions should be blocked while changing + * @beacon_count: number of beacons until switch + */ +struct qlink_cmd_chan_switch { + struct qlink_cmd chdr; + __le16 channel; + u8 radar_required; + u8 block_tx; + u8 beacon_count; +} __packed; + /* QLINK Command Responses messages related definitions */ @@ -667,6 +684,7 @@ enum qlink_event_type { QLINK_EVENT_SCAN_COMPLETE = 0x0025, QLINK_EVENT_BSS_JOIN = 0x0026, QLINK_EVENT_BSS_LEAVE = 0x0027, + QLINK_EVENT_FREQ_CHANGE = 0x0028, }; /** @@ -736,6 +754,16 @@ struct qlink_event_bss_leave { __le16 reason; } __packed; +/** + * struct qlink_event_freq_change - data for QLINK_EVENT_FREQ_CHANGE event + * + * @freq: new operating frequency in MHz + */ +struct qlink_event_freq_change { + struct qlink_event ehdr; + __le32 freq; +} __packed; + enum qlink_rxmgmt_flags { QLINK_RXMGMT_FLAG_ANSWERED = 1 << 0, }; -- cgit v1.2.3-55-g7522 From c7ead2abd26ab536a2e479af605a6d9529e3a694 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:51 +0300 Subject: qtnfmac: implement scan timeout Userspace tools may hang 
on scan in the case when scan completion event is not returned by firmware. This patch implements the scan timeout to avoid such situation. Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 22 ++++++++++++++++++---- drivers/net/wireless/quantenna/qtnfmac/cfg80211.h | 4 ++++ drivers/net/wireless/quantenna/qtnfmac/core.c | 2 ++ drivers/net/wireless/quantenna/qtnfmac/core.h | 3 +++ drivers/net/wireless/quantenna/qtnfmac/event.c | 2 ++ 5 files changed, 29 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index ac8fdc1db482..856fa6e8327e 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c @@ -579,19 +579,33 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev, return ret; } +static void qtnf_scan_timeout(unsigned long data) +{ + struct qtnf_wmac *mac = (struct qtnf_wmac *)data; + + pr_warn("mac%d scan timed out\n", mac->macid); + qtnf_scan_done(mac, true); +} + static int qtnf_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct qtnf_wmac *mac = wiphy_priv(wiphy); - int ret; mac->scan_req = request; - ret = qtnf_cmd_send_scan(mac); - if (ret) + if (qtnf_cmd_send_scan(mac)) { pr_err("MAC%u: failed to start scan\n", mac->macid); + mac->scan_req = NULL; + return -EFAULT; + } - return ret; + mac->scan_timeout.data = (unsigned long)mac; + mac->scan_timeout.function = qtnf_scan_timeout; + mod_timer(&mac->scan_timeout, + jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ); + + return 0; } static int diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h index 5bd33124a7c8..6a4af52522b8 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h @@ -34,10 +34,14 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted) .aborted = aborted, }; + mutex_lock(&mac->mac_lock); + if (mac->scan_req) { cfg80211_scan_done(mac->scan_req, &info); mac->scan_req = NULL; } + + mutex_unlock(&mac->mac_lock); } #endif /* _QTN_FMAC_CFG80211_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c index 17d17e332a8b..5e60180482d1 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.c +++ b/drivers/net/wireless/quantenna/qtnfmac/core.c @@ -288,6 +288,8 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus, mac->iflist[i].mac = mac; mac->iflist[i].vifid = i; qtnf_sta_list_init(&mac->iflist[i].sta_list); + mutex_init(&mac->mac_lock); + init_timer(&mac->scan_timeout); } qtnf_mac_init_primary_intf(mac); diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h index 099aad76afeb..066fcd1095a0 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/core.h +++ b/drivers/net/wireless/quantenna/qtnfmac/core.h @@ -46,6 +46,7 @@ #define QTNF_MAX_EVENT_QUEUE_LEN 255 #define QTNF_DEFAULT_BG_SCAN_PERIOD 300 #define QTNF_MAX_BG_SCAN_PERIOD 0xffff +#define QTNF_SCAN_TIMEOUT_SEC 15 #define QTNF_DEF_BSS_PRIORITY 0 #define QTNF_DEF_WDOG_TIMEOUT 5 @@ -147,6 +148,8 @@ struct qtnf_wmac { struct cfg80211_scan_request *scan_req; struct cfg80211_chan_def chandef; struct cfg80211_chan_def csa_chandef; + struct mutex mac_lock; /* lock during wmac speicific ops */ + struct timer_list scan_timeout; }; 
struct qtnf_hw_info { diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 43d2e7fd6e02..0fc2814eafad 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c @@ -345,6 +345,8 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac, return -EINVAL; } + if (timer_pending(&mac->scan_timeout)) + del_timer_sync(&mac->scan_timeout); qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED); return 0; -- cgit v1.2.3-55-g7522 From 41c8fa0c62744469466dae072fcb93b716155c7a Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:52 +0300 Subject: qtnfmac: fix handling of iftype mask reported by firmware Firmware sends supported interface type rather than mask. As a result, types field of ieee80211_iface_limit structure may end up having multiple iftype bits set. This leads to WARN_ON from wiphy_verify_combinations. Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/commands.c | 9 +++++---- drivers/net/wireless/quantenna/qtnfmac/qlink.h | 2 +- .../net/wireless/quantenna/qtnfmac/qlink_util.c | 23 ++++++++++++---------- .../net/wireless/quantenna/qtnfmac/qlink_util.h | 2 +- 4 files changed, 20 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 524269d2c30c..94656106b496 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -989,7 +989,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, struct ieee80211_iface_limit *limits = NULL; const struct qlink_iface_limit *limit_record; size_t record_count = 0, rec = 0; - u16 tlv_type, tlv_value_len, mask; + u16 tlv_type, tlv_value_len; struct qlink_iface_comb_num *comb; size_t tlv_full_len; const struct qlink_tlv_hdr *tlv; @@ -1042,9 +1042,10 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, limit_record = (void *)tlv->val; limits[rec].max = le16_to_cpu(limit_record->max_num); - mask = le16_to_cpu(limit_record->type_mask); - limits[rec].types = qlink_iface_type_mask_to_nl(mask); - /* only AP and STA modes are supported */ + limits[rec].types = qlink_iface_type_to_nl_mask( + le16_to_cpu(limit_record->type)); + + /* supported modes: STA, AP */ limits[rec].types &= BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_STATION); diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index c529cc1994b4..ec5126362494 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -873,7 +873,7 @@ struct qlink_tlv_hdr { struct qlink_iface_limit { __le16 max_num; - __le16 type_mask; + __le16 type; } __packed; struct qlink_iface_comb_num { diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c index 49ae652ad9a3..22fa631d692d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c @@ -17,24 +17,27 @@ #include "qlink_util.h" -u16 qlink_iface_type_mask_to_nl(u16 qlink_mask) +u16 qlink_iface_type_to_nl_mask(u16 qlink_type) { u16 result = 0; - if (qlink_mask & QLINK_IFTYPE_AP) + switch (qlink_type) { + case QLINK_IFTYPE_AP: result |= BIT(NL80211_IFTYPE_AP); - - if (qlink_mask & 
QLINK_IFTYPE_STATION) + break; + case QLINK_IFTYPE_STATION: result |= BIT(NL80211_IFTYPE_STATION); - - if (qlink_mask & QLINK_IFTYPE_ADHOC) + break; + case QLINK_IFTYPE_ADHOC: result |= BIT(NL80211_IFTYPE_ADHOC); - - if (qlink_mask & QLINK_IFTYPE_MONITOR) + break; + case QLINK_IFTYPE_MONITOR: result |= BIT(NL80211_IFTYPE_MONITOR); - - if (qlink_mask & QLINK_IFTYPE_WDS) + break; + case QLINK_IFTYPE_WDS: result |= BIT(NL80211_IFTYPE_WDS); + break; + } return result; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h index 90d7d09a6c63..c9e882a3a991 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h @@ -68,7 +68,7 @@ static inline void qtnf_cmd_skb_put_tlv_u16(struct sk_buff *skb, memcpy(hdr->val, &tmp, sizeof(tmp)); } -u16 qlink_iface_type_mask_to_nl(u16 qlink_mask); +u16 qlink_iface_type_to_nl_mask(u16 qlink_type); u8 qlink_chan_width_mask_to_nl(u16 qlink_mask); #endif /* _QTN_FMAC_QLINK_UTIL_H_ */ -- cgit v1.2.3-55-g7522 From 03ddf59d786d6c9f67d2fc8ea7b54c12f1daa45b Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:53 +0300 Subject: qtnfmac: remove function qtnf_cmd_skb_put_action This function is not used anymore, so remove it. Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/qlink_util.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h index c9e882a3a991..de06c1e20b5b 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h @@ -22,14 +22,6 @@ #include "qlink.h" -static inline void qtnf_cmd_skb_put_action(struct sk_buff *skb, u16 action) -{ - __le16 *buf_ptr; - - buf_ptr = skb_put(skb, sizeof(action)); - *buf_ptr = cpu_to_le16(action); -} - static inline void qtnf_cmd_skb_put_buffer(struct sk_buff *skb, const u8 *buf_src, size_t len) { -- cgit v1.2.3-55-g7522 From 805b28c05c8e0496f679f180e2d276cce8b949b0 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Fri, 28 Jul 2017 02:06:54 +0300 Subject: qtnfmac: prepare for AP_VLAN interface type support Modify qlink command structures and interface types handling to prepare adding AP_VLAN support to qtnfmac driver. 
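To make the wire-format change concrete: the patch widens qlink_cmd_change_sta (if_type, vlanid) and qlink_cmd_add_key (vlanid) and bumps QLINK_PROTO_VER from 4 to 5, but nothing fills vlanid yet. A follow-up AP_VLAN patch would presumably do something along these lines; vlan_id is a hypothetical variable, not taken from this series:

	case NL80211_IFTYPE_AP_VLAN:
		cmd->if_type = cpu_to_le16(QLINK_IFTYPE_AP_VLAN);
		cmd->vlanid = cpu_to_le16(vlan_id);	/* hypothetical: VLAN assigned to this STA */
		break;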
Signed-off-by: Igor Mitsyanko Signed-off-by: Sergey Matyukevich Signed-off-by: Avinash Patil Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/commands.c | 27 ++++++++++++++++++---- drivers/net/wireless/quantenna/qtnfmac/qlink.h | 13 ++++++++--- .../net/wireless/quantenna/qtnfmac/qlink_util.c | 3 +++ 3 files changed, 36 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index 94656106b496..4206886b110c 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -1047,6 +1047,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, /* supported modes: STA, AP */ limits[rec].types &= BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_AP_VLAN) | BIT(NL80211_IFTYPE_STATION); pr_debug("MAC%u: MAX: %u; TYPES: %.4X\n", mac->macid, @@ -1058,6 +1059,7 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac, default: break; } + tlv_buf_size -= tlv_full_len; tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len); } @@ -1859,10 +1861,27 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac, cmd = (struct qlink_cmd_change_sta *)cmd_skb->data; ether_addr_copy(cmd->sta_addr, mac); - cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( - params->sta_flags_mask)); - cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( - params->sta_flags_set)); + + switch (vif->wdev.iftype) { + case NL80211_IFTYPE_AP: + cmd->if_type = cpu_to_le16(QLINK_IFTYPE_AP); + cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_mask)); + cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_set)); + break; + case NL80211_IFTYPE_STATION: + cmd->if_type = cpu_to_le16(QLINK_IFTYPE_STATION); + cmd->sta_flags_mask = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_mask)); + cmd->sta_flags_set = cpu_to_le32(qtnf_encode_sta_flags( + params->sta_flags_set)); + break; + default: + pr_err("unsupported iftype %d\n", vif->wdev.iftype); + ret = -EINVAL; + goto out; + } ret = qtnf_cmd_send(vif->mac->bus, cmd_skb, &res_code); if (unlikely(ret)) diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h index ec5126362494..a8242f678496 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h @@ -19,7 +19,7 @@ #include -#define QLINK_PROTO_VER 4 +#define QLINK_PROTO_VER 5 #define QLINK_MACID_RSVD 0xFF #define QLINK_VIFID_RSVD 0xFF @@ -77,6 +77,7 @@ enum qlink_iface_type { QLINK_IFTYPE_ADHOC = 3, QLINK_IFTYPE_MONITOR = 4, QLINK_IFTYPE_WDS = 5, + QLINK_IFTYPE_AP_VLAN = 6, }; /** @@ -85,12 +86,12 @@ enum qlink_iface_type { * Data describing a single virtual interface. * * @if_type: Mode of interface operation, one of &enum qlink_iface_type - * @flags: interface flagsmap. + * @vlanid: VLAN ID for AP_VLAN interface type * @mac_addr: MAC address of virtual interface. */ struct qlink_intf_info { __le16 if_type; - __le16 flags; + __le16 vlanid; u8 mac_addr[ETH_ALEN]; u8 rsvd[2]; } __packed; @@ -292,6 +293,7 @@ struct qlink_cmd_get_sta_info { * @pairwise: whether to use pairwise key. * @addr: MAC address of a STA key is being installed to. * @cipher: cipher suite. + * @vlanid: VLAN ID for AP_VLAN interface type * @key_data: key data itself. 
*/ struct qlink_cmd_add_key { @@ -300,6 +302,7 @@ struct qlink_cmd_add_key { u8 pairwise; u8 addr[ETH_ALEN]; __le32 cipher; + __le16 vlanid; u8 key_data[0]; } __packed; @@ -346,12 +349,16 @@ struct qlink_cmd_set_def_mgmt_key { * * @sta_flags_mask: STA flags mask, bitmap of &enum qlink_sta_flags * @sta_flags_set: STA flags values, bitmap of &enum qlink_sta_flags + * @if_type: Mode of interface operation, one of &enum qlink_iface_type + * @vlanid: VLAN ID to assign to specific STA * @sta_addr: address of the STA for which parameters are set. */ struct qlink_cmd_change_sta { struct qlink_cmd chdr; __le32 sta_flags_mask; __le32 sta_flags_set; + __le16 if_type; + __le16 vlanid; u8 sta_addr[ETH_ALEN]; } __packed; diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c index 22fa631d692d..cf024c995fd6 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c @@ -37,6 +37,9 @@ u16 qlink_iface_type_to_nl_mask(u16 qlink_type) case QLINK_IFTYPE_WDS: result |= BIT(NL80211_IFTYPE_WDS); break; + case QLINK_IFTYPE_AP_VLAN: + result |= BIT(NL80211_IFTYPE_AP_VLAN); + break; } return result; -- cgit v1.2.3-55-g7522 From 13cb8a5845ff1a1fe1644bbae9790b557ac69bdb Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 24 Jul 2017 18:41:30 +0100 Subject: qtnfmac: Tidy up DMA mask setting As the only caller of dma_supported() outside of DMA API internals, the qtfnmac driver stands out and invites scrutiny. Thankfully, it's not being used for evil, but it is entirely redundant, since it open-codes a check that the DMA mask setting functions are going to perform anyway. In fact, the whole qtnf_pcie_init_dma_mask() function is nothing more than a rather long-winded implementation of dma_set_mask_and_coherent(), so let's just use that directly. 
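For reference, dma_set_mask_and_coherent() already folds the "is this mask supported?" check into dma_set_mask() and then applies the same mask to coherent allocations. Its generic definition in include/linux/dma-mapping.h is roughly:

static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);

	return rc;
}

A caller therefore only needs to check the single return value, which is exactly what the new probe code does.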
Signed-off-by: Robin Murphy Acked-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- .../net/wireless/quantenna/qtnfmac/pearl/pcie.c | 28 +--------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 7fc4f0d6a9ad..2c065ffda070 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -274,32 +274,6 @@ static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv) return 0; } -static int -qtnf_pcie_init_dma_mask(struct qtnf_pcie_bus_priv *priv, u64 dma_mask) -{ - int ret; - - ret = dma_supported(&priv->pdev->dev, dma_mask); - if (!ret) { - pr_err("DMA mask %llu not supported\n", dma_mask); - return ret; - } - - ret = pci_set_dma_mask(priv->pdev, dma_mask); - if (ret) { - pr_err("failed to set DMA mask %llu\n", dma_mask); - return ret; - } - - ret = pci_set_consistent_dma_mask(priv->pdev, dma_mask); - if (ret) { - pr_err("failed to set consistent DMA mask %llu\n", dma_mask); - return ret; - } - - return ret; -} - static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv) { struct pci_dev *pdev = priv->pdev; @@ -1212,7 +1186,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_base; } - ret = qtnf_pcie_init_dma_mask(pcie_priv, DMA_BIT_MASK(32)); + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { pr_err("PCIE DMA mask init failed\n"); goto err_base; -- cgit v1.2.3-55-g7522 From d86d8dbdebcc8c2ef763af79b5bc1db4846216ad Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:39:49 +0530 Subject: rtlwifi: rtl8192de: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 2833 945 12 3790 ece realtek/rtlwifi/rtl8192de/sw.o File size After adding 'const': text data bss dec hex filename 2929 849 12 3790 ece realtek/rtlwifi/rtl8192de/sw.o Signed-off-by: Arvind Yadav Acked-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index e38d6f7370aa..a6549f5f6c59 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -349,7 +349,7 @@ static const struct rtl_hal_cfg rtl92de_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15, }; -static struct pci_device_id rtl92de_pci_ids[] = { +static const struct pci_device_id rtl92de_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8193, rtl92de_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x002B, rtl92de_hal_cfg)}, {}, -- cgit v1.2.3-55-g7522 From 67f512e6707f41911c788c6a36990fce1a7ae1c0 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:39:50 +0530 Subject: rtlwifi: rtl8192se: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. 
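The same one-line change is applied to each of the rtlwifi drivers in this series (and to qtnfmac further down). As a generic sketch of the resulting pattern, with a made-up device ID purely for illustration:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8199) },	/* illustrative ID only */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pci_ids);

The PCI core only ever reads these tables, so marking them const lets the compiler place them in .rodata; that is what the size listings show: data shrinks and text grows by the size of the table while the total stays unchanged.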
File size before: text data bss dec hex filename 2817 1040 0 3857 f11 realtek/rtlwifi/rtl8192se/sw.o File size After adding 'const': text data bss dec hex filename 3009 848 0 3857 f11 realtek/rtlwifi/rtl8192se/sw.o Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 1ec20efb9ce1..d7945b9db493 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -398,7 +398,7 @@ static const struct rtl_hal_cfg rtl92se_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15, }; -static struct pci_device_id rtl92se_pci_ids[] = { +static const struct pci_device_id rtl92se_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8192, rtl92se_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8171, rtl92se_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8172, rtl92se_hal_cfg)}, -- cgit v1.2.3-55-g7522 From 468952f147147e306e376f24d2af93dac63bef58 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:39:51 +0530 Subject: rtlwifi: rtl8821ae: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 2491 960 0 3451 d7b realtek/rtlwifi/rtl8821ae/sw.o File size After adding 'const': text data bss dec hex filename 2587 864 0 3451 d7b realtek/rtlwifi/rtl8821ae/sw.o Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 5925edf7877f..0894ef48ab87 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -423,7 +423,7 @@ static const struct rtl_hal_cfg rtl8821ae_hal_cfg = { .maps[RTL_RC_VHT_RATE_2SS_MCS9] = DESC_RATEVHT2SS_MCS9, }; -static struct pci_device_id rtl8821ae_pci_ids[] = { +static const struct pci_device_id rtl8821ae_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8812, rtl8821ae_hal_cfg)}, {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8821, rtl8821ae_hal_cfg)}, {}, -- cgit v1.2.3-55-g7522 From e9a214def6cec7b99d12de92ced4f3441f9b8536 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:39:52 +0530 Subject: rtlwifi: rtl8723ae: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. 
File size before: text data bss dec hex filename 2775 912 0 3687 e67 realtek/rtlwifi/rtl8723ae/sw.o File size After adding 'const': text data bss dec hex filename 2839 848 0 3687 e67 realtek/rtlwifi/rtl8723ae/sw.o Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index aab86667a7f3..97b8bd294aa8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -369,7 +369,7 @@ static const struct rtl_hal_cfg rtl8723e_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl8723e_pci_ids[] = { +static const struct pci_device_id rtl8723e_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8723, rtl8723e_hal_cfg)}, {}, }; -- cgit v1.2.3-55-g7522 From 8c6f0f5bf1e15019cfa531e940b8ba1bdd9f6e08 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:39:53 +0530 Subject: rtlwifi: rtl8723be: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 3032 912 0 3944 f68 realtek/rtlwifi/rtl8723be/sw.o File size After adding 'const': text data bss dec hex filename 3096 848 0 3944 f68 realtek/rtlwifi/rtl8723be/sw.o Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index f47d839f388d..2b16a1467e78 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -375,7 +375,7 @@ static const struct rtl_hal_cfg rtl8723be_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl8723be_pci_ids[] = { +static const struct pci_device_id rtl8723be_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB723, rtl8723be_hal_cfg)}, {}, }; -- cgit v1.2.3-55-g7522 From 4dc2efc132a761a76a0cb684adf293692a7a9842 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:39:54 +0530 Subject: rtlwifi: rtl8188ee: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. 
File size before: text data bss dec hex filename 3090 912 0 4002 fa2 realtek/rtlwifi/rtl8188ee/sw.o File size After adding 'const': text data bss dec hex filename 3154 848 0 4002 fa2 realtek/rtlwifi/rtl8188ee/sw.o Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index bddd5a5ebe52..57e5d5c1d24b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -378,7 +378,7 @@ static const struct rtl_hal_cfg rtl88ee_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl88ee_pci_ids[] = { +static const struct pci_device_id rtl88ee_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8179, rtl88ee_hal_cfg)}, {}, }; -- cgit v1.2.3-55-g7522 From d20d893d7a749662f21ff97894c3a496f9a5c991 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:39:55 +0530 Subject: rtlwifi: rtl8192ee: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. File size before: text data bss dec hex filename 1899 928 0 2827 b0b realtek/rtlwifi/rtl8192ee/sw.o File size After adding 'const': text data bss dec hex filename 1963 864 0 2827 b0b realtek/rtlwifi/rtl8192ee/sw.o Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index 745e9c32655c..a3490080d066 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -356,7 +356,7 @@ static const struct rtl_hal_cfg rtl92ee_hal_cfg = { .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15, }; -static struct pci_device_id rtl92ee_pci_ids[] = { +static const struct pci_device_id rtl92ee_pci_ids[] = { {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x818B, rtl92ee_hal_cfg)}, {}, }; -- cgit v1.2.3-55-g7522 From 9ff067ff4c4a08d412e51307f11f7de3fb3045e7 Mon Sep 17 00:00:00 2001 From: Stefano Brivio Date: Thu, 27 Jul 2017 13:20:15 +0200 Subject: hostap: Fix outdated comment about dev->destructor After commit cf124db566e6 ("net: Fix inconsistent teardown and release of private netdev state."), setting 'dev->needs_free_netdev' ensures device data is released, and 'dev->destructor' is not used anymore. 
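As a generic illustration of the pattern after cf124db566e6 (this is not hostap code; example_setup and example_teardown are made-up names):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_teardown(struct net_device *dev)
{
	/* optional: release resources that free_netdev() does not know about */
}

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->needs_free_netdev = true;		/* net core calls free_netdev() at unregister */
	dev->priv_destructor = example_teardown;	/* optional extra teardown hook */
}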
Fixes: cf124db566e6 ("net: Fix inconsistent teardown and release of private netdev state.") Signed-off-by: Stefano Brivio Signed-off-by: Kalle Valo --- drivers/net/wireless/intersil/hostap/hostap_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index a3c066f90afc..012930d35434 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ b/drivers/net/wireless/intersil/hostap/hostap_main.c @@ -125,8 +125,8 @@ void hostap_remove_interface(struct net_device *dev, int rtnl_locked, else unregister_netdev(dev); - /* dev->destructor = free_netdev() will free the device data, including - * private data, when removing the device */ + /* 'dev->needs_free_netdev = true' implies device data, including + * private data, will be freed when the device is removed */ } -- cgit v1.2.3-55-g7522 From cc5becd38e87518671f68f17670f13c98bd62be0 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Mon, 17 Jul 2017 23:44:23 +0530 Subject: net: qtnfmac: constify pci_device_id. pci_device_id are not supposed to change at runtime. All functions working with pci_device_id provided by work with const pci_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Reviewed-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 2c065ffda070..ae8acc1bf291 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -1310,7 +1310,7 @@ static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend, qtnf_pcie_resume); #endif -static struct pci_device_id qtnf_pcie_devid_table[] = { +static const struct pci_device_id qtnf_pcie_devid_table[] = { { PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL, PCI_ANY_ID, PCI_ANY_ID, 0, 0, -- cgit v1.2.3-55-g7522 From e37f6483dc282267ebf12cb94c40e8a0c8823722 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 1 Aug 2017 01:38:23 +0000 Subject: mwifiex: make addba request command clean uninitilized variable, such as .add_req_result might be magic stack value. Initialize the structure to make it clean. Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/11n.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index 16c77c27f1b6..725206914911 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -572,6 +572,8 @@ int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac) mwifiex_dbg(priv->adapter, CMD, "cmd: %s: tid %d\n", __func__, tid); + memset(&add_ba_req, 0, sizeof(add_ba_req)); + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) && priv->adapter->is_hw_11ac_capable && -- cgit v1.2.3-55-g7522 From e7ece050113dcfab7ea844a643fa1f3c3ac75835 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 1 Aug 2017 01:38:24 +0000 Subject: mwifiex: pcie: compatible with wifi-only image while extract wifi-part fw Sometimes, we might using wifi-only firmware with a combo firmware name, in this case, do not need to filter bluetooth part from header. 
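The image layout the parser assumes, summarised as a comment-style sketch of the change below rather than additional driver code:

/*
 * Combo image:     [ header ][ bluetooth part ][ wifi part, first command CMD1 ]
 * Wifi-only image: [ wifi part, first command CMD1 ]
 *
 * mwifiex_extract_wifi_fw() walks the download commands and returns the
 * offset of the wifi part.  With this patch an image whose very first
 * command is CMD1 is recognised as already wifi-only and 0 is returned,
 * instead of failing the "no cmd7 before cmd1" sanity check.
 */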
Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Reviewed-by: Brian Norris Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 3da1eeb730eb..cd314946452c 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -1985,7 +1985,8 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter, * (3) wifi image. * * This function bypass the header and bluetooth part, return - * the offset of tail wifi-only part. + * the offset of tail wifi-only part. If the image is already wifi-only, + * that is start with CMD1, return 0. */ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, @@ -1993,7 +1994,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, const struct mwifiex_fw_data *fwdata; u32 offset = 0, data_len, dnld_cmd; int ret = 0; - bool cmd7_before = false; + bool cmd7_before = false, first_cmd = false; while (1) { /* Check for integer and buffer overflow */ @@ -2014,20 +2015,29 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, switch (dnld_cmd) { case MWIFIEX_FW_DNLD_CMD_1: - if (!cmd7_before) { - mwifiex_dbg(adapter, ERROR, - "no cmd7 before cmd1!\n"); + if (offset + data_len < data_len) { + mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); ret = -1; goto done; } - if (offset + data_len < data_len) { - mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); + + /* Image start with cmd1, already wifi-only firmware */ + if (!first_cmd) { + mwifiex_dbg(adapter, MSG, + "input wifi-only firmware\n"); + return 0; + } + + if (!cmd7_before) { + mwifiex_dbg(adapter, ERROR, + "no cmd7 before cmd1!\n"); ret = -1; goto done; } offset += data_len; break; case MWIFIEX_FW_DNLD_CMD_5: + first_cmd = true; /* Check for integer overflow */ if (offset + data_len < data_len) { mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); @@ -2037,6 +2047,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, offset += data_len; break; case MWIFIEX_FW_DNLD_CMD_6: + first_cmd = true; /* Check for integer overflow */ if (offset + data_len < data_len) { mwifiex_dbg(adapter, ERROR, "bad FW parse\n"); @@ -2053,6 +2064,7 @@ static int mwifiex_extract_wifi_fw(struct mwifiex_adapter *adapter, } goto done; case MWIFIEX_FW_DNLD_CMD_7: + first_cmd = true; cmd7_before = true; break; default: -- cgit v1.2.3-55-g7522 From 9d546198705a79630cb29b1cc47a43e75b8afb89 Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Wed, 2 Aug 2017 17:59:15 +0530 Subject: rtlwifi: Replace hardcode value with macro In _rtl_init_mac80211(), hardcoded value for hw->max_listen_interval and hw->max_rate_tries are replaced by macro and removed the comment. 
Signed-off-by: Souptick Joarder Acked-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/base.c | 5 ++--- drivers/net/wireless/realtek/rtlwifi/base.h | 2 ++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 8b833e21b5cb..ea18aa7afecb 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -426,9 +426,8 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw) hw->extra_tx_headroom = RTL_TX_HEADER_SIZE; /* TODO: Correct this value for our hw */ - /* TODO: define these hard code value */ - hw->max_listen_interval = 10; - hw->max_rate_tries = 4; + hw->max_listen_interval = MAX_LISTEN_INTERVAL; + hw->max_rate_tries = MAX_RATE_TRIES; /* hw->max_rates = 1; */ hw->sta_data_size = sizeof(struct rtl_sta_info); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index ab7d81904d25..b56d1b7f5567 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -65,6 +65,8 @@ enum ap_peer { #define FRAME_OFFSET_ADDRESS3 16 #define FRAME_OFFSET_SEQUENCE 22 #define FRAME_OFFSET_ADDRESS4 24 +#define MAX_LISTEN_INTERVAL 10 +#define MAX_RATE_TRIES 4 #define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \ WRITEEF2BYTE(_hdr, _val) -- cgit v1.2.3-55-g7522 From 47cc0ca91c9e4dde68d69f35e139360ef356267f Mon Sep 17 00:00:00 2001 From: Matthias Frei Date: Fri, 28 Jul 2017 15:15:36 +0300 Subject: ath10k: set a-mpdu receiver reference number Set the a-mpdu reference number in ath10k to make it accessible in the receivers radiotap header. Implemented as in ath9k. The reference number is needed for troubleshooting and research at the receivers site (e.g. 
to identify mpdu's that were aggregated in an a-mpdu) Signed-off-by: Matthias Frei [kvalo@qca.qualcomm.com: fix checkpatch warning, commit log cleanup] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 2 ++ drivers/net/wireless/ath/ath10k/htt_rx.c | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 2b499af722ad..d74e8980b96e 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -993,6 +993,8 @@ struct ath10k { u32 reg_ack_cts_timeout_orig; } fw_coverage; + u32 ampdu_reference; + void *ce_priv; /* must be last */ diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 398dda978d6e..799fb7501eb5 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -890,16 +890,26 @@ static void ath10k_htt_rx_h_ppdu(struct ath10k *ar, status->nss = 0; status->encoding = RX_ENC_LEGACY; status->bw = RATE_INFO_BW_20; + status->flag &= ~RX_FLAG_MACTIME_END; status->flag |= RX_FLAG_NO_SIGNAL_VAL; + status->flag &= ~(RX_FLAG_AMPDU_IS_LAST); + status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN; + status->ampdu_reference = ar->ampdu_reference; + ath10k_htt_rx_h_signal(ar, status, rxd); ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id); ath10k_htt_rx_h_rates(ar, status, rxd); } - if (is_last_ppdu) + if (is_last_ppdu) { ath10k_htt_rx_h_mactime(ar, status, rxd); + + /* set ampdu last segment flag */ + status->flag |= RX_FLAG_AMPDU_IS_LAST; + ar->ampdu_reference++; + } } static const char * const tid_to_ac[] = { -- cgit v1.2.3-55-g7522 From b00435e6cda6863c2c7f40c09c6fab21c205f455 Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Fri, 28 Jul 2017 15:15:37 +0300 Subject: ath10k: various usb related definitions Definitions for USB based chipsets Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 1 + drivers/net/wireless/ath/ath10k/core.h | 3 +++ drivers/net/wireless/ath/ath10k/debug.h | 2 ++ 3 files changed, 6 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 8ff47458207c..2aa320a223af 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1454,6 +1454,7 @@ static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name, { switch (ar->hif.bus) { case ATH10K_BUS_SDIO: + case ATH10K_BUS_USB: scnprintf(fw_name, fw_name_len, "%s-%s-%d.bin", ATH10K_FW_FILE_BASE, ath10k_bus_str(ar->hif.bus), fw_api); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index d74e8980b96e..34b713c5e022 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -92,6 +92,7 @@ enum ath10k_bus { ATH10K_BUS_PCI, ATH10K_BUS_AHB, ATH10K_BUS_SDIO, + ATH10K_BUS_USB, }; static inline const char *ath10k_bus_str(enum ath10k_bus bus) @@ -103,6 +104,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus) return "ahb"; case ATH10K_BUS_SDIO: return "sdio"; + case ATH10K_BUS_USB: + return "usb"; } return "unknown"; diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 257d10985c6e..548ad5483a4a 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -40,6 +40,8 @@ enum ath10k_debug_mask { ATH10K_DBG_AHB = 0x00008000, 
ATH10K_DBG_SDIO = 0x00010000, ATH10K_DBG_SDIO_DUMP = 0x00020000, + ATH10K_DBG_USB = 0x00040000, + ATH10K_DBG_USB_BULK = 0x00080000, ATH10K_DBG_ANY = 0xffffffff, }; -- cgit v1.2.3-55-g7522 From 4db66499df91b9398435e2dbee0e42cd6df0bc27 Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Fri, 28 Jul 2017 15:15:39 +0300 Subject: ath10k: add initial USB support Chipsets like QCA9377 have support for USB so add initial USB bus support to ath10k. With this patch we have the low level HIF and HTC protocol working and it's possible to boot the firmware, but it's still not possible to connect or anything like. More changes are needed for full functionality. For that reason we print during initialisation: WARNING: ath10k USB support is incomplete, don't expect anything to work! Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/Kconfig | 7 + drivers/net/wireless/ath/ath10k/Makefile | 3 + drivers/net/wireless/ath/ath10k/usb.c | 1106 ++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/usb.h | 128 ++++ 4 files changed, 1244 insertions(+) create mode 100644 drivers/net/wireless/ath/ath10k/usb.c create mode 100644 drivers/net/wireless/ath/ath10k/usb.h diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 412eb1380dcc..87f56d0e17a6 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -29,6 +29,13 @@ config ATH10K_SDIO This module adds experimental support for SDIO/MMC bus. Currently work in progress and will not fully work. +config ATH10K_USB + tristate "Atheros ath10k USB support (EXPERIMENTAL)" + depends on ATH10K && USB + ---help--- + This module adds experimental support for USB bus. Currently + work in progress and will not fully work. + config ATH10K_DEBUG bool "Atheros ath10k debugging" depends on ATH10K diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index b0b19a7eb98b..899b9b79f4ce 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -30,5 +30,8 @@ ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o obj-$(CONFIG_ATH10K_SDIO) += ath10k_sdio.o ath10k_sdio-y += sdio.o +obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o +ath10k_usb-y += usb.o + # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c new file mode 100644 index 000000000000..d4803ff5a78a --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/usb.c @@ -0,0 +1,1106 @@ +/* + * Copyright (c) 2007-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc. + * Copyright (c) 2016-2017 Erik Stromdahl + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include + +#include "debug.h" +#include "core.h" +#include "bmi.h" +#include "hif.h" +#include "htc.h" +#include "usb.h" + +static void ath10k_usb_post_recv_transfers(struct ath10k *ar, + struct ath10k_usb_pipe *recv_pipe); + +/* inlined helper functions */ + +static inline enum ath10k_htc_ep_id +eid_from_htc_hdr(struct ath10k_htc_hdr *htc_hdr) +{ + return (enum ath10k_htc_ep_id)htc_hdr->eid; +} + +static inline bool is_trailer_only_msg(struct ath10k_htc_hdr *htc_hdr) +{ + return __le16_to_cpu(htc_hdr->len) == htc_hdr->trailer_len; +} + +/* pipe/urb operations */ +static struct ath10k_urb_context * +ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe) +{ + struct ath10k_urb_context *urb_context = NULL; + unsigned long flags; + + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + if (!list_empty(&pipe->urb_list_head)) { + urb_context = list_first_entry(&pipe->urb_list_head, + struct ath10k_urb_context, link); + list_del(&urb_context->link); + pipe->urb_cnt--; + } + spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); + + return urb_context; +} + +static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe, + struct ath10k_urb_context *urb_context) +{ + unsigned long flags; + + spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags); + + pipe->urb_cnt++; + list_add(&urb_context->link, &pipe->urb_list_head); + + spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags); +} + +static void ath10k_usb_cleanup_recv_urb(struct ath10k_urb_context *urb_context) +{ + dev_kfree_skb(urb_context->skb); + urb_context->skb = NULL; + + ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); +} + +static void ath10k_usb_free_pipe_resources(struct ath10k *ar, + struct ath10k_usb_pipe *pipe) +{ + struct ath10k_urb_context *urb_context; + + if (!pipe->ar_usb) { + /* nothing allocated for this pipe */ + return; + } + + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb free resources lpipe %d hpipe 0x%x urbs %d avail %d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc, pipe->urb_cnt); + + if (pipe->urb_alloc != pipe->urb_cnt) { + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb urb leak lpipe %d hpipe 0x%x urbs %d avail %d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc, pipe->urb_cnt); + } + + for (;;) { + urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); + + if (!urb_context) + break; + + kfree(urb_context); + } +} + +static void ath10k_usb_cleanup_pipe_resources(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + int i; + + for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) + ath10k_usb_free_pipe_resources(ar, &ar_usb->pipes[i]); +} + +/* hif usb rx/tx completion functions */ + +static void ath10k_usb_recv_complete(struct urb *urb) +{ + struct ath10k_urb_context *urb_context = urb->context; + struct ath10k_usb_pipe *pipe = urb_context->pipe; + struct ath10k *ar = pipe->ar_usb->ar; + struct sk_buff *skb; + int status = 0; + + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb recv pipe %d stat %d len %d urb 0x%pK\n", + pipe->logical_pipe_num, urb->status, urb->actual_length, + urb); + + if (urb->status != 0) { + status = -EIO; + switch (urb->status) { + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* no need to spew these errors when device + * removed or urb killed due to driver shutdown + */ + status = -ECANCELED; + break; + default: + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb recv pipe %d ep 0x%2.2x failed: %d\n", + pipe->logical_pipe_num, + pipe->ep_address, urb->status); + break; + } + goto cleanup_recv_urb; + } + + if 
(urb->actual_length == 0) + goto cleanup_recv_urb; + + skb = urb_context->skb; + + /* we are going to pass it up */ + urb_context->skb = NULL; + skb_put(skb, urb->actual_length); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, skb); + schedule_work(&pipe->io_complete_work); + +cleanup_recv_urb: + ath10k_usb_cleanup_recv_urb(urb_context); + + if (status == 0 && + pipe->urb_cnt >= pipe->urb_cnt_thresh) { + /* our free urbs are piling up, post more transfers */ + ath10k_usb_post_recv_transfers(ar, pipe); + } +} + +static void ath10k_usb_transmit_complete(struct urb *urb) +{ + struct ath10k_urb_context *urb_context = urb->context; + struct ath10k_usb_pipe *pipe = urb_context->pipe; + struct ath10k *ar = pipe->ar_usb->ar; + struct sk_buff *skb; + + if (urb->status != 0) { + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "pipe: %d, failed:%d\n", + pipe->logical_pipe_num, urb->status); + } + + skb = urb_context->skb; + urb_context->skb = NULL; + ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); + + /* note: queue implements a lock */ + skb_queue_tail(&pipe->io_comp_queue, skb); + schedule_work(&pipe->io_complete_work); +} + +/* pipe operations */ +static void ath10k_usb_post_recv_transfers(struct ath10k *ar, + struct ath10k_usb_pipe *recv_pipe) +{ + struct ath10k_urb_context *urb_context; + struct urb *urb; + int usb_status; + + for (;;) { + urb_context = ath10k_usb_alloc_urb_from_pipe(recv_pipe); + if (!urb_context) + break; + + urb_context->skb = dev_alloc_skb(ATH10K_USB_RX_BUFFER_SIZE); + if (!urb_context->skb) + goto err; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) + goto err; + + usb_fill_bulk_urb(urb, + recv_pipe->ar_usb->udev, + recv_pipe->usb_pipe_handle, + urb_context->skb->data, + ATH10K_USB_RX_BUFFER_SIZE, + ath10k_usb_recv_complete, urb_context); + + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%pK\n", + recv_pipe->logical_pipe_num, + recv_pipe->usb_pipe_handle, recv_pipe->ep_address, + ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb); + + usb_anchor_urb(urb, &recv_pipe->urb_submitted); + usb_status = usb_submit_urb(urb, GFP_ATOMIC); + + if (usb_status) { + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb bulk recv failed: %d\n", + usb_status); + usb_unanchor_urb(urb); + usb_free_urb(urb); + goto err; + } + usb_free_urb(urb); + } + + return; + +err: + ath10k_usb_cleanup_recv_urb(urb_context); +} + +static void ath10k_usb_flush_all(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + int i; + + for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { + if (ar_usb->pipes[i].ar_usb) { + usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted); + cancel_work_sync(&ar_usb->pipes[i].io_complete_work); + } + } +} + +static void ath10k_usb_start_recv_pipes(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA].urb_cnt_thresh = 1; + + ath10k_usb_post_recv_transfers(ar, + &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); +} + +static void ath10k_usb_tx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_htc_hdr *htc_hdr; + struct ath10k_htc_ep *ep; + + htc_hdr = (struct ath10k_htc_hdr *)skb->data; + ep = &ar->htc.endpoint[htc_hdr->eid]; + ath10k_htc_notify_tx_completion(ep, skb); + /* The TX complete handler now owns the skb... 
*/ +} + +static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb) +{ + struct ath10k_htc *htc = &ar->htc; + struct ath10k_htc_hdr *htc_hdr; + enum ath10k_htc_ep_id eid; + struct ath10k_htc_ep *ep; + u16 payload_len; + u8 *trailer; + int ret; + + htc_hdr = (struct ath10k_htc_hdr *)skb->data; + eid = eid_from_htc_hdr(htc_hdr); + ep = &ar->htc.endpoint[eid]; + + if (ep->service_id == 0) { + ath10k_warn(ar, "ep %d is not connected\n", eid); + goto out_free_skb; + } + + payload_len = le16_to_cpu(htc_hdr->len); + if (!payload_len) { + ath10k_warn(ar, "zero length frame received, firmware crashed?\n"); + goto out_free_skb; + } + + if (payload_len < htc_hdr->trailer_len) { + ath10k_warn(ar, "malformed frame received, firmware crashed?\n"); + goto out_free_skb; + } + + if (htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT) { + trailer = skb->data + sizeof(*htc_hdr) + payload_len - + htc_hdr->trailer_len; + + ret = ath10k_htc_process_trailer(htc, + trailer, + htc_hdr->trailer_len, + eid, + NULL, + NULL); + if (ret) + goto out_free_skb; + + if (is_trailer_only_msg(htc_hdr)) + goto out_free_skb; + + /* strip off the trailer from the skb since it should not + * be passed on to upper layers + */ + skb_trim(skb, skb->len - htc_hdr->trailer_len); + } + + skb_pull(skb, sizeof(*htc_hdr)); + ep->ep_ops.ep_rx_complete(ar, skb); + /* The RX complete handler now owns the skb... */ + + return; + +out_free_skb: + dev_kfree_skb(skb); +} + +static void ath10k_usb_io_comp_work(struct work_struct *work) +{ + struct ath10k_usb_pipe *pipe = container_of(work, + struct ath10k_usb_pipe, + io_complete_work); + struct ath10k *ar = pipe->ar_usb->ar; + struct sk_buff *skb; + + while ((skb = skb_dequeue(&pipe->io_comp_queue))) { + if (pipe->flags & ATH10K_USB_PIPE_FLAG_TX) + ath10k_usb_tx_complete(ar, skb); + else + ath10k_usb_rx_complete(ar, skb); + } +} + +#define ATH10K_USB_MAX_DIAG_CMD (sizeof(struct ath10k_usb_ctrl_diag_cmd_write)) +#define ATH10K_USB_MAX_DIAG_RESP (sizeof(struct ath10k_usb_ctrl_diag_resp_read)) + +static void ath10k_usb_destroy(struct ath10k *ar) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + ath10k_usb_flush_all(ar); + ath10k_usb_cleanup_pipe_resources(ar); + usb_set_intfdata(ar_usb->interface, NULL); + + kfree(ar_usb->diag_cmd_buffer); + kfree(ar_usb->diag_resp_buffer); +} + +static int ath10k_usb_hif_start(struct ath10k *ar) +{ + int i; + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + ath10k_usb_start_recv_pipes(ar); + + /* set the TX resource avail threshold for each TX pipe */ + for (i = ATH10K_USB_PIPE_TX_CTRL; + i <= ATH10K_USB_PIPE_TX_DATA_HP; i++) { + ar_usb->pipes[i].urb_cnt_thresh = + ar_usb->pipes[i].urb_alloc / 2; + } + + return 0; +} + +static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct ath10k_usb_pipe *pipe = &ar_usb->pipes[pipe_id]; + struct ath10k_urb_context *urb_context; + struct sk_buff *skb; + struct urb *urb; + int ret, i; + + for (i = 0; i < n_items; i++) { + urb_context = ath10k_usb_alloc_urb_from_pipe(pipe); + if (!urb_context) { + ret = -ENOMEM; + goto err; + } + + skb = items[i].transfer_context; + urb_context->skb = skb; + + urb = usb_alloc_urb(0, GFP_ATOMIC); + if (!urb) { + ret = -ENOMEM; + goto err_free_urb_to_pipe; + } + + usb_fill_bulk_urb(urb, + ar_usb->udev, + pipe->usb_pipe_handle, + skb->data, + skb->len, + ath10k_usb_transmit_complete, urb_context); + + if (!(skb->len % pipe->max_packet_size)) { + /* hit a max 
packet boundary on this pipe */ + urb->transfer_flags |= URB_ZERO_PACKET; + } + + usb_anchor_urb(urb, &pipe->urb_submitted); + ret = usb_submit_urb(urb, GFP_ATOMIC); + if (ret) { + ath10k_dbg(ar, ATH10K_DBG_USB_BULK, + "usb bulk transmit failed: %d\n", ret); + usb_unanchor_urb(urb); + ret = -EINVAL; + goto err_free_urb_to_pipe; + } + + usb_free_urb(urb); + } + + return 0; + +err_free_urb_to_pipe: + ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context); +err: + return ret; +} + +static void ath10k_usb_hif_stop(struct ath10k *ar) +{ + ath10k_usb_flush_all(ar); +} + +static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + + return ar_usb->pipes[pipe_id].urb_cnt; +} + +static int ath10k_usb_submit_ctrl_out(struct ath10k *ar, + u8 req, u16 value, u16 index, void *data, + u32 size) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + u8 *buf = NULL; + int ret; + + if (size > 0) { + buf = kmemdup(data, size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + + /* note: if successful returns number of bytes transferred */ + ret = usb_control_msg(ar_usb->udev, + usb_sndctrlpipe(ar_usb->udev, 0), + req, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, buf, + size, 1000); + + if (ret < 0) { + ath10k_warn(ar, "Failed to submit usb control message: %d\n", + ret); + kfree(buf); + return ret; + } + + kfree(buf); + + return 0; +} + +static int ath10k_usb_submit_ctrl_in(struct ath10k *ar, + u8 req, u16 value, u16 index, void *data, + u32 size) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + u8 *buf = NULL; + int ret; + + if (size > 0) { + buf = kmalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + } + + /* note: if successful returns number of bytes transferred */ + ret = usb_control_msg(ar_usb->udev, + usb_rcvctrlpipe(ar_usb->udev, 0), + req, + USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, buf, + size, 2 * HZ); + + if (ret < 0) { + ath10k_warn(ar, "Failed to read usb control message: %d\n", + ret); + kfree(buf); + return ret; + } + + memcpy((u8 *)data, buf, size); + + kfree(buf); + + return 0; +} + +static int ath10k_usb_ctrl_msg_exchange(struct ath10k *ar, + u8 req_val, u8 *req_buf, u32 req_len, + u8 resp_val, u8 *resp_buf, + u32 *resp_len) +{ + int ret; + + /* send command */ + ret = ath10k_usb_submit_ctrl_out(ar, req_val, 0, 0, + req_buf, req_len); + if (ret) + goto err; + + /* get response */ + if (resp_buf) { + ret = ath10k_usb_submit_ctrl_in(ar, resp_val, 0, 0, + resp_buf, *resp_len); + if (ret) + goto err; + } + + return 0; +err: + return ret; +} + +static int ath10k_usb_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct ath10k_usb_ctrl_diag_cmd_read *cmd; + u32 resp_len; + int ret; + + if (buf_len < sizeof(struct ath10k_usb_ctrl_diag_resp_read)) + return -EINVAL; + + cmd = (struct ath10k_usb_ctrl_diag_cmd_read *)ar_usb->diag_cmd_buffer; + memset(cmd, 0, sizeof(*cmd)); + cmd->cmd = ATH10K_USB_CTRL_DIAG_CC_READ; + cmd->address = cpu_to_le32(address); + resp_len = sizeof(struct ath10k_usb_ctrl_diag_resp_read); + + ret = ath10k_usb_ctrl_msg_exchange(ar, + ATH10K_USB_CONTROL_REQ_DIAG_CMD, + (u8 *)cmd, + sizeof(*cmd), + ATH10K_USB_CONTROL_REQ_DIAG_RESP, + ar_usb->diag_resp_buffer, &resp_len); + if (ret) + return ret; + + if (resp_len != sizeof(struct ath10k_usb_ctrl_diag_resp_read)) + return -EMSGSIZE; + + memcpy(buf, ar_usb->diag_resp_buffer, + sizeof(struct 
ath10k_usb_ctrl_diag_resp_read)); + + return 0; +} + +static int ath10k_usb_hif_diag_write(struct ath10k *ar, u32 address, + const void *data, int nbytes) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct ath10k_usb_ctrl_diag_cmd_write *cmd; + int ret; + + if (nbytes != sizeof(cmd->value)) + return -EINVAL; + + cmd = (struct ath10k_usb_ctrl_diag_cmd_write *)ar_usb->diag_cmd_buffer; + memset(cmd, 0, sizeof(*cmd)); + cmd->cmd = cpu_to_le32(ATH10K_USB_CTRL_DIAG_CC_WRITE); + cmd->address = cpu_to_le32(address); + memcpy(&cmd->value, data, nbytes); + + ret = ath10k_usb_ctrl_msg_exchange(ar, + ATH10K_USB_CONTROL_REQ_DIAG_CMD, + (u8 *)cmd, + sizeof(*cmd), + 0, NULL, NULL); + if (ret) + return ret; + + return 0; +} + +static int ath10k_usb_bmi_exchange_msg(struct ath10k *ar, + void *req, u32 req_len, + void *resp, u32 *resp_len) +{ + int ret; + + if (req) { + ret = ath10k_usb_submit_ctrl_out(ar, + ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD, + 0, 0, req, req_len); + if (ret) { + ath10k_warn(ar, + "unable to send the bmi data to the device: %d\n", + ret); + return ret; + } + } + + if (resp) { + ret = ath10k_usb_submit_ctrl_in(ar, + ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP, + 0, 0, resp, *resp_len); + if (ret) { + ath10k_warn(ar, + "Unable to read the bmi data from the device: %d\n", + ret); + return ret; + } + } + + return 0; +} + +static void ath10k_usb_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) +{ + *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; + *dl_pipe = ATH10K_USB_PIPE_RX_CTRL; +} + +static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + switch (svc_id) { + case ATH10K_HTC_SVC_ID_RSVD_CTRL: + case ATH10K_HTC_SVC_ID_WMI_CONTROL: + *ul_pipe = ATH10K_USB_PIPE_TX_CTRL; + /* due to large control packets, shift to data pipe */ + *dl_pipe = ATH10K_USB_PIPE_RX_DATA; + break; + case ATH10K_HTC_SVC_ID_HTT_DATA_MSG: + *ul_pipe = ATH10K_USB_PIPE_TX_DATA_LP; + /* Disable rxdata2 directly, it will be enabled + * if FW enable rxdata2 + */ + *dl_pipe = ATH10K_USB_PIPE_RX_DATA; + break; + default: + return -EPERM; + } + + return 0; +} + +/* This op is currently only used by htc_wait_target if the HTC ready + * message times out. It is not applicable for USB since there is nothing + * we can do if the HTC ready message does not arrive in time. + * TODO: Make this op non mandatory by introducing a NULL check in the + * hif op wrapper. 
+ */ +static void ath10k_usb_hif_send_complete_check(struct ath10k *ar, + u8 pipe, int force) +{ +} + +static int ath10k_usb_hif_power_up(struct ath10k *ar) +{ + return 0; +} + +static void ath10k_usb_hif_power_down(struct ath10k *ar) +{ + ath10k_usb_flush_all(ar); +} + +#ifdef CONFIG_PM + +static int ath10k_usb_hif_suspend(struct ath10k *ar) +{ + return -EOPNOTSUPP; +} + +static int ath10k_usb_hif_resume(struct ath10k *ar) +{ + return -EOPNOTSUPP; +} +#endif + +static const struct ath10k_hif_ops ath10k_usb_hif_ops = { + .tx_sg = ath10k_usb_hif_tx_sg, + .diag_read = ath10k_usb_hif_diag_read, + .diag_write = ath10k_usb_hif_diag_write, + .exchange_bmi_msg = ath10k_usb_bmi_exchange_msg, + .start = ath10k_usb_hif_start, + .stop = ath10k_usb_hif_stop, + .map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe, + .get_default_pipe = ath10k_usb_hif_get_default_pipe, + .send_complete_check = ath10k_usb_hif_send_complete_check, + .get_free_queue_number = ath10k_usb_hif_get_free_queue_number, + .power_up = ath10k_usb_hif_power_up, + .power_down = ath10k_usb_hif_power_down, +#ifdef CONFIG_PM + .suspend = ath10k_usb_hif_suspend, + .resume = ath10k_usb_hif_resume, +#endif +}; + +static u8 ath10k_usb_get_logical_pipe_num(u8 ep_address, int *urb_count) +{ + u8 pipe_num = ATH10K_USB_PIPE_INVALID; + + switch (ep_address) { + case ATH10K_USB_EP_ADDR_APP_CTRL_IN: + pipe_num = ATH10K_USB_PIPE_RX_CTRL; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_IN: + pipe_num = ATH10K_USB_PIPE_RX_DATA; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_INT_IN: + pipe_num = ATH10K_USB_PIPE_RX_INT; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA2_IN: + pipe_num = ATH10K_USB_PIPE_RX_DATA2; + *urb_count = RX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_CTRL_OUT: + pipe_num = ATH10K_USB_PIPE_TX_CTRL; + *urb_count = TX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT: + pipe_num = ATH10K_USB_PIPE_TX_DATA_LP; + *urb_count = TX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT: + pipe_num = ATH10K_USB_PIPE_TX_DATA_MP; + *urb_count = TX_URB_COUNT; + break; + case ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT: + pipe_num = ATH10K_USB_PIPE_TX_DATA_HP; + *urb_count = TX_URB_COUNT; + break; + default: + /* note: there may be endpoints not currently used */ + break; + } + + return pipe_num; +} + +static int ath10k_usb_alloc_pipe_resources(struct ath10k *ar, + struct ath10k_usb_pipe *pipe, + int urb_cnt) +{ + struct ath10k_urb_context *urb_context; + int i; + + INIT_LIST_HEAD(&pipe->urb_list_head); + init_usb_anchor(&pipe->urb_submitted); + + for (i = 0; i < urb_cnt; i++) { + urb_context = kzalloc(sizeof(*urb_context), GFP_KERNEL); + if (!urb_context) + return -ENOMEM; + + urb_context->pipe = pipe; + + /* we are only allocate the urb contexts here, the actual URB + * is allocated from the kernel as needed to do a transaction + */ + pipe->urb_alloc++; + ath10k_usb_free_urb_to_pipe(pipe, urb_context); + } + + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb alloc resources lpipe %d hpipe 0x%x urbs %d\n", + pipe->logical_pipe_num, pipe->usb_pipe_handle, + pipe->urb_alloc); + + return 0; +} + +static int ath10k_usb_setup_pipe_resources(struct ath10k *ar, + struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct usb_host_interface *iface_desc = interface->cur_altsetting; + struct usb_endpoint_descriptor *endpoint; + struct ath10k_usb_pipe *pipe; + int ret, i, urbcount; + u8 pipe_num; + + ath10k_dbg(ar, ATH10K_DBG_USB, "usb 
setting up pipes using interface\n"); + + /* walk decriptors and setup pipes */ + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + endpoint = &iface_desc->endpoint[i].desc; + + if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb %s bulk ep 0x%2.2x maxpktsz %d\n", + ATH10K_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "rx" : "tx", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize)); + } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb %s int ep 0x%2.2x maxpktsz %d interval %d\n", + ATH10K_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "rx" : "tx", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + ath10k_dbg(ar, ATH10K_DBG_USB, + "usb %s isoc ep 0x%2.2x maxpktsz %d interval %d\n", + ATH10K_USB_IS_DIR_IN + (endpoint->bEndpointAddress) ? + "rx" : "tx", endpoint->bEndpointAddress, + le16_to_cpu(endpoint->wMaxPacketSize), + endpoint->bInterval); + } + urbcount = 0; + + pipe_num = + ath10k_usb_get_logical_pipe_num(endpoint->bEndpointAddress, + &urbcount); + if (pipe_num == ATH10K_USB_PIPE_INVALID) + continue; + + pipe = &ar_usb->pipes[pipe_num]; + if (pipe->ar_usb) + /* hmmm..pipe was already setup */ + continue; + + pipe->ar_usb = ar_usb; + pipe->logical_pipe_num = pipe_num; + pipe->ep_address = endpoint->bEndpointAddress; + pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize); + + if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) { + if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvbulkpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndbulkpipe(ar_usb->udev, + pipe->ep_address); + } + } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) { + if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvintpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndintpipe(ar_usb->udev, + pipe->ep_address); + } + } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) { + /* TODO for ISO */ + if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) { + pipe->usb_pipe_handle = + usb_rcvisocpipe(ar_usb->udev, + pipe->ep_address); + } else { + pipe->usb_pipe_handle = + usb_sndisocpipe(ar_usb->udev, + pipe->ep_address); + } + } + + pipe->ep_desc = endpoint; + + if (!ATH10K_USB_IS_DIR_IN(pipe->ep_address)) + pipe->flags |= ATH10K_USB_PIPE_FLAG_TX; + + ret = ath10k_usb_alloc_pipe_resources(ar, pipe, urbcount); + if (ret) + return ret; + } + + return 0; +} + +static int ath10k_usb_create(struct ath10k *ar, + struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb = ath10k_usb_priv(ar); + struct usb_device *dev = interface_to_usbdev(interface); + struct ath10k_usb_pipe *pipe; + int ret, i; + + usb_set_intfdata(interface, ar_usb); + spin_lock_init(&ar_usb->cs_lock); + ar_usb->udev = dev; + ar_usb->interface = interface; + + for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) { + pipe = &ar_usb->pipes[i]; + INIT_WORK(&pipe->io_complete_work, + ath10k_usb_io_comp_work); + skb_queue_head_init(&pipe->io_comp_queue); + } + + ar_usb->diag_cmd_buffer = kzalloc(ATH10K_USB_MAX_DIAG_CMD, GFP_KERNEL); + if (!ar_usb->diag_cmd_buffer) { + ret = -ENOMEM; + goto err; + } + + ar_usb->diag_resp_buffer = kzalloc(ATH10K_USB_MAX_DIAG_RESP, + GFP_KERNEL); + if (!ar_usb->diag_resp_buffer) { + ret = -ENOMEM; + goto err; + } + + ret = ath10k_usb_setup_pipe_resources(ar, 
interface); + if (ret) + goto err; + + return 0; + +err: + ath10k_usb_destroy(ar); + return ret; +} + +/* ath10k usb driver registered functions */ +static int ath10k_usb_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ + struct ath10k *ar; + struct ath10k_usb *ar_usb; + struct usb_device *dev = interface_to_usbdev(interface); + int ret, vendor_id, product_id; + enum ath10k_hw_rev hw_rev; + u32 chip_id; + + /* Assumption: All USB based chipsets (so far) are QCA9377 based. + * If there will be newer chipsets that does not use the hw reg + * setup as defined in qca6174_regs and qca6174_values, this + * assumption is no longer valid and hw_rev must be setup differently + * depending on chipset. + */ + hw_rev = ATH10K_HW_QCA9377; + + ar = ath10k_core_create(sizeof(*ar_usb), &dev->dev, ATH10K_BUS_USB, + hw_rev, &ath10k_usb_hif_ops); + if (!ar) { + dev_err(&dev->dev, "failed to allocate core\n"); + return -ENOMEM; + } + + usb_get_dev(dev); + vendor_id = le16_to_cpu(dev->descriptor.idVendor); + product_id = le16_to_cpu(dev->descriptor.idProduct); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, + "usb new func vendor 0x%04x product 0x%04x\n", + vendor_id, product_id); + + ar_usb = ath10k_usb_priv(ar); + ret = ath10k_usb_create(ar, interface); + ar_usb->ar = ar; + + ar->dev_id = product_id; + ar->id.vendor = vendor_id; + ar->id.device = product_id; + + /* TODO: don't know yet how to get chip_id with USB */ + chip_id = 0; + ret = ath10k_core_register(ar, chip_id); + if (ret) { + ath10k_warn(ar, "failed to register driver core: %d\n", ret); + goto err; + } + + /* TODO: remove this once USB support is fully implemented */ + ath10k_warn(ar, "WARNING: ath10k USB support is incomplete, don't expect anything to work!\n"); + + return 0; + +err: + ath10k_core_destroy(ar); + + usb_put_dev(dev); + + return ret; +} + +static void ath10k_usb_remove(struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb; + + ar_usb = usb_get_intfdata(interface); + if (!ar_usb) + return; + + ath10k_core_unregister(ar_usb->ar); + ath10k_usb_destroy(ar_usb->ar); + usb_put_dev(interface_to_usbdev(interface)); + ath10k_core_destroy(ar_usb->ar); +} + +#ifdef CONFIG_PM + +static int ath10k_usb_pm_suspend(struct usb_interface *interface, + pm_message_t message) +{ + struct ath10k_usb *ar_usb = usb_get_intfdata(interface); + + ath10k_usb_flush_all(ar_usb->ar); + return 0; +} + +static int ath10k_usb_pm_resume(struct usb_interface *interface) +{ + struct ath10k_usb *ar_usb = usb_get_intfdata(interface); + struct ath10k *ar = ar_usb->ar; + + ath10k_usb_post_recv_transfers(ar, + &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]); + + return 0; +} + +#else + +#define ath10k_usb_pm_suspend NULL +#define ath10k_usb_pm_resume NULL + +#endif + +/* table of devices that work with this driver */ +static struct usb_device_id ath10k_usb_ids[] = { + {USB_DEVICE(0x13b1, 0x0042)}, /* Linksys WUSB6100M */ + { /* Terminating entry */ }, +}; + +MODULE_DEVICE_TABLE(usb, ath10k_usb_ids); + +static struct usb_driver ath10k_usb_driver = { + .name = "ath10k_usb", + .probe = ath10k_usb_probe, + .suspend = ath10k_usb_pm_suspend, + .resume = ath10k_usb_pm_resume, + .disconnect = ath10k_usb_remove, + .id_table = ath10k_usb_ids, + .supports_autosuspend = true, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(ath10k_usb_driver); + +MODULE_AUTHOR("Atheros Communications, Inc."); +MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN USB devices"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git 
a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h new file mode 100644 index 000000000000..f60a3cc7d712 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/usb.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2004-2011 Atheros Communications Inc. + * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. + * Copyright (c) 2016-2017 Erik Stromdahl + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _USB_H_ +#define _USB_H_ + +/* constants */ +#define TX_URB_COUNT 32 +#define RX_URB_COUNT 32 +#define ATH10K_USB_RX_BUFFER_SIZE 4096 + +#define ATH10K_USB_PIPE_INVALID ATH10K_USB_PIPE_MAX + +/* USB endpoint definitions */ +#define ATH10K_USB_EP_ADDR_APP_CTRL_IN 0x81 +#define ATH10K_USB_EP_ADDR_APP_DATA_IN 0x82 +#define ATH10K_USB_EP_ADDR_APP_DATA2_IN 0x83 +#define ATH10K_USB_EP_ADDR_APP_INT_IN 0x84 + +#define ATH10K_USB_EP_ADDR_APP_CTRL_OUT 0x01 +#define ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT 0x02 +#define ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT 0x03 +#define ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT 0x04 + +/* diagnostic command defnitions */ +#define ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD 1 +#define ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP 2 +#define ATH10K_USB_CONTROL_REQ_DIAG_CMD 3 +#define ATH10K_USB_CONTROL_REQ_DIAG_RESP 4 + +#define ATH10K_USB_CTRL_DIAG_CC_READ 0 +#define ATH10K_USB_CTRL_DIAG_CC_WRITE 1 + +#define ATH10K_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02) +#define ATH10K_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03) +#define ATH10K_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01) +#define ATH10K_USB_IS_DIR_IN(addr) ((addr) & 0x80) + +struct ath10k_usb_ctrl_diag_cmd_write { + __le32 cmd; + __le32 address; + __le32 value; + __le32 padding; +} __packed; + +struct ath10k_usb_ctrl_diag_cmd_read { + __le32 cmd; + __le32 address; +} __packed; + +struct ath10k_usb_ctrl_diag_resp_read { + u8 value[4]; +} __packed; + +/* tx/rx pipes for usb */ +enum ath10k_usb_pipe_id { + ATH10K_USB_PIPE_TX_CTRL = 0, + ATH10K_USB_PIPE_TX_DATA_LP, + ATH10K_USB_PIPE_TX_DATA_MP, + ATH10K_USB_PIPE_TX_DATA_HP, + ATH10K_USB_PIPE_RX_CTRL, + ATH10K_USB_PIPE_RX_DATA, + ATH10K_USB_PIPE_RX_DATA2, + ATH10K_USB_PIPE_RX_INT, + ATH10K_USB_PIPE_MAX +}; + +struct ath10k_usb_pipe { + struct list_head urb_list_head; + struct usb_anchor urb_submitted; + u32 urb_alloc; + u32 urb_cnt; + u32 urb_cnt_thresh; + unsigned int usb_pipe_handle; + u32 flags; + u8 ep_address; + u8 logical_pipe_num; + struct ath10k_usb *ar_usb; + u16 max_packet_size; + struct work_struct io_complete_work; + struct sk_buff_head io_comp_queue; + struct usb_endpoint_descriptor *ep_desc; +}; + +#define ATH10K_USB_PIPE_FLAG_TX BIT(0) + +/* usb device object */ +struct ath10k_usb { + /* protects pipe->urb_list_head and pipe->urb_cnt */ + spinlock_t cs_lock; + + struct usb_device *udev; + struct usb_interface *interface; + struct ath10k_usb_pipe pipes[ATH10K_USB_PIPE_MAX]; + u8 
*diag_cmd_buffer; + u8 *diag_resp_buffer; + struct ath10k *ar; +}; + +/* usb urb object */ +struct ath10k_urb_context { + struct list_head link; + struct ath10k_usb_pipe *pipe; + struct sk_buff *skb; + struct ath10k *ar; +}; + +static inline struct ath10k_usb *ath10k_usb_priv(struct ath10k *ar) +{ + return (struct ath10k_usb *)ar->drv_priv; +} + +#endif -- cgit v1.2.3-55-g7522 From 433ef1b226788d1a5e6fbdc6697ff2c06ba6a9b0 Mon Sep 17 00:00:00 2001 From: Erik Stromdahl Date: Fri, 28 Jul 2017 15:15:40 +0300 Subject: ath10k: sdio: fix compile warning As suggested by Arnd Bergmann, replace "while (time_before_...) {}" with "do {} while (time_before_...)" This fixes the following warnings detected by gcc 4.1.2: drivers/net/wireless/ath/ath10k/sdio.c: In function ‘ath10k_sdio_mbox_rxmsg_pending_handler’: drivers/net/wireless/ath/ath10k/sdio.c:676: warning: ‘ret’ may be used uninitialized in this function ... drivers/net/wireless/ath/ath10k/sdio.c: In function ‘ath10k_sdio_irq_handler’: drivers/net/wireless/ath/ath10k/sdio.c:1331: warning: ‘ret’ may be used uninitialized in this function Signed-off-by: Erik Stromdahl Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/sdio.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 859ed870bd97..48268f02bc07 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -683,7 +683,7 @@ static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar, lookaheads[0] = msg_lookahead; timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ; - while (time_before(jiffies, timeout)) { + do { /* Try to allocate as many HTC RX packets indicated by * n_lookaheads. */ @@ -719,7 +719,7 @@ static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar, * performance in high throughput situations. */ *done = false; - } + } while (time_before(jiffies, timeout)); if (ret && (ret != -ECANCELED)) ath10k_warn(ar, "failed to get pending recv messages: %d\n", @@ -1336,11 +1336,11 @@ static void ath10k_sdio_irq_handler(struct sdio_func *func) sdio_release_host(ar_sdio->func); timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ; - while (time_before(jiffies, timeout) && !done) { + do { ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done); if (ret) break; - } + } while (time_before(jiffies, timeout) && !done); sdio_claim_host(ar_sdio->func); -- cgit v1.2.3-55-g7522 From 810fe818d6a19b5dd822600003750c2ec6543f59 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 28 Jul 2017 15:15:41 +0300 Subject: ath10k: extend wmi service map to accommodate new services Though there is room to accommodate 512 services in wmi service ready event, target uses only first 4-bits of each 32-bit word for advertising wmi services thereby limiting max wmi services to 64. TDLS implementation for 10.4 firmwares introduces new wmi services by making use of remaining unused bits of each 32-bit word, therefore the wmi service mapping in host needs to be extended. This patch adds the logic to extend the wmi SVCMAP to accommodate new wmi services. 
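
To make the bit layout described above concrete, here is a minimal standalone C sketch (not part of the patch itself) that mirrors the arithmetic of the WMI_EXT_SERVICE_IS_ENABLED macro added in the diff below; the helper name, the 64-service base value and the example service id 70 are assumptions made only for this illustration. Service ids beyond the original 64-service limit are packed into the upper 28 bits (bits 4-31) of each 32-bit service word.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* base number of services covered by the original 4-bits-per-word map */
#define WMI_BASE_SERVICES 64

/* Illustrative sketch of the extended lookup, not the driver macro itself. */
static bool ext_service_is_enabled(const uint32_t *bmap, int svc_id, int len)
{
	int ext = svc_id - len;   /* position within the extension range */
	int word = ext / 28;      /* 28 usable bits per 32-bit word */
	int bit = (ext % 28) + 4; /* skip the 4 bits used by the old map */

	return svc_id >= len && (bmap[word] & (1u << bit));
}

int main(void)
{
	uint32_t bmap[4] = { 0 };
	int svc_id = 70;          /* hypothetical extended service id */

	/* mark the service as advertised by the target, then test the lookup */
	bmap[(svc_id - WMI_BASE_SERVICES) / 28] |=
		1u << (((svc_id - WMI_BASE_SERVICES) % 28) + 4);

	printf("service %d enabled: %d\n", svc_id,
	       ext_service_is_enabled(bmap, svc_id, WMI_BASE_SERVICES));
	return 0;
}

With len = 64, extended id 70 lands in word 0, bit 10, which is the same word and bit position the macro in the diff below computes.
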
Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.h | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index baa38c8f847c..3a4cbbcd815e 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -420,9 +420,20 @@ static inline char *wmi_service_name(int service_id) __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \ BIT((svc_id) % (sizeof(u32)))) +/* This extension is required to accommodate new services, current limit + * for wmi_services is 64 as target is using only 4-bits of each 32-bit + * wmi_service word. Extending this to make use of remaining unused bits + * for new services. + */ +#define WMI_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \ + ((svc_id) >= (len) && \ + __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 28]) & \ + BIT(((((svc_id) - (len)) % 28) & 0x1f) + 4)) + #define SVCMAP(x, y, len) \ do { \ - if (WMI_SERVICE_IS_ENABLED((in), (x), (len))) \ + if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \ + (WMI_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \ __set_bit(y, out); \ } while (0) -- cgit v1.2.3-55-g7522 From add6cd8d5ab7aea383f13ea5a7dd573257ef9c24 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 28 Jul 2017 15:15:42 +0300 Subject: ath10k: add tdls support for 10.4 firmwares This patch adds the support of TDLS feature for 10.4 firmware versions. A new WMI service is added to advertise the support of TDLS for 10.4 firmwares. Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 9 ++ drivers/net/wireless/ath/ath10k/hw.h | 5 + drivers/net/wireless/ath/ath10k/mac.c | 5 +- drivers/net/wireless/ath/ath10k/wmi.c | 163 ++++++++++++++++++++- drivers/net/wireless/ath/ath10k/wmi.h | 257 +++++++++++++++++++++++++++++++++ 5 files changed, 436 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 2aa320a223af..3602aa462662 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1886,6 +1886,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->fw_stats_req_mask = WMI_10_4_STAT_PEER | WMI_10_4_STAT_PEER_EXTD; ar->max_spatial_stream = ar->hw_params.max_spatial_stream; + ar->max_num_tdls_vdevs = TARGET_10_4_NUM_TDLS_VDEVS; if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, fw_file->fw_features)) @@ -2124,6 +2125,14 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, ar->running_fw->fw_file.fw_features)) val |= WMI_10_4_COEX_GPIO_SUPPORT; + if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, + ar->wmi.svc_map)) + val |= WMI_10_4_TDLS_EXPLICIT_MODE_ONLY; + + if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, + ar->wmi.svc_map)) + val |= WMI_10_4_TDLS_UAPSD_BUFFER_STA; + status = ath10k_mac_ext_resource_config(ar, val); if (status) { ath10k_err(ar, diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 19e43512af50..0c089f6dd3d9 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -720,6 +720,11 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, #define TARGET_10_4_IPHDR_PAD_CONFIG 1 #define TARGET_10_4_QWRAP_CONFIG 0 +/* TDLS config */ +#define TARGET_10_4_NUM_TDLS_VDEVS 1 +#define TARGET_10_4_NUM_TDLS_BUFFER_STA 1 +#define TARGET_10_4_NUM_TDLS_SLEEP_STA 1 + /* 
Maximum number of Copy Engine's supported */ #define CE_COUNT_MAX 12 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 55c808f03a84..523a5490dece 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -8197,8 +8197,11 @@ int ath10k_mac_register(struct ath10k *ar) NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; } - if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map)) + if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) || + test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) { ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; + ieee80211_hw_set(ar->hw, TDLS_WIDER_BW); + } ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 96cd1ebd6a7e..a09e6bcfb52f 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -651,8 +651,6 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = { .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID, .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID, .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED, - .tdls_set_state_cmdid = WMI_CMD_UNSUPPORTED, - .tdls_peer_update_cmdid = WMI_CMD_UNSUPPORTED, .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED, .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID, .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID, @@ -711,6 +709,33 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = { .pdev_bss_chan_info_request_cmdid = WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID, + .vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID, + .set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID, + .atf_ssid_grouping_request_cmdid = + WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID, + .peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID, + .set_periodic_channel_stats_cfg_cmdid = + WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG, + .peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID, + .btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID, + .peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID, + .peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID, + .peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID, + .pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID, + .coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID, + .pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID, + .pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID, + .vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID, + .prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID, + .config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID, + .debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID, + .get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID, + .pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID, + .vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID, + .pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID, + .tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID, + .tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID, + .tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID, }; /* MAIN WMI VDEV param map */ @@ -7803,14 +7828,28 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar, { struct wmi_ext_resource_config_10_4_cmd *cmd; struct 
sk_buff *skb; + u32 num_tdls_sleep_sta = 0; skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); if (!skb) return ERR_PTR(-ENOMEM); + if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map)) + num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA; + cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data; cmd->host_platform_config = __cpu_to_le32(type); cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap); + cmd->wlan_gpio_priority = __cpu_to_le32(-1); + cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT); + cmd->coex_gpio_pin1 = __cpu_to_le32(-1); + cmd->coex_gpio_pin2 = __cpu_to_le32(-1); + cmd->coex_gpio_pin3 = __cpu_to_le32(-1); + cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS); + cmd->num_tdls_conn_table_entries = __cpu_to_le32(20); + cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta); + cmd->max_tdls_concurrent_buffer_sta = + __cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi ext resource config host type %d firmware feature bitmap %08x\n", @@ -7818,6 +7857,124 @@ ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar, return skb; } +static struct sk_buff * +ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id, + enum wmi_tdls_state state) +{ + struct wmi_10_4_tdls_set_state_cmd *cmd; + struct sk_buff *skb; + u32 options = 0; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) + state = WMI_TDLS_ENABLE_PASSIVE; + + if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map)) + options |= WMI_TDLS_BUFFER_STA_EN; + + cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(vdev_id); + cmd->state = __cpu_to_le32(state); + cmd->notification_interval_ms = __cpu_to_le32(5000); + cmd->tx_discovery_threshold = __cpu_to_le32(100); + cmd->tx_teardown_threshold = __cpu_to_le32(5); + cmd->rssi_teardown_threshold = __cpu_to_le32(-75); + cmd->rssi_delta = __cpu_to_le32(-20); + cmd->tdls_options = __cpu_to_le32(options); + cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2); + cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000); + cmd->tdls_puapsd_mask = __cpu_to_le32(0xf); + cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0); + cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10); + cmd->teardown_notification_ms = __cpu_to_le32(10); + cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n", + state, vdev_id); + return skb; +} + +static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp) +{ + u32 peer_qos = 0; + + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) + peer_qos |= WMI_TDLS_PEER_QOS_AC_VO; + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) + peer_qos |= WMI_TDLS_PEER_QOS_AC_VI; + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + peer_qos |= WMI_TDLS_PEER_QOS_AC_BK; + if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) + peer_qos |= WMI_TDLS_PEER_QOS_AC_BE; + + peer_qos |= SM(sp, WMI_TDLS_PEER_SP); + + return peer_qos; +} + +static struct sk_buff * +ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar, + const struct wmi_tdls_peer_update_cmd_arg *arg, + const struct wmi_tdls_peer_capab_arg *cap, + const struct wmi_channel_arg *chan_arg) +{ + struct wmi_10_4_tdls_peer_update_cmd *cmd; + struct wmi_tdls_peer_capabilities *peer_cap; + struct wmi_channel *chan; + struct sk_buff *skb; + u32 peer_qos; + int len, chan_len; + int 
i; + + /* tdls peer update cmd has place holder for one channel*/ + chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0; + + len = sizeof(*cmd) + chan_len * sizeof(*chan); + + skb = ath10k_wmi_alloc_skb(ar, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + memset(skb->data, 0, sizeof(*cmd)); + + cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data; + cmd->vdev_id = __cpu_to_le32(arg->vdev_id); + ether_addr_copy(cmd->peer_macaddr.addr, arg->addr); + cmd->peer_state = __cpu_to_le32(arg->peer_state); + + peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues, + cap->peer_max_sp); + + peer_cap = &cmd->peer_capab; + peer_cap->peer_qos = __cpu_to_le32(peer_qos); + peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support); + peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support); + peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass); + peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass); + peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len); + peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len); + + for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++) + peer_cap->peer_operclass[i] = cap->peer_operclass[i]; + + peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder); + peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num); + peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw); + + for (i = 0; i < cap->peer_chan_len; i++) { + chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i]; + ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]); + } + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi tdls peer update vdev %i state %d n_chans %u\n", + arg->vdev_id, arg->peer_state, cap->peer_chan_len); + return skb; +} + static struct sk_buff * ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value) { @@ -8197,6 +8354,8 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill, .ext_resource_config = ath10k_wmi_10_4_ext_resource_config, + .gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state, + .gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update, /* shared with 10.2 */ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 3a4cbbcd815e..60d2bb045042 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -184,6 +184,17 @@ enum wmi_service { WMI_SERVICE_TX_MODE_PUSH_ONLY, WMI_SERVICE_TX_MODE_PUSH_PULL, WMI_SERVICE_TX_MODE_DYNAMIC, + WMI_SERVICE_VDEV_RX_FILTER, + WMI_SERVICE_BTCOEX, + WMI_SERVICE_CHECK_CAL_VERSION, + WMI_SERVICE_DBGLOG_WARN2, + WMI_SERVICE_BTCOEX_DUTY_CYCLE, + WMI_SERVICE_4_WIRE_COEX_SUPPORT, + WMI_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_SERVICE_PROG_GPIO_BAND_SELECT, + WMI_SERVICE_SMART_LOGGING_SUPPORT, + WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, /* keep last */ WMI_SERVICE_MAX, @@ -310,6 +321,21 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY, WMI_10_4_SERVICE_TX_MODE_PUSH_PULL, WMI_10_4_SERVICE_TX_MODE_DYNAMIC, + WMI_10_4_SERVICE_VDEV_RX_FILTER, + WMI_10_4_SERVICE_BTCOEX, + WMI_10_4_SERVICE_CHECK_CAL_VERSION, + WMI_10_4_SERVICE_DBGLOG_WARN2, + WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE, + WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT, + WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT, + WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT, + WMI_10_4_SERVICE_TDLS, + 
WMI_10_4_SERVICE_TDLS_OFFCHAN, + WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA, + WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA, + WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY, }; static inline char *wmi_service_name(int service_id) @@ -408,6 +434,16 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY); SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL); SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC); + SVCSTR(WMI_SERVICE_VDEV_RX_FILTER); + SVCSTR(WMI_SERVICE_CHECK_CAL_VERSION); + SVCSTR(WMI_SERVICE_DBGLOG_WARN2); + SVCSTR(WMI_SERVICE_BTCOEX_DUTY_CYCLE); + SVCSTR(WMI_SERVICE_4_WIRE_COEX_SUPPORT); + SVCSTR(WMI_SERVICE_EXTENDED_NSS_SUPPORT); + SVCSTR(WMI_SERVICE_PROG_GPIO_BAND_SELECT); + SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT); + SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE); + SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY); default: return NULL; } @@ -674,6 +710,36 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_TX_MODE_PUSH_PULL, len); SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC, WMI_SERVICE_TX_MODE_DYNAMIC, len); + SVCMAP(WMI_10_4_SERVICE_VDEV_RX_FILTER, + WMI_SERVICE_VDEV_RX_FILTER, len); + SVCMAP(WMI_10_4_SERVICE_BTCOEX, + WMI_SERVICE_BTCOEX, len); + SVCMAP(WMI_10_4_SERVICE_CHECK_CAL_VERSION, + WMI_SERVICE_CHECK_CAL_VERSION, len); + SVCMAP(WMI_10_4_SERVICE_DBGLOG_WARN2, + WMI_SERVICE_DBGLOG_WARN2, len); + SVCMAP(WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE, + WMI_SERVICE_BTCOEX_DUTY_CYCLE, len); + SVCMAP(WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT, + WMI_SERVICE_4_WIRE_COEX_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT, + WMI_SERVICE_EXTENDED_NSS_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT, + WMI_SERVICE_PROG_GPIO_BAND_SELECT, len); + SVCMAP(WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT, + WMI_SERVICE_SMART_LOGGING_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_TDLS, + WMI_SERVICE_TDLS, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_OFFCHAN, + WMI_SERVICE_TDLS_OFFCHAN, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA, + WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA, + WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, + WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len); + SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY, + WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len); } #undef SVCMAP @@ -848,6 +914,29 @@ struct wmi_cmd_map { u32 pdev_bss_chan_info_request_cmdid; u32 pdev_enable_adaptive_cca_cmdid; u32 ext_resource_cfg_cmdid; + u32 vdev_set_ie_cmdid; + u32 set_lteu_config_cmdid; + u32 atf_ssid_grouping_request_cmdid; + u32 peer_atf_ext_request_cmdid; + u32 set_periodic_channel_stats_cfg_cmdid; + u32 peer_bwf_request_cmdid; + u32 btcoex_cfg_cmdid; + u32 peer_tx_mu_txmit_count_cmdid; + u32 peer_tx_mu_txmit_rstcnt_cmdid; + u32 peer_gid_userpos_list_cmdid; + u32 pdev_check_cal_version_cmdid; + u32 coex_version_cfg_cmid; + u32 pdev_get_rx_filter_cmdid; + u32 pdev_extended_nss_cfg_cmdid; + u32 vdev_set_scan_nac_rssi_cmdid; + u32 prog_gpio_band_select_cmdid; + u32 config_smart_logging_cmdid; + u32 debug_fatal_condition_cmdid; + u32 get_tsf_timer_cmdid; + u32 pdev_get_tpc_table_cmdid; + u32 vdev_sifs_trigger_time_cmdid; + u32 pdev_wds_entry_list_cmdid; + u32 tdls_set_offchan_mode_cmdid; }; /* @@ -1658,6 +1747,29 @@ enum wmi_10_4_cmd_id { WMI_10_4_EXT_RESOURCE_CFG_CMDID, WMI_10_4_VDEV_SET_IE_CMDID, WMI_10_4_SET_LTEU_CONFIG_CMDID, + WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID, + WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID, + 
WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG, + WMI_10_4_PEER_BWF_REQUEST_CMDID, + WMI_10_4_BTCOEX_CFG_CMDID, + WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID, + WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID, + WMI_10_4_PEER_GID_USERPOS_LIST_CMDID, + WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID, + WMI_10_4_COEX_VERSION_CFG_CMID, + WMI_10_4_PDEV_GET_RX_FILTER_CMDID, + WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID, + WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID, + WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID, + WMI_10_4_CONFIG_SMART_LOGGING_CMDID, + WMI_10_4_DEBUG_FATAL_CONDITION_CMDID, + WMI_10_4_GET_TSF_TIMER_CMDID, + WMI_10_4_PDEV_GET_TPC_TABLE_CMDID, + WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID, + WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID, + WMI_10_4_TDLS_SET_STATE_CMDID, + WMI_10_4_TDLS_PEER_UPDATE_CMDID, + WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID, WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1, }; @@ -1721,6 +1833,18 @@ enum wmi_10_4_event_id { WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID, WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID, WMI_10_4_MU_REPORT_EVENTID, + WMI_10_4_TX_DATA_TRAFFIC_CTRL_EVENTID, + WMI_10_4_PEER_TX_MU_TXMIT_COUNT_EVENTID, + WMI_10_4_PEER_GID_USERPOS_LIST_EVENTID, + WMI_10_4_PDEV_CHECK_CAL_VERSION_EVENTID, + WMI_10_4_ATF_PEER_STATS_EVENTID, + WMI_10_4_PDEV_GET_RX_FILTER_EVENTID, + WMI_10_4_NAC_RSSI_EVENTID, + WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID, + WMI_10_4_GET_TSF_TIMER_RESP_EVENTID, + WMI_10_4_PDEV_TPC_TABLE_EVENTID, + WMI_10_4_PDEV_WDS_ENTRY_LIST_EVENTID, + WMI_10_4_TDLS_PEER_EVENTID, WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1, }; @@ -2729,6 +2853,18 @@ struct wmi_resource_config_10_4 { __le32 qwrap_config; } __packed; +enum wmi_coex_version { + WMI_NO_COEX_VERSION_SUPPORT = 0, + /* 3 wire coex support*/ + WMI_COEX_VERSION_1 = 1, + /* 2.5 wire coex support*/ + WMI_COEX_VERSION_2 = 2, + /* 2.5 wire coex with duty cycle support */ + WMI_COEX_VERSION_3 = 3, + /* 4 wire coex support*/ + WMI_COEX_VERSION_4 = 4, +}; + /** * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags * @WMI_10_4_LTEU_SUPPORT: LTEU config @@ -2737,6 +2873,14 @@ struct wmi_resource_config_10_4 { * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats * @WMI_10_4_PEER_STATS: Per station stats + * @WMI_10_4_VDEV_STATS: Per vdev stats + * @WMI_10_4_TDLS: Implicit TDLS support in firmware enable/disable + * @WMI_10_4_TDLS_OFFCHAN: TDLS offchannel support enable/disable + * @WMI_10_4_TDLS_UAPSD_BUFFER_STA: TDLS buffer sta support enable/disable + * @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable + * @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host + * enable/disable + * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY:Explicit TDLS mode enable/disable */ enum wmi_10_4_feature_mask { WMI_10_4_LTEU_SUPPORT = BIT(0), @@ -2745,6 +2889,14 @@ enum wmi_10_4_feature_mask { WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3), WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4), WMI_10_4_PEER_STATS = BIT(5), + WMI_10_4_VDEV_STATS = BIT(6), + WMI_10_4_TDLS = BIT(7), + WMI_10_4_TDLS_OFFCHAN = BIT(8), + WMI_10_4_TDLS_UAPSD_BUFFER_STA = BIT(9), + WMI_10_4_TDLS_UAPSD_SLEEP_STA = BIT(10), + WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11), + WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12), + }; struct wmi_ext_resource_config_10_4_cmd { @@ -2752,6 +2904,22 @@ struct wmi_ext_resource_config_10_4_cmd { __le32 host_platform_config; /* see enum wmi_10_4_feature_mask */ __le32 fw_feature_bitmap; + /* WLAN priority GPIO number */ + __le32 wlan_gpio_priority; + /* see enum 
wmi_coex_version */ + __le32 coex_version; + /* COEX GPIO config */ + __le32 coex_gpio_pin1; + __le32 coex_gpio_pin2; + __le32 coex_gpio_pin3; + /* number of vdevs allowed to perform tdls */ + __le32 num_tdls_vdevs; + /* number of peers to track per TDLS vdev */ + __le32 num_tdls_conn_table_entries; + /* number of tdls sleep sta supported */ + __le32 max_tdls_concurrent_sleep_sta; + /* number of tdls buffer sta supported */ + __le32 max_tdls_concurrent_buffer_sta; }; /* strucutre describing host memory chunk. */ @@ -6567,6 +6735,22 @@ struct wmi_tdls_peer_update_cmd_arg { #define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32 +#define WMI_TDLS_PEER_SP_MASK 0x60 +#define WMI_TDLS_PEER_SP_LSB 5 + +enum wmi_tdls_options { + WMI_TDLS_OFFCHAN_EN = BIT(0), + WMI_TDLS_BUFFER_STA_EN = BIT(1), + WMI_TDLS_SLEEP_STA_EN = BIT(2), +}; + +enum { + WMI_TDLS_PEER_QOS_AC_VO = BIT(0), + WMI_TDLS_PEER_QOS_AC_VI = BIT(1), + WMI_TDLS_PEER_QOS_AC_BK = BIT(2), + WMI_TDLS_PEER_QOS_AC_BE = BIT(3), +}; + struct wmi_tdls_peer_capab_arg { u8 peer_uapsd_queues; u8 peer_max_sp; @@ -6582,6 +6766,79 @@ struct wmi_tdls_peer_capab_arg { u32 pref_offchan_bw; }; +struct wmi_10_4_tdls_set_state_cmd { + __le32 vdev_id; + __le32 state; + __le32 notification_interval_ms; + __le32 tx_discovery_threshold; + __le32 tx_teardown_threshold; + __le32 rssi_teardown_threshold; + __le32 rssi_delta; + __le32 tdls_options; + __le32 tdls_peer_traffic_ind_window; + __le32 tdls_peer_traffic_response_timeout_ms; + __le32 tdls_puapsd_mask; + __le32 tdls_puapsd_inactivity_time_ms; + __le32 tdls_puapsd_rx_frame_threshold; + __le32 teardown_notification_ms; + __le32 tdls_peer_kickout_threshold; +} __packed; + +struct wmi_tdls_peer_capabilities { + __le32 peer_qos; + __le32 buff_sta_support; + __le32 off_chan_support; + __le32 peer_curr_operclass; + __le32 self_curr_operclass; + __le32 peer_chan_len; + __le32 peer_operclass_len; + u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES]; + __le32 is_peer_responder; + __le32 pref_offchan_num; + __le32 pref_offchan_bw; + struct wmi_channel peer_chan_list[1]; +} __packed; + +struct wmi_10_4_tdls_peer_update_cmd { + __le32 vdev_id; + struct wmi_mac_addr peer_macaddr; + __le32 peer_state; + __le32 reserved[4]; + struct wmi_tdls_peer_capabilities peer_capab; +} __packed; + +enum wmi_tdls_peer_reason { + WMI_TDLS_TEARDOWN_REASON_TX, + WMI_TDLS_TEARDOWN_REASON_RSSI, + WMI_TDLS_TEARDOWN_REASON_SCAN, + WMI_TDLS_DISCONNECTED_REASON_PEER_DELETE, + WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT, + WMI_TDLS_TEARDOWN_REASON_BAD_PTR, + WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE, + WMI_TDLS_ENTER_BUF_STA, + WMI_TDLS_EXIT_BUF_STA, + WMI_TDLS_ENTER_BT_BUSY_MODE, + WMI_TDLS_EXIT_BT_BUSY_MODE, + WMI_TDLS_SCAN_STARTED_EVENT, + WMI_TDLS_SCAN_COMPLETED_EVENT, +}; + +enum wmi_tdls_peer_notification { + WMI_TDLS_SHOULD_DISCOVER, + WMI_TDLS_SHOULD_TEARDOWN, + WMI_TDLS_PEER_DISCONNECTED, + WMI_TDLS_CONNECTION_TRACKER_NOTIFICATION, +}; + +struct wmi_tdls_peer_event { + struct wmi_mac_addr peer_macaddr; + /* see enum wmi_tdls_peer_notification*/ + __le32 peer_status; + /* see enum wmi_tdls_peer_reason */ + __le32 peer_reason; + __le32 vdev_id; +} __packed; + enum wmi_txbf_conf { WMI_TXBF_CONF_UNSUPPORTED, WMI_TXBF_CONF_BEFORE_ASSOC, -- cgit v1.2.3-55-g7522 From be5b4f4002a6c1c37279c69e093567109be982f4 Mon Sep 17 00:00:00 2001 From: Manikanta Pubbisetty Date: Fri, 28 Jul 2017 15:15:43 +0300 Subject: ath10k: push peer type to target for TDLS peers WMI interface for all the firmwares(except QCA6174) does not include the type of peer(default/bss/tdls) requested during 
peer creation, therefore target creates a default peer. TDLS implementation on 10.4 firmware requires host to configure the peer type(tdls) for TDLS peers. This patch adds peer type parameter to the existing WMI interface for peer creation to accommodate this requirement. Tested this change on QCA9888(10.4-3.5.1-00018) and QCA988x(10.2.4.70.9-2) with ping tests for AP/STA modes. Signed-off-by: Manikanta Pubbisetty Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 1 + drivers/net/wireless/ath/ath10k/wmi.h | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index a09e6bcfb52f..38a97086708b 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -6498,6 +6498,7 @@ ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id, cmd = (struct wmi_peer_create_cmd *)skb->data; cmd->vdev_id = __cpu_to_le32(vdev_id); ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); + cmd->peer_type = __cpu_to_le32(peer_type); ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi peer create vdev_id %d peer_addr %pM\n", diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 60d2bb045042..7a3606dde227 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -5877,6 +5877,7 @@ struct wmi_tbtt_offset_event { struct wmi_peer_create_cmd { __le32 vdev_id; struct wmi_mac_addr peer_macaddr; + __le32 peer_type; } __packed; enum wmi_peer_type { -- cgit v1.2.3-55-g7522 From a764284f34f9b0b07f34907947d38d6632f61a1b Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Fri, 28 Jul 2017 15:15:44 +0300 Subject: ath10k: explicitly request exclusive reset control Commit a53e35db70d1 ("reset: Ensure drivers are explicit when requesting reset lines") started to transition the reset control request API calls to explicitly state whether the driver needs exclusive or shared reset control behavior. Convert all drivers requesting exclusive resets to the explicit API call so the temporary transition helpers can be removed. No functional changes. 
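
As a minimal consumer-side sketch of the explicit API this patch converts ath10k to (the device pointer and the "core_cold" line id are placeholders chosen for illustration, not taken from the patch), a driver that owns its reset sequence requests the line exclusively and can then assert and deassert it directly:

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

/* Illustrative sketch only: request the reset line exclusively so that
 * assert/deassert act immediately for this single consumer, which is the
 * behaviour reset sequences like ath10k's AHB cold/warm resets rely on.
 */
static int example_cold_reset(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_exclusive(dev, "core_cold");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	usleep_range(10, 20);	/* hold the block in reset briefly */

	return reset_control_deassert(rst);
}

A shared request (devm_reset_control_get_shared()) would instead keep the line deasserted until every user agreed, which is why drivers that drive their own reset sequences, like the ath10k AHB code in the diff below, must ask for exclusive control.
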
Cc: Kalle Valo Cc: ath10k@lists.infradead.org Cc: linux-wireless@vger.kernel.org Cc: netdev@vger.kernel.org Signed-off-by: Philipp Zabel Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ahb.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index b36dd792fbb2..ff6815e95684 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -197,35 +197,40 @@ static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar) dev = &ar_ahb->pdev->dev; - ar_ahb->core_cold_rst = devm_reset_control_get(dev, "wifi_core_cold"); + ar_ahb->core_cold_rst = devm_reset_control_get_exclusive(dev, + "wifi_core_cold"); if (IS_ERR(ar_ahb->core_cold_rst)) { ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n", PTR_ERR(ar_ahb->core_cold_rst)); return PTR_ERR(ar_ahb->core_cold_rst); } - ar_ahb->radio_cold_rst = devm_reset_control_get(dev, "wifi_radio_cold"); + ar_ahb->radio_cold_rst = devm_reset_control_get_exclusive(dev, + "wifi_radio_cold"); if (IS_ERR(ar_ahb->radio_cold_rst)) { ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n", PTR_ERR(ar_ahb->radio_cold_rst)); return PTR_ERR(ar_ahb->radio_cold_rst); } - ar_ahb->radio_warm_rst = devm_reset_control_get(dev, "wifi_radio_warm"); + ar_ahb->radio_warm_rst = devm_reset_control_get_exclusive(dev, + "wifi_radio_warm"); if (IS_ERR(ar_ahb->radio_warm_rst)) { ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n", PTR_ERR(ar_ahb->radio_warm_rst)); return PTR_ERR(ar_ahb->radio_warm_rst); } - ar_ahb->radio_srif_rst = devm_reset_control_get(dev, "wifi_radio_srif"); + ar_ahb->radio_srif_rst = devm_reset_control_get_exclusive(dev, + "wifi_radio_srif"); if (IS_ERR(ar_ahb->radio_srif_rst)) { ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n", PTR_ERR(ar_ahb->radio_srif_rst)); return PTR_ERR(ar_ahb->radio_srif_rst); } - ar_ahb->cpu_init_rst = devm_reset_control_get(dev, "wifi_cpu_init"); + ar_ahb->cpu_init_rst = devm_reset_control_get_exclusive(dev, + "wifi_cpu_init"); if (IS_ERR(ar_ahb->cpu_init_rst)) { ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n", PTR_ERR(ar_ahb->cpu_init_rst)); -- cgit v1.2.3-55-g7522 From f085c105479f05bf47cd8f1f9786be2a73de29cb Mon Sep 17 00:00:00 2001 From: Christoph Fritz Date: Fri, 28 Jul 2017 15:15:46 +0300 Subject: ath9k: fix debugfs file permission This patch fixes a trivial debugfs file permission issue. Debugfs file ack_to has no write function, so S_IWUSR gets purged. Signed-off-by: Christoph Fritz Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 2e64977a8ab6..01fa30117288 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1452,7 +1452,7 @@ int ath9k_init_debug(struct ath_hw *ah) #endif #ifdef CONFIG_ATH9K_DYNACK - debugfs_create_file("ack_to", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, + debugfs_create_file("ack_to", S_IRUSR, sc->debug.debugfs_phy, sc, &fops_ackto); #endif debugfs_create_file("tpc", S_IRUSR | S_IWUSR, -- cgit v1.2.3-55-g7522 From 129e12a9eb42fbccf4dd5bcc2ad3d4645e14e788 Mon Sep 17 00:00:00 2001 From: Rosen Penev Date: Fri, 28 Jul 2017 15:15:47 +0300 Subject: ath9k: Add Dell Wireless 1802 with wowlan capability Add the Dell Wireless 1802 card as an AR9462 in the ath9k pci list. 
Note that the wowlan feature is supported and has been tested successfully. Signed-off by: Rosen Penev Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/pci.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 7b7627f85d3a..223606311261 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -388,6 +388,11 @@ static const struct pci_device_id ath_pci_id_table[] = { PCI_VENDOR_ID_DELL, 0x020B), .driver_data = ATH9K_PCI_WOW }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, + 0x0034, + PCI_VENDOR_ID_DELL, + 0x0300), + .driver_data = ATH9K_PCI_WOW }, /* Killer Wireless (2x2) */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS, -- cgit v1.2.3-55-g7522 From 4165cf7ba52a381c95d1cce0d42a48aac2c96ea3 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Mon, 3 Jul 2017 09:38:45 -0400 Subject: wcn36xx: check dma_mapping_error() Fixes splat: wcn36xx a204000.wcnss:smd-edge:wcnss:wifi: DMA-API: device driver failed to check map error[device address=0x00000000b45ba000] [size=3872 bytes] [mapped as single] ------------[ cut here ]------------ WARNING: CPU: 0 PID: 0 at ../lib/dma-debug.c:1167 check_unmap+0x474/0x8d0 Modules linked in: bnep(E) arc4(E) wcn36xx(E) mac80211(E) btqcomsmd(E) btqca(E) bluetooth(E) cfg80211(E) ecdh_generic(E) rfkill(E) vfat(E) fat(E) wcnss_ctrl qcom_wcnss_pil(E) mdt_loader(E) qcom_common(E) remoteproc(E) crc32_ce(E) virtio_ring(E) snd_soc_lpass_apq8016(E) snd_soc_lpass_cpu(E) virtio(E) snd_soc_lpass_platform(E) leds_gpio(E) snd_soc_hdmi_codec(E) snd_soc_apq8016_sbc(E) snd_soc_msm8916_digital(E) snd_soc_core(E) qcom_spmi_temp_alarm(E) ac97_bus(E) snd_pcm_dmaengine(E) snd_seq(E) snd_seq_device(E) snd_pcm(E) spi_qup(E) nvmem_qfprom(E) snd_timer(E) snd(E) soundcore(E) msm_rng(E) qcom_tsens(E) nvmem_core(E) uas(E) usb_storage(E) dm9601(E) cdc_ether(E) usbnet(E) mii(E) mmc_block(E) sdhci_msm(E) sdhci_pltfm(E) qcom_spmi_vadc(E) qcom_vadc_common(PE) clk_smd_rpm(E) industrialio(E) qcom_smd_regulator(E) pinctrl_spmi_mpp(E) pinctrl_spmi_gpio(E) rtc_pm8xxx(E) adv7511(E) smd_rpm(E) qcom_spmi_pmic(E) regmap_spmi(E) phy_msm_usb(E) usb3503(E) extcon_usb_gpio(E) ci_hdrc_msm(E) ci_hdrc(E) qcom_hwspinlock(E) udc_core(E) extcon_core(E) ehci_msm(E) i2c_qup(E) sdhci(E) msm(E) mmc_core(E) drm_kms_helper(E) syscopyarea(E) sysfillrect(E) sysimgblt(E) fb_sys_fops(E) spmi_pmic_arb(E) drm(E) spmi(E) qcom_smd(E) rpmsg_core smsm(E) gpio_keys(E) smp2p(E) smem(E) hwspinlock_core(E) sunrpc(E) scsi_transport_iscsi(E) CPU: 0 PID: 0 Comm: swapper/0 Tainted: P E 4.12.0-rc7+ #1476 Hardware name: qualcomm dragonboard410c/dragonboard410c, BIOS 2017.07-rc1-00234-g22fa70a-dirty 06/26/2017 task: ffff000009049780 task.stack: ffff000009030000 PC is at check_unmap+0x474/0x8d0 LR is at check_unmap+0x474/0x8d0 ... 
Mapped at: dma_entry_alloc+0x68/0xa8 debug_dma_map_page+0x94/0x148 wcn36xx_dxe_fill_skb.isra.1+0xbc/0xf8 [wcn36xx] wcn36xx_dxe_init+0x244/0x398 [wcn36xx] wcn36xx_start+0xf4/0x298 [wcn36xx] Signed-off-by: Rob Clark Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/dxe.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c index 87dfdaf9044c..d5c810a8cc52 100644 --- a/drivers/net/wireless/ath/wcn36xx/dxe.c +++ b/drivers/net/wireless/ath/wcn36xx/dxe.c @@ -289,6 +289,11 @@ static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl) skb_tail_pointer(skb), WCN36XX_PKT_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dxe->dst_addr_l)) { + dev_err(dev, "unable to map skb\n"); + kfree_skb(skb); + return -ENOMEM; + } ctl->skb = skb; return 0; -- cgit v1.2.3-55-g7522 From e9e6c2329a518d43a4734b26349bb19d8dfd7e6b Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Wed, 2 Aug 2017 11:35:00 +0200 Subject: X25: constify null_x25_address null_x25_address is only used to access the string it contains, so it can be const. Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- net/x25/af_x25.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 5a1a98df3499..ac095936552d 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -74,7 +74,7 @@ DEFINE_RWLOCK(x25_list_lock); static const struct proto_ops x25_proto_ops; -static struct x25_address null_x25_address = {" "}; +static const struct x25_address null_x25_address = {" "}; #ifdef CONFIG_COMPAT struct compat_x25_subscrip_struct { -- cgit v1.2.3-55-g7522 From b8c17f7088310e7ee34ca61929f737045adfd449 Mon Sep 17 00:00:00 2001 From: Lin Yun Sheng Date: Wed, 2 Aug 2017 17:57:37 +0800 Subject: net: hns: Add self-adaptive interrupt coalesce support in hns driver When dealing with low and high throughput, it is hard to achieve both high performance and low latency. In order to achieve that, this patch calculates the rx rate and adjusts the interrupt coalesce parameter accordingly. Signed-off-by: Yunsheng Lin Tested-by: Weiwei Deng Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns/hnae.c | 1 + drivers/net/ethernet/hisilicon/hns/hnae.h | 15 +++ drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 1 + drivers/net/ethernet/hisilicon/hns/hns_enet.c | 134 ++++++++++++++++++++-- drivers/net/ethernet/hisilicon/hns/hns_enet.h | 2 +- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 7 +- 6 files changed, 149 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 9d9b6e6dd988..a051e582d541 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -202,6 +202,7 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags) ring->q = q; ring->flags = flags; spin_lock_init(&ring->lock); + ring->coal_param = q->handle->coal_param; assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr); /* not matter for tx or rx ring, the ntc and ntc start from 0 */ diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 7ba653af19cb..3e62692af011 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -89,6 +89,10 @@ do { \ #define RCB_RING_NAME_LEN 16 +#define HNAE_LOWEST_LATENCY_COAL_PARAM 30 +#define HNAE_LOW_LATENCY_COAL_PARAM 80 +#define HNAE_BULK_LATENCY_COAL_PARAM 150 + enum hnae_led_state { HNAE_LED_INACTIVE, HNAE_LED_ACTIVE, @@ -292,6 +296,12 @@ struct hnae_ring { int flags; /* ring attribute */ int irq_init_flag; + + /* total rx bytes after last rx rate calucated */ + u64 coal_last_rx_bytes; + unsigned long coal_last_jiffies; + u32 coal_param; + u32 coal_rx_rate; /* rx rate in MB */ }; #define ring_ptr_move_fw(ring, p) \ @@ -548,8 +558,13 @@ struct hnae_handle { u32 if_support; int q_num; int vf_id; + unsigned long coal_last_jiffies; + u32 coal_param; /* self adapt coalesce param */ + /* the ring index of last ring that set coal param */ + u32 coal_ring_idx; u32 eport_id; u32 dport_id; /* v2 tx bd should fill the dport_id */ + bool coal_adapt_en; enum hnae_port_type port_type; enum hnae_media_type media_type; struct list_head node; /* list to hnae_ae_dev->handle_list */ diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index a37166ee577b..bd68379d2bea 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -99,6 +99,7 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev, ae_handle->owner_dev = dsaf_dev->dev; ae_handle->dev = dev; ae_handle->q_num = qnum_per_vf; + ae_handle->coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM; /* find ring pair, and set vf id*/ for (ae_handle->vf_id = 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 3987699f8fe6..832f27792e3f 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -812,6 +812,112 @@ static int hns_desc_unused(struct hnae_ring *ring) return ((ntc >= ntu) ? 
0 : ring->desc_num) + ntc - ntu; } +#define HNS_LOWEST_LATENCY_RATE 27 /* 27 MB/s */ +#define HNS_LOW_LATENCY_RATE 80 /* 80 MB/s */ + +#define HNS_COAL_BDNUM 3 + +static u32 hns_coal_rx_bdnum(struct hnae_ring *ring) +{ + bool coal_enable = ring->q->handle->coal_adapt_en; + + if (coal_enable && + ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE) + return HNS_COAL_BDNUM; + else + return 0; +} + +static void hns_update_rx_rate(struct hnae_ring *ring) +{ + bool coal_enable = ring->q->handle->coal_adapt_en; + u32 time_passed_ms; + u64 total_bytes; + + if (!coal_enable || + time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4))) + return; + + /* ring->stats.rx_bytes overflowed */ + if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) { + ring->coal_last_rx_bytes = ring->stats.rx_bytes; + ring->coal_last_jiffies = jiffies; + return; + } + + total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes; + time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies); + ring->coal_rx_rate = (total_bytes / time_passed_ms) >> 10; + + ring->coal_last_rx_bytes = ring->stats.rx_bytes; + ring->coal_last_jiffies = jiffies; +} + +/** + * smooth_alg - smoothing algrithm for adjusting coalesce parameter + **/ +static u32 smooth_alg(u32 new_param, u32 old_param) +{ + u32 gap = (new_param > old_param) ? new_param - old_param + : old_param - new_param; + + if (gap > 8) + gap >>= 3; + + if (new_param > old_param) + return old_param + gap; + else + return old_param - gap; +} + +/** + * hns_nic_adp_coalesce - self adapte coalesce according to rx rate + * @ring_data: pointer to hns_nic_ring_data + **/ +static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data) +{ + struct hnae_ring *ring = ring_data->ring; + struct hnae_handle *handle = ring->q->handle; + u32 new_coal_param, old_coal_param = ring->coal_param; + + if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE) + new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM; + else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE) + new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM; + else + new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM; + + if (new_coal_param == old_coal_param && + new_coal_param == handle->coal_param) + return; + + new_coal_param = smooth_alg(new_coal_param, old_coal_param); + ring->coal_param = new_coal_param; + + /** + * Because all ring in one port has one coalesce param, when one ring + * calculate its own coalesce param, it cannot write to hardware at + * once. There are three conditions as follows: + * 1. current ring's coalesce param is larger than the hardware. + * 2. or ring which adapt last time can change again. + * 3. timeout. 
+ */ + if (new_coal_param == handle->coal_param) { + handle->coal_last_jiffies = jiffies; + handle->coal_ring_idx = ring_data->queue_index; + } else if (new_coal_param > handle->coal_param || + handle->coal_ring_idx == ring_data->queue_index || + time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) { + handle->dev->ops->set_coalesce_usecs(handle, + new_coal_param); + handle->dev->ops->set_coalesce_frames(handle, + 1, new_coal_param); + handle->coal_param = new_coal_param; + handle->coal_ring_idx = ring_data->queue_index; + handle->coal_last_jiffies = jiffies; + } +} + static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data, int budget, void *v) { @@ -868,20 +974,27 @@ static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data) { struct hnae_ring *ring = ring_data->ring; int num = 0; + bool rx_stopped; - ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); + hns_update_rx_rate(ring); /* for hardware bug fixed */ + ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0); num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - if (num > 0) { + if (num <= hns_coal_rx_bdnum(ring)) { + if (ring->q->handle->coal_adapt_en) + hns_nic_adpt_coalesce(ring_data); + + rx_stopped = true; + } else { ring_data->ring->q->handle->dev->ops->toggle_ring_irq( ring_data->ring, 1); - return false; - } else { - return true; + rx_stopped = false; } + + return rx_stopped; } static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) @@ -889,12 +1002,17 @@ static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data) struct hnae_ring *ring = ring_data->ring; int num; + hns_update_rx_rate(ring); num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM); - if (!num) + if (num <= hns_coal_rx_bdnum(ring)) { + if (ring->q->handle->coal_adapt_en) + hns_nic_adpt_coalesce(ring_data); + return true; - else - return false; + } + + return false; } static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index 9cb4c7884201..26e9afcbdd50 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -38,7 +38,7 @@ struct hns_nic_ring_data { struct hnae_ring *ring; struct napi_struct napi; cpumask_t mask; /* affinity mask */ - int queue_index; + u32 queue_index; int (*poll_one)(struct hns_nic_ring_data *, int, void *); void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *); bool (*fini_process)(struct hns_nic_ring_data *); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 78cb20c67aa6..7ea7f8a4aa2a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -735,8 +735,8 @@ static int hns_get_coalesce(struct net_device *net_dev, ops = priv->ae_handle->dev->ops; - ec->use_adaptive_rx_coalesce = 1; - ec->use_adaptive_tx_coalesce = 1; + ec->use_adaptive_rx_coalesce = priv->ae_handle->coal_adapt_en; + ec->use_adaptive_tx_coalesce = priv->ae_handle->coal_adapt_en; if ((!ops->get_coalesce_usecs) || (!ops->get_max_coalesced_frames)) @@ -787,6 +787,9 @@ static int hns_set_coalesce(struct net_device *net_dev, (!ops->set_coalesce_frames)) return -ESRCH; + if (ec->use_adaptive_rx_coalesce != priv->ae_handle->coal_adapt_en) + priv->ae_handle->coal_adapt_en = ec->use_adaptive_rx_coalesce; + rc1 = ops->set_coalesce_usecs(priv->ae_handle, ec->rx_coalesce_usecs); -- cgit 
v1.2.3-55-g7522 From eb48d682814c8b0e44111c45d7c554e1989fd10d Mon Sep 17 00:00:00 2001 From: William Tu Date: Wed, 2 Aug 2017 08:43:52 -0700 Subject: bpf: fix the printing of ifindex Save the ifindex before it gets zeroed so the invalid ifindex can be printed out. Signed-off-by: William Tu Acked-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: David S. Miller --- net/core/filter.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 7e9708653c6f..78d00933dbe7 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2509,15 +2509,16 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, { struct redirect_info *ri = this_cpu_ptr(&redirect_info); struct net_device *fwd; + u32 index = ri->ifindex; if (ri->map) return xdp_do_redirect_map(dev, xdp, xdp_prog); - fwd = dev_get_by_index_rcu(dev_net(dev), ri->ifindex); + fwd = dev_get_by_index_rcu(dev_net(dev), index); ri->ifindex = 0; ri->map = NULL; if (unlikely(!fwd)) { - bpf_warn_invalid_xdp_redirect(ri->ifindex); + bpf_warn_invalid_xdp_redirect(index); return -EINVAL; } @@ -2531,11 +2532,12 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb) { struct redirect_info *ri = this_cpu_ptr(&redirect_info); unsigned int len; + u32 index = ri->ifindex; - dev = dev_get_by_index_rcu(dev_net(dev), ri->ifindex); + dev = dev_get_by_index_rcu(dev_net(dev), index); ri->ifindex = 0; if (unlikely(!dev)) { - bpf_warn_invalid_xdp_redirect(ri->ifindex); + bpf_warn_invalid_xdp_redirect(index); goto err; } -- cgit v1.2.3-55-g7522 From 93b1b31f87ae7c0795afc7586d28f3ed4f859d20 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Wed, 2 Aug 2017 09:34:15 -0700 Subject: ipv4: Introduce ipip_offload_init helper function. It's convenient to init ipip offload. We will check the return value, and print KERN_CRIT info on failure. Signed-off-by: Tonghao Zhang Signed-off-by: David S. Miller --- net/ipv4/af_inet.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index f0103ffe1cdb..683ffafb6ff9 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1765,6 +1765,11 @@ static const struct net_offload ipip_offload = { }, }; +static int __init ipip_offload_init(void) +{ + return inet_add_offload(&ipip_offload, IPPROTO_IPIP); +} + static int __init ipv4_offload_init(void) { /* @@ -1774,9 +1779,10 @@ static int __init ipv4_offload_init(void) pr_crit("%s: Cannot add UDP protocol offload\n", __func__); if (tcpv4_offload_init() < 0) pr_crit("%s: Cannot add TCP protocol offload\n", __func__); + if (ipip_offload_init() < 0) + pr_crit("%s: Cannot add IPIP protocol offload\n", __func__); dev_add_offload(&ip_packet_offload); - inet_add_offload(&ipip_offload, IPPROTO_IPIP); return 0; } -- cgit v1.2.3-55-g7522 From 840df162b3eb3ec02e2613411fad1285a0017c13 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Wed, 2 Aug 2017 10:34:31 -0700 Subject: rds: reduce memory footprint for RDS when transport is RDMA RDS over IB does not use multipath RDS, so the array of additional rds_conn_path structures is always superfluous in this case. Reduce the memory footprint of the rds module by making this a dynamic allocation predicated on whether the transport is mp_capable. Signed-off-by: Sowmini Varadhan Acked-by: Santosh Shilimkar Tested-by: Efrain Galaviz Signed-off-by: David S. 
Miller --- net/rds/connection.c | 34 +++++++++++++++++++++++++--------- net/rds/rds.h | 2 +- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/net/rds/connection.c b/net/rds/connection.c index 005bca68aa94..7ee2d5d68b78 100644 --- a/net/rds/connection.c +++ b/net/rds/connection.c @@ -151,6 +151,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, struct rds_transport *loop_trans; unsigned long flags; int ret, i; + int npaths = (trans->t_mp_capable ? RDS_MPATH_WORKERS : 1); rcu_read_lock(); conn = rds_conn_lookup(net, head, laddr, faddr, trans); @@ -172,6 +173,12 @@ static struct rds_connection *__rds_conn_create(struct net *net, conn = ERR_PTR(-ENOMEM); goto out; } + conn->c_path = kcalloc(npaths, sizeof(struct rds_conn_path), gfp); + if (!conn->c_path) { + kmem_cache_free(rds_conn_slab, conn); + conn = ERR_PTR(-ENOMEM); + goto out; + } INIT_HLIST_NODE(&conn->c_hash_node); conn->c_laddr = laddr; @@ -181,6 +188,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, ret = rds_cong_get_maps(conn); if (ret) { + kfree(conn->c_path); kmem_cache_free(rds_conn_slab, conn); conn = ERR_PTR(ret); goto out; @@ -207,13 +215,14 @@ static struct rds_connection *__rds_conn_create(struct net *net, conn->c_trans = trans; init_waitqueue_head(&conn->c_hs_waitq); - for (i = 0; i < RDS_MPATH_WORKERS; i++) { + for (i = 0; i < npaths; i++) { __rds_conn_path_init(conn, &conn->c_path[i], is_outgoing); conn->c_path[i].cp_index = i; } ret = trans->conn_alloc(conn, gfp); if (ret) { + kfree(conn->c_path); kmem_cache_free(rds_conn_slab, conn); conn = ERR_PTR(ret); goto out; @@ -236,6 +245,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, /* Creating passive conn */ if (parent->c_passive) { trans->conn_free(conn->c_path[0].cp_transport_data); + kfree(conn->c_path); kmem_cache_free(rds_conn_slab, conn); conn = parent->c_passive; } else { @@ -252,7 +262,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, struct rds_conn_path *cp; int i; - for (i = 0; i < RDS_MPATH_WORKERS; i++) { + for (i = 0; i < npaths; i++) { cp = &conn->c_path[i]; /* The ->conn_alloc invocation may have * allocated resource for all paths, so all @@ -261,6 +271,7 @@ static struct rds_connection *__rds_conn_create(struct net *net, if (cp->cp_transport_data) trans->conn_free(cp->cp_transport_data); } + kfree(conn->c_path); kmem_cache_free(rds_conn_slab, conn); conn = found; } else { @@ -407,6 +418,7 @@ void rds_conn_destroy(struct rds_connection *conn) unsigned long flags; int i; struct rds_conn_path *cp; + int npaths = (conn->c_trans->t_mp_capable ? RDS_MPATH_WORKERS : 1); rdsdebug("freeing conn %p for %pI4 -> " "%pI4\n", conn, &conn->c_laddr, @@ -420,7 +432,7 @@ void rds_conn_destroy(struct rds_connection *conn) synchronize_rcu(); /* shut the connection down */ - for (i = 0; i < RDS_MPATH_WORKERS; i++) { + for (i = 0; i < npaths; i++) { cp = &conn->c_path[i]; rds_conn_path_destroy(cp); BUG_ON(!list_empty(&cp->cp_retrans)); @@ -434,6 +446,7 @@ void rds_conn_destroy(struct rds_connection *conn) rds_cong_remove_conn(conn); put_net(conn->c_net); + kfree(conn->c_path); kmem_cache_free(rds_conn_slab, conn); spin_lock_irqsave(&rds_conn_lock, flags); @@ -464,8 +477,12 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, i++, head++) { hlist_for_each_entry_rcu(conn, head, c_hash_node) { struct rds_conn_path *cp; + int npaths; + + npaths = (conn->c_trans->t_mp_capable ? 
+ RDS_MPATH_WORKERS : 1); - for (j = 0; j < RDS_MPATH_WORKERS; j++) { + for (j = 0; j < npaths; j++) { cp = &conn->c_path[j]; if (want_send) list = &cp->cp_send_queue; @@ -486,8 +503,6 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len, } spin_unlock_irqrestore(&cp->cp_lock, flags); - if (!conn->c_trans->t_mp_capable) - break; } } } @@ -571,15 +586,16 @@ static void rds_walk_conn_path_info(struct socket *sock, unsigned int len, i++, head++) { hlist_for_each_entry_rcu(conn, head, c_hash_node) { struct rds_conn_path *cp; + int npaths; - for (j = 0; j < RDS_MPATH_WORKERS; j++) { + npaths = (conn->c_trans->t_mp_capable ? + RDS_MPATH_WORKERS : 1); + for (j = 0; j < npaths; j++) { cp = &conn->c_path[j]; /* XXX no cp_lock usage.. */ if (!visitor(cp, buffer)) continue; - if (!conn->c_trans->t_mp_capable) - break; } /* We copy as much as we can fit in the buffer, diff --git a/net/rds/rds.h b/net/rds/rds.h index 3382695bf46c..2e0315b159cb 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h @@ -154,7 +154,7 @@ struct rds_connection { struct list_head c_map_item; unsigned long c_map_queued; - struct rds_conn_path c_path[RDS_MPATH_WORKERS]; + struct rds_conn_path *c_path; wait_queue_head_t c_hs_waitq; /* handshake waitq */ u32 c_my_gen_num; -- cgit v1.2.3-55-g7522 From 2d29b39739b94f02196d99502126f269fd8ec2ea Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Wed, 2 Aug 2017 23:27:14 +0530 Subject: qlcnic: add const to bin_attribute structure Add const to bin_attribute structure as it is only passed to the functions sysfs_{remove/create}_bin_file. The corresponding arguments are of type const, so declare the structure to be const. Signed-off-by: Bhumika Goyal Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 73027a6c06c7..82fcb83ea3c8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -1248,7 +1248,7 @@ static const struct bin_attribute bin_attr_pm_config = { .write = qlcnic_sysfs_write_pm_config, }; -static struct bin_attribute bin_attr_flash = { +static const struct bin_attribute bin_attr_flash = { .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)}, .size = 0, .read = qlcnic_83xx_sysfs_flash_read_handler, -- cgit v1.2.3-55-g7522 From 3a8f6f874fa1ac004157fb3df742a5034718a2ce Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Wed, 2 Aug 2017 15:48:25 -0400 Subject: net: dsa: bcm_sf2: dst in not an array It's been a while now since ds->dst is not an array anymore, but a simple pointer to a dsa_switch_tree. Fortunately, SF2 does not support multi-chip and thus ds->index is always 0. This patch substitutes 'ds->dst[ds->index].' with 'ds->dst->'. Signed-off-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/bcm_sf2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 6bbfa6ea1efb..558667c814c9 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -788,7 +788,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = ds->dst[ds->index].cpu_dp->netdev; + struct net_device *p = ds->dst->cpu_dp->netdev; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); struct ethtool_wolinfo pwol; @@ -811,7 +811,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, struct ethtool_wolinfo *wol) { - struct net_device *p = ds->dst[ds->index].cpu_dp->netdev; + struct net_device *p = ds->dst->cpu_dp->netdev; struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->dst->cpu_dp->index; struct ethtool_wolinfo pwol; -- cgit v1.2.3-55-g7522 From d06c3583c2cf6c27a56ee05aa128a950e31b224a Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Wed, 2 Aug 2017 15:59:58 -0400 Subject: tcp: remove extra POLL_OUT added for finished active connect() Commit 45f119bf936b ("tcp: remove header prediction") introduced a minor bug: the sk_state_change() and sk_wake_async() notifications for a completed active connection happen twice: once in this new spot inside tcp_finish_connect() and once in the existing code in tcp_rcv_synsent_state_process() immediately after it calls tcp_finish_connect(). This commit removes the duplicate POLL_OUT notifications. Fixes: 45f119bf936b ("tcp: remove header prediction") Signed-off-by: Neal Cardwell Cc: Florian Westphal Cc: Eric Dumazet Cc: Yuchung Cheng Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index df670d7ed98d..99cdf4ccabb8 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5342,11 +5342,6 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) if (sock_flag(sk, SOCK_KEEPOPEN)) inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); - - if (!sock_flag(sk, SOCK_DEAD)) { - sk->sk_state_change(sk); - sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); - } } static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, -- cgit v1.2.3-55-g7522 From 3d52b5949d0311c4b7bc4c223918fee0f5c0dc39 Mon Sep 17 00:00:00 2001 From: John Allen Date: Wed, 2 Aug 2017 16:44:14 -0500 Subject: ibmvnic: Implement per-queue statistics reporting Add counters to report number of packets, bytes, and dropped packets for each transmit queue and number of packets, bytes, and interrupts for each receive queue. Modify ethtool callbacks to report the new statistics. Signed-off-by: John Allen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 86 +++++++++++++++++++++++++++++++++++++- drivers/net/ethernet/ibm/ibmvnic.h | 17 ++++++++ 2 files changed, 101 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 9d8af464dc44..b45ade6123a4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -346,6 +346,31 @@ static void replenish_pools(struct ibmvnic_adapter *adapter) } } +static void release_stats_buffers(struct ibmvnic_adapter *adapter) +{ + kfree(adapter->tx_stats_buffers); + kfree(adapter->rx_stats_buffers); +} + +static int init_stats_buffers(struct ibmvnic_adapter *adapter) +{ + adapter->tx_stats_buffers = + kcalloc(adapter->req_tx_queues, + sizeof(struct ibmvnic_tx_queue_stats), + GFP_KERNEL); + if (!adapter->tx_stats_buffers) + return -ENOMEM; + + adapter->rx_stats_buffers = + kcalloc(adapter->req_rx_queues, + sizeof(struct ibmvnic_rx_queue_stats), + GFP_KERNEL); + if (!adapter->rx_stats_buffers) + return -ENOMEM; + + return 0; +} + static void release_stats_token(struct ibmvnic_adapter *adapter) { struct device *dev = &adapter->vdev->dev; @@ -686,6 +711,7 @@ static void release_resources(struct ibmvnic_adapter *adapter) release_rx_pools(adapter); release_stats_token(adapter); + release_stats_buffers(adapter); release_error_buffers(adapter); if (adapter->napi) { @@ -763,6 +789,10 @@ static int init_resources(struct ibmvnic_adapter *adapter) if (rc) return rc; + rc = init_stats_buffers(adapter); + if (rc) + return rc; + rc = init_stats_token(adapter); if (rc) return rc; @@ -1245,6 +1275,9 @@ out: netdev->stats.tx_packets += tx_packets; adapter->tx_send_failed += tx_send_failed; adapter->tx_map_failed += tx_map_failed; + adapter->tx_stats_buffers[queue_num].packets += tx_packets; + adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; + adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; return ret; } @@ -1585,6 +1618,8 @@ restart_poll: napi_gro_receive(napi, skb); /* send it up */ netdev->stats.rx_packets++; netdev->stats.rx_bytes += length; + adapter->rx_stats_buffers[scrq_num].packets++; + adapter->rx_stats_buffers[scrq_num].bytes += length; frames_processed++; } @@ -1706,6 +1741,7 @@ static void ibmvnic_get_ringparam(struct net_device *netdev, static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) { + struct ibmvnic_adapter *adapter = netdev_priv(dev); int i; if (stringset != ETH_SS_STATS) @@ -1713,13 +1749,39 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN) memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); + + for (i = 0; i < adapter->req_tx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i); + data += ETH_GSTRING_LEN; + } + + for (i = 0; i < adapter->req_rx_queues; i++) { + snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i); + data += ETH_GSTRING_LEN; + + snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i); + data += ETH_GSTRING_LEN; + } } static int ibmvnic_get_sset_count(struct net_device *dev, int sset) { + struct ibmvnic_adapter *adapter = netdev_priv(dev); + switch (sset) { case ETH_SS_STATS: - return ARRAY_SIZE(ibmvnic_stats); + return 
ARRAY_SIZE(ibmvnic_stats) + + adapter->req_tx_queues * NUM_TX_STATS + + adapter->req_rx_queues * NUM_RX_STATS; default: return -EOPNOTSUPP; } @@ -1730,7 +1792,7 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, { struct ibmvnic_adapter *adapter = netdev_priv(dev); union ibmvnic_crq crq; - int i; + int i, j; memset(&crq, 0, sizeof(crq)); crq.request_statistics.first = IBMVNIC_CRQ_CMD; @@ -1746,6 +1808,24 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset); + + for (j = 0; j < adapter->req_tx_queues; j++) { + data[i] = adapter->tx_stats_buffers[j].packets; + i++; + data[i] = adapter->tx_stats_buffers[j].bytes; + i++; + data[i] = adapter->tx_stats_buffers[j].dropped_packets; + i++; + } + + for (j = 0; j < adapter->req_rx_queues; j++) { + data[i] = adapter->rx_stats_buffers[j].packets; + i++; + data[i] = adapter->rx_stats_buffers[j].bytes; + i++; + data[i] = adapter->rx_stats_buffers[j].interrupts; + i++; + } } static const struct ethtool_ops ibmvnic_ethtool_ops = { @@ -2050,6 +2130,8 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance) struct ibmvnic_sub_crq_queue *scrq = instance; struct ibmvnic_adapter *adapter = scrq->adapter; + adapter->rx_stats_buffers[scrq->scrq_num].interrupts++; + if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) { disable_scrq_irq(adapter, scrq); __napi_schedule(&adapter->napi[scrq->scrq_num]); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 8eff6e15f4bb..d02257ccc377 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -166,6 +166,20 @@ struct ibmvnic_statistics { u8 reserved[72]; } __packed __aligned(8); +#define NUM_TX_STATS 3 +struct ibmvnic_tx_queue_stats { + u64 packets; + u64 bytes; + u64 dropped_packets; +}; + +#define NUM_RX_STATS 3 +struct ibmvnic_rx_queue_stats { + u64 packets; + u64 bytes; + u64 interrupts; +}; + struct ibmvnic_acl_buffer { __be32 len; __be32 version; @@ -956,6 +970,9 @@ struct ibmvnic_adapter { int tx_send_failed; int tx_map_failed; + struct ibmvnic_tx_queue_stats *tx_stats_buffers; + struct ibmvnic_rx_queue_stats *rx_stats_buffers; + int phys_link_state; int logical_link_state; -- cgit v1.2.3-55-g7522 From 52da5c114a884f8245dca5d3a69e1f45c759ba31 Mon Sep 17 00:00:00 2001 From: John Allen Date: Wed, 2 Aug 2017 16:45:28 -0500 Subject: ibmvnic: Convert vnic server reported statistics to cpu endian The vnic server reports the statistics buffer in big endian format and must be converted to cpu endian in order to be displayed correctly on little endian lpars. Signed-off-by: John Allen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index b45ade6123a4..ea0ff2841a1f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1807,7 +1807,8 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, wait_for_completion(&adapter->stats_done); for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++) - data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset); + data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter, + ibmvnic_stats[i].offset)); for (j = 0; j < adapter->req_tx_queues; j++) { data[i] = adapter->tx_stats_buffers[j].packets; -- cgit v1.2.3-55-g7522 From bc131b3a4a92290d6142e930622d6f97d2504241 Mon Sep 17 00:00:00 2001 From: John Allen Date: Wed, 2 Aug 2017 16:46:30 -0500 Subject: ibmvnic: Implement .get_ringparam Implement .get_ringparam (ethtool -g) functionality Signed-off-by: John Allen Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ea0ff2841a1f..c40456a399dd 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1729,12 +1729,14 @@ static u32 ibmvnic_get_link(struct net_device *netdev) static void ibmvnic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { - ring->rx_max_pending = 0; - ring->tx_max_pending = 0; + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq; + ring->tx_max_pending = adapter->max_tx_entries_per_subcrq; ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; - ring->rx_pending = 0; - ring->tx_pending = 0; + ring->rx_pending = adapter->req_rx_add_entries_per_subcrq; + ring->tx_pending = adapter->req_tx_entries_per_subcrq; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } -- cgit v1.2.3-55-g7522 From c2dbeb671f44c526b7ba8808d4a8a617a36c7516 Mon Sep 17 00:00:00 2001 From: John Allen Date: Wed, 2 Aug 2017 16:47:17 -0500 Subject: ibmvnic: Implement .get_channels Implement .get_channels (ethtool -l) functionality Signed-off-by: John Allen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c40456a399dd..5932160eb815 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1741,6 +1741,21 @@ static void ibmvnic_get_ringparam(struct net_device *netdev, ring->rx_jumbo_pending = 0; } +static void ibmvnic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct ibmvnic_adapter *adapter = netdev_priv(netdev); + + channels->max_rx = adapter->max_rx_queues; + channels->max_tx = adapter->max_tx_queues; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = adapter->req_rx_queues; + channels->tx_count = adapter->req_tx_queues; + channels->other_count = 0; + channels->combined_count = 0; +} + static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct ibmvnic_adapter *adapter = netdev_priv(dev); @@ -1837,6 +1852,7 @@ static const struct ethtool_ops ibmvnic_ethtool_ops = { .set_msglevel = ibmvnic_set_msglevel, .get_link = ibmvnic_get_link, .get_ringparam = ibmvnic_get_ringparam, + .get_channels = ibmvnic_get_channels, .get_strings = ibmvnic_get_strings, .get_sset_count = ibmvnic_get_sset_count, .get_ethtool_stats = ibmvnic_get_ethtool_stats, -- cgit v1.2.3-55-g7522 From e61e4055b165f4c645ce2a85890b313abf841f67 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:09 +0800 Subject: sctp: remove the typedef sctp_shutdownhdr_t This patch is to remove the typedef sctp_shutdownhdr_t, and replace with struct sctp_shutdownhdr in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 6 +++--- net/sctp/sm_make_chunk.c | 4 ++-- net/sctp/sm_statefuns.c | 10 +++++----- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index ba007163acfd..7a586ba7dcd4 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -416,13 +416,13 @@ struct sctp_abort_chunk { /* For the graceful shutdown we must carry the tag (in common header) * and the highest consecutive acking value. */ -typedef struct sctp_shutdownhdr { +struct sctp_shutdownhdr { __be32 cum_tsn_ack; -} sctp_shutdownhdr_t; +}; struct sctp_shutdown_chunk_t { struct sctp_chunkhdr chunk_hdr; - sctp_shutdownhdr_t shutdown_hdr; + struct sctp_shutdownhdr shutdown_hdr; }; /* RFC 2960. 
Section 3.3.10 Operation Error (ERROR) (9) */ diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 163004e7047c..8f1c6b639ac1 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -857,15 +857,15 @@ nodata: struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { + struct sctp_shutdownhdr shut; struct sctp_chunk *retval; - sctp_shutdownhdr_t shut; __u32 ctsn; ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); shut.cum_tsn_ack = htonl(ctsn); retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0, - sizeof(sctp_shutdownhdr_t), GFP_ATOMIC); + sizeof(shut), GFP_ATOMIC); if (!retval) goto nodata; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index dc0c2c4188d8..5b95e2d8c227 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2656,8 +2656,8 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - sctp_shutdownhdr_t *sdh; sctp_disposition_t disposition; + struct sctp_shutdownhdr *sdh; struct sctp_ulpevent *ev; __u32 ctsn; @@ -2671,8 +2671,8 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net, commands); /* Convert the elaborate header. */ - sdh = (sctp_shutdownhdr_t *)chunk->skb->data; - skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t)); + sdh = (struct sctp_shutdownhdr *)chunk->skb->data; + skb_pull(chunk->skb, sizeof(*sdh)); chunk->subh.shutdown_hdr = sdh; ctsn = ntohl(sdh->cum_tsn_ack); @@ -2746,7 +2746,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - sctp_shutdownhdr_t *sdh; + struct sctp_shutdownhdr *sdh; __u32 ctsn; if (!sctp_vtag_verify(chunk, asoc)) @@ -2758,7 +2758,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net, return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); - sdh = (sctp_shutdownhdr_t *)chunk->skb->data; + sdh = (struct sctp_shutdownhdr *)chunk->skb->data; ctsn = ntohl(sdh->cum_tsn_ack); if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { -- cgit v1.2.3-55-g7522 From ac23e68133f4570f40ec0910286ced08ced2d378 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:10 +0800 Subject: sctp: fix the name of struct sctp_shutdown_chunk_t This patch is to fix the name of struct sctp_shutdown_chunk_t, replace with struct sctp_shutdown_chunk in the places where it's using it. It is also to fix some indentation problems. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 2 +- net/sctp/sm_statefuns.c | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 7a586ba7dcd4..553020cbf6f7 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -420,7 +420,7 @@ struct sctp_shutdownhdr { __be32 cum_tsn_ack; }; -struct sctp_shutdown_chunk_t { +struct sctp_shutdown_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_shutdownhdr shutdown_hdr; }; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 5b95e2d8c227..d4d8fab52a11 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2665,8 +2665,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net, return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the SHUTDOWN chunk has a valid length. 
*/ - if (!sctp_chunk_length_valid(chunk, - sizeof(struct sctp_shutdown_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); @@ -2753,8 +2752,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net, return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the SHUTDOWN chunk has a valid length. */ - if (!sctp_chunk_length_valid(chunk, - sizeof(struct sctp_shutdown_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); @@ -5419,12 +5417,14 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack( */ if (chunk) { if (!sctp_vtag_verify(chunk, asoc)) - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, + commands); /* Make sure that the SHUTDOWN chunk has a valid length. */ - if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t))) - return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, - commands); + if (!sctp_chunk_length_valid( + chunk, sizeof(struct sctp_shutdown_chunk))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, + arg, commands); } /* If it has no more outstanding DATA chunks, the SHUTDOWN receiver -- cgit v1.2.3-55-g7522 From d8238d9dab8fbea22dd04f4e77639c7f7b83eef7 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:11 +0800 Subject: sctp: remove the typedef sctp_errhdr_t This patch is to remove the typedef sctp_errhdr_t, and replace with struct sctp_errhdr in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 6 +++--- include/net/sctp/sctp.h | 8 ++++---- net/sctp/sm_make_chunk.c | 38 ++++++++++++++++++++------------------ net/sctp/sm_sideeffect.c | 2 +- net/sctp/sm_statefuns.c | 29 +++++++++++++++-------------- net/sctp/ulpevent.c | 10 +++++----- 6 files changed, 48 insertions(+), 45 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 553020cbf6f7..d35bdd30fa0f 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -427,15 +427,15 @@ struct sctp_shutdown_chunk { /* RFC 2960. 
Section 3.3.10 Operation Error (ERROR) (9) */ -typedef struct sctp_errhdr { +struct sctp_errhdr { __be16 cause; __be16 length; __u8 variable[0]; -} sctp_errhdr_t; +}; typedef struct sctp_operr_chunk { struct sctp_chunkhdr chunk_hdr; - sctp_errhdr_t err_hdr; + struct sctp_errhdr err_hdr; } sctp_operr_chunk_t; /* RFC 2960 3.3.10 - Operation Error diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 45fd4c6056b5..84650fed1e6a 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -479,13 +479,13 @@ for (pos.v = chunk->member;\ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length)) #define _sctp_walk_errors(err, chunk_hdr, end)\ -for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ +for (err = (struct sctp_errhdr *)((void *)chunk_hdr + \ sizeof(struct sctp_chunkhdr));\ - ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\ + ((void *)err + offsetof(struct sctp_errhdr, length) + sizeof(err->length) <=\ (void *)chunk_hdr + end) &&\ (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ - ntohs(err->length) >= sizeof(sctp_errhdr_t); \ - err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length)))) + ntohs(err->length) >= sizeof(struct sctp_errhdr); \ + err = (struct sctp_errhdr *)((void *)err + SCTP_PAD4(ntohs(err->length)))) #define sctp_walk_fwdtsn(pos, chunk)\ _sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk)) diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 8f1c6b639ac1..0b2298bbb84e 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -135,14 +135,14 @@ static const struct sctp_paramhdr prsctp_param = { void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen) { - sctp_errhdr_t err; + struct sctp_errhdr err; __u16 len; /* Cause code constants are now defined in network order. */ err.cause = cause_code; - len = sizeof(sctp_errhdr_t) + paylen; + len = sizeof(err) + paylen; err.length = htons(len); - chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); + chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(err), &err); } /* A helper to initialize an op error inside a @@ -153,19 +153,19 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, size_t paylen) { - sctp_errhdr_t err; + struct sctp_errhdr err; __u16 len; /* Cause code constants are now defined in network order. 
*/ err.cause = cause_code; - len = sizeof(sctp_errhdr_t) + paylen; + len = sizeof(err) + paylen; err.length = htons(len); if (skb_tailroom(chunk->skb) < len) return -ENOSPC; - chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, - sizeof(sctp_errhdr_t), - &err); + + chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, sizeof(err), &err); + return 0; } /* 3.3.2 Initiation (INIT) (1) @@ -979,8 +979,8 @@ struct sctp_chunk *sctp_make_abort_no_data( struct sctp_chunk *retval; __be32 payload; - retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) - + sizeof(tsn)); + retval = sctp_make_abort(asoc, chunk, + sizeof(struct sctp_errhdr) + sizeof(tsn)); if (!retval) goto no_mem; @@ -1015,7 +1015,8 @@ struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, void *payload = NULL; int err; - retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen); + retval = sctp_make_abort(asoc, NULL, + sizeof(struct sctp_errhdr) + paylen); if (!retval) goto err_chunk; @@ -1080,8 +1081,8 @@ struct sctp_chunk *sctp_make_abort_violation( struct sctp_chunk *retval; struct sctp_paramhdr phdr; - retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen + - sizeof(phdr)); + retval = sctp_make_abort(asoc, chunk, sizeof(struct sctp_errhdr) + + paylen + sizeof(phdr)); if (!retval) goto end; @@ -1104,7 +1105,7 @@ struct sctp_chunk *sctp_make_violation_paramlen( { struct sctp_chunk *retval; static const char error[] = "The following parameter had invalid length:"; - size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t) + + size_t payload_len = sizeof(error) + sizeof(struct sctp_errhdr) + sizeof(*param); retval = sctp_make_abort(asoc, chunk, payload_len); @@ -1126,7 +1127,7 @@ struct sctp_chunk *sctp_make_violation_max_retrans( { struct sctp_chunk *retval; static const char error[] = "Association exceeded its max_retans count"; - size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t); + size_t payload_len = sizeof(error) + sizeof(struct sctp_errhdr); retval = sctp_make_abort(asoc, chunk, payload_len); if (!retval) @@ -1209,7 +1210,8 @@ static struct sctp_chunk *sctp_make_op_error_space( struct sctp_chunk *retval; retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0, - sizeof(sctp_errhdr_t) + size, GFP_ATOMIC); + sizeof(struct sctp_errhdr) + size, + GFP_ATOMIC); if (!retval) goto nodata; @@ -2966,7 +2968,7 @@ static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id, __be16 err_code, sctp_addip_param_t *asconf_param) { sctp_addip_param_t ack_param; - sctp_errhdr_t err_param; + struct sctp_errhdr err_param; int asconf_param_len = 0; int err_param_len = 0; __be16 response_type; @@ -3351,7 +3353,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, int no_err) { sctp_addip_param_t *asconf_ack_param; - sctp_errhdr_t *err_param; + struct sctp_errhdr *err_param; int length; int asconf_ack_len; __be16 err_code; diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index d6e5e9e0fd6d..5dda8c42b5f6 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -828,7 +828,7 @@ static void sctp_cmd_assoc_update(sctp_cmd_seq_t *cmds, if (!sctp_assoc_update(asoc, new)) return; - abort = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t)); + abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr)); if (abort) { sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0); sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index d4d8fab52a11..7a2ba4c187d0 100644 --- 
a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1233,7 +1233,7 @@ static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa, union sctp_addr_param *addrparm; struct sctp_errhdr *errhdr; struct sctp_endpoint *ep; - char buffer[sizeof(struct sctp_errhdr)+sizeof(union sctp_addr_param)]; + char buffer[sizeof(*errhdr) + sizeof(*addrparm)]; struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family); /* Build the error on the stack. We are way to malloc crazy @@ -1244,7 +1244,7 @@ static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa, /* Copy into a parm format. */ len = af->to_addr_param(ssa, addrparm); - len += sizeof(sctp_errhdr_t); + len += sizeof(*errhdr); errhdr->cause = SCTP_ERROR_RESTART; errhdr->length = htons(len); @@ -2270,7 +2270,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - sctp_errhdr_t *err; + struct sctp_errhdr *err; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); @@ -2337,7 +2337,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, struct sctp_chunk *chunk = arg, *reply; struct sctp_cookie_preserve_param bht; struct sctp_bind_addr *bp; - sctp_errhdr_t *err; + struct sctp_errhdr *err; u32 stale; if (attempts > asoc->max_init_attempts) { @@ -2348,7 +2348,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, return SCTP_DISPOSITION_DELETE_TCB; } - err = (sctp_errhdr_t *)(chunk->skb->data); + err = (struct sctp_errhdr *)(chunk->skb->data); /* When calculating the time extension, an implementation * SHOULD use the RTT information measured based on the @@ -2364,7 +2364,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, * to give ample time to retransmit the new cookie and thus * yield a higher probability of success on the reattempt. */ - stale = ntohl(*(__be32 *)((u8 *)err + sizeof(sctp_errhdr_t))); + stale = ntohl(*(__be32 *)((u8 *)err + sizeof(*err))); stale = (stale * 2) / 1000; bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE; @@ -2499,13 +2499,14 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, /* See if we have an error cause code in the chunk. */ len = ntohs(chunk->chunk_hdr->length); if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) { + struct sctp_errhdr *err; - sctp_errhdr_t *err; sctp_walk_errors(err, chunk->chunk_hdr); if ((void *)err != (void *)chunk->chunk_end) - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, + commands); - error = ((sctp_errhdr_t *)chunk->skb->data)->cause; + error = ((struct sctp_errhdr *)chunk->skb->data)->cause; } sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); @@ -2552,7 +2553,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net, /* See if we have an error cause code in the chunk. 
*/ len = ntohs(chunk->chunk_hdr->length); if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) - error = ((sctp_errhdr_t *)chunk->skb->data)->cause; + error = ((struct sctp_errhdr *)chunk->skb->data)->cause; return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc, chunk->transport); @@ -3310,7 +3311,7 @@ sctp_disposition_t sctp_sf_operr_notify(struct net *net, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; - sctp_errhdr_t *err; + struct sctp_errhdr *err; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); @@ -3433,7 +3434,7 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, struct sctp_chunk *chunk = arg; struct sk_buff *skb = chunk->skb; struct sctp_chunkhdr *ch; - sctp_errhdr_t *err; + struct sctp_errhdr *err; __u8 *ch_end; int ootb_shut_ack = 0; int ootb_cookie_ack = 0; @@ -3776,7 +3777,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) && !(asoc->addip_last_asconf)) { abort = sctp_make_abort(asoc, asconf_ack, - sizeof(sctp_errhdr_t)); + sizeof(struct sctp_errhdr)); if (abort) { sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, @@ -3812,7 +3813,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, } abort = sctp_make_abort(asoc, asconf_ack, - sizeof(sctp_errhdr_t)); + sizeof(struct sctp_errhdr)); if (abort) { sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0); sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 5f86c5062a98..67abc0194f30 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -371,19 +371,19 @@ sctp_ulpevent_make_remote_error(const struct sctp_association *asoc, struct sctp_chunk *chunk, __u16 flags, gfp_t gfp) { - struct sctp_ulpevent *event; struct sctp_remote_error *sre; + struct sctp_ulpevent *event; + struct sctp_errhdr *ch; struct sk_buff *skb; - sctp_errhdr_t *ch; __be16 cause; int elen; - ch = (sctp_errhdr_t *)(chunk->skb->data); + ch = (struct sctp_errhdr *)(chunk->skb->data); cause = ch->cause; - elen = SCTP_PAD4(ntohs(ch->length)) - sizeof(sctp_errhdr_t); + elen = SCTP_PAD4(ntohs(ch->length)) - sizeof(*ch); /* Pull off the ERROR header. */ - skb_pull(chunk->skb, sizeof(sctp_errhdr_t)); + skb_pull(chunk->skb, sizeof(*ch)); /* Copy the skb to a new skb with room for us to prepend * notification with. -- cgit v1.2.3-55-g7522 From 87caeba7914979ef6a5765dfa3163b51a30133ab Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:12 +0800 Subject: sctp: remove the typedef sctp_operr_chunk_t This patch is to remove the typedef sctp_operr_chunk_t, and replace with struct sctp_operr_chunk in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 4 ++-- net/sctp/sm_statefuns.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index d35bdd30fa0f..07c12e98fa85 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -433,10 +433,10 @@ struct sctp_errhdr { __u8 variable[0]; }; -typedef struct sctp_operr_chunk { +struct sctp_operr_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_errhdr err_hdr; -} sctp_operr_chunk_t; +}; /* RFC 2960 3.3.10 - Operation Error * diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 7a2ba4c187d0..3b121d2ca309 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2278,7 +2278,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net, /* Make sure that the ERROR chunk has a valid length. * The parameter walking depends on this as well. */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_operr_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); @@ -3317,7 +3317,7 @@ sctp_disposition_t sctp_sf_operr_notify(struct net *net, return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the ERROR chunk has a valid length. */ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_operr_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); sctp_walk_errors(err, chunk->chunk_hdr); -- cgit v1.2.3-55-g7522 From 2a4932167772874c5bc4b3dfebf61cfadb5554b9 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:13 +0800 Subject: sctp: remove the typedef sctp_error_t This patch is to remove the typedef sctp_error_t, and replace with enum sctp_error in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 4 ++-- net/sctp/sm_statefuns.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 07c12e98fa85..c74ea93e36eb 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -457,7 +457,7 @@ struct sctp_operr_chunk { * 9 No User Data * 10 Cookie Received While Shutting Down */ -typedef enum { +enum sctp_error { SCTP_ERROR_NO_ERROR = cpu_to_be16(0x00), SCTP_ERROR_INV_STRM = cpu_to_be16(0x01), @@ -512,7 +512,7 @@ typedef enum { * 0x0105 Unsupported HMAC Identifier */ SCTP_ERROR_UNSUP_HMAC = cpu_to_be16(0x0105) -} sctp_error_t; +}; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 3b121d2ca309..1d1249962993 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -528,7 +528,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net, (struct sctp_init_chunk *)chunk->chunk_hdr, chunk, &err_chunk)) { - sctp_error_t error = SCTP_ERROR_NO_RESOURCE; + enum sctp_error error = SCTP_ERROR_NO_RESOURCE; /* This chunk contains fatal error. It is to be discarded. * Send an ABORT, with causes. If there are no causes, -- cgit v1.2.3-55-g7522 From 1fb6d83bd37dc7d0949b8a8005f7c24dddc3ee1e Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:14 +0800 Subject: sctp: remove the typedef sctp_ecnehdr_t This patch is to remove the typedef sctp_ecnehdr_t, and replace with struct sctp_ecnehdr in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 6 +++--- net/sctp/sm_make_chunk.c | 4 ++-- net/sctp/sm_statefuns.c | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index c74ea93e36eb..5ea739b99f8d 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -519,13 +519,13 @@ enum sctp_error { /* RFC 2960. Appendix A. Explicit Congestion Notification. * Explicit Congestion Notification Echo (ECNE) (12) */ -typedef struct sctp_ecnehdr { +struct sctp_ecnehdr { __be32 lowest_tsn; -} sctp_ecnehdr_t; +}; typedef struct sctp_ecne_chunk { struct sctp_chunkhdr chunk_hdr; - sctp_ecnehdr_t ence_hdr; + struct sctp_ecnehdr ence_hdr; } sctp_ecne_chunk_t; /* RFC 2960. Appendix A. Explicit Congestion Notification. diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 0b2298bbb84e..1c7cc6a48bde 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -697,11 +697,11 @@ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, const __u32 lowest_tsn) { struct sctp_chunk *retval; - sctp_ecnehdr_t ecne; + struct sctp_ecnehdr ecne; ecne.lowest_tsn = htonl(lowest_tsn); retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0, - sizeof(sctp_ecnehdr_t), GFP_ATOMIC); + sizeof(ecne), GFP_ATOMIC); if (!retval) goto nodata; retval->subh.ecne_hdr = diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 1d1249962993..de3e5bf88484 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2918,8 +2918,8 @@ sctp_disposition_t sctp_sf_do_ecne(struct net *net, void *arg, sctp_cmd_seq_t *commands) { - sctp_ecnehdr_t *ecne; struct sctp_chunk *chunk = arg; + struct sctp_ecnehdr *ecne; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); @@ -2928,8 +2928,8 @@ sctp_disposition_t sctp_sf_do_ecne(struct net *net, return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); - ecne = (sctp_ecnehdr_t *) chunk->skb->data; - skb_pull(chunk->skb, sizeof(sctp_ecnehdr_t)); + ecne = (struct sctp_ecnehdr *)chunk->skb->data; + skb_pull(chunk->skb, sizeof(*ecne)); /* If this is a newer ECNE than the last CWR packet we sent out */ sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE, -- cgit v1.2.3-55-g7522 From b515fd27591dea06bc9f0178fba361b43b76ab6b Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:15 +0800 Subject: sctp: remove the typedef sctp_ecne_chunk_t This patch is to remove the typedef sctp_ecne_chunk_t, and replace with struct sctp_ecne_chunk in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 4 ++-- net/sctp/sm_statefuns.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 5ea739b99f8d..026bbdfcaf44 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -523,10 +523,10 @@ struct sctp_ecnehdr { __be32 lowest_tsn; }; -typedef struct sctp_ecne_chunk { +struct sctp_ecne_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_ecnehdr ence_hdr; -} sctp_ecne_chunk_t; +}; /* RFC 2960. Appendix A. Explicit Congestion Notification. 
* Congestion Window Reduced (CWR) (13) diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index de3e5bf88484..286dce14c5cc 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2869,7 +2869,7 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net, if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_ecne_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); @@ -2924,7 +2924,7 @@ sctp_disposition_t sctp_sf_do_ecne(struct net *net, if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_ecne_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); -- cgit v1.2.3-55-g7522 From 65f77105438a7793836d7ba2d9a05fa585b8caf9 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:16 +0800 Subject: sctp: remove the typedef sctp_cwrhdr_t This patch is to remove the typedef sctp_cwrhdr_t, and replace with struct sctp_cwrhdr in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 6 +++--- net/sctp/sm_make_chunk.c | 4 ++-- net/sctp/sm_statefuns.c | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 026bbdfcaf44..3c8c418425e6 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -531,13 +531,13 @@ struct sctp_ecne_chunk { /* RFC 2960. Appendix A. Explicit Congestion Notification. 
* Congestion Window Reduced (CWR) (13) */ -typedef struct sctp_cwrhdr { +struct sctp_cwrhdr { __be32 lowest_tsn; -} sctp_cwrhdr_t; +}; typedef struct sctp_cwr_chunk { struct sctp_chunkhdr chunk_hdr; - sctp_cwrhdr_t cwr_hdr; + struct sctp_cwrhdr cwr_hdr; } sctp_cwr_chunk_t; /* PR-SCTP diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 1c7cc6a48bde..e8e506522193 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -663,11 +663,11 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, const struct sctp_chunk *chunk) { struct sctp_chunk *retval; - sctp_cwrhdr_t cwr; + struct sctp_cwrhdr cwr; cwr.lowest_tsn = htonl(lowest_tsn); retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0, - sizeof(sctp_cwrhdr_t), GFP_ATOMIC); + sizeof(cwr), GFP_ATOMIC); if (!retval) goto nodata; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 286dce14c5cc..e13c83f9a6ee 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2862,8 +2862,8 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net, void *arg, sctp_cmd_seq_t *commands) { - sctp_cwrhdr_t *cwr; struct sctp_chunk *chunk = arg; + struct sctp_cwrhdr *cwr; u32 lowest_tsn; if (!sctp_vtag_verify(chunk, asoc)) @@ -2873,8 +2873,8 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net, return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); - cwr = (sctp_cwrhdr_t *) chunk->skb->data; - skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t)); + cwr = (struct sctp_cwrhdr *)chunk->skb->data; + skb_pull(chunk->skb, sizeof(*cwr)); lowest_tsn = ntohl(cwr->lowest_tsn); -- cgit v1.2.3-55-g7522 From 05b25d0ba68d5b6d5d06f7c6cc3fec49ffa2df38 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:17 +0800 Subject: sctp: remove the typedef sctp_cwr_chunk_t Remove this typedef including the struct, there is even no places using it. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 3c8c418425e6..2a97639eb035 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -535,11 +535,6 @@ struct sctp_cwrhdr { __be32 lowest_tsn; }; -typedef struct sctp_cwr_chunk { - struct sctp_chunkhdr chunk_hdr; - struct sctp_cwrhdr cwr_hdr; -} sctp_cwr_chunk_t; - /* PR-SCTP * 3.2 Forward Cumulative TSN Chunk Definition (FORWARD TSN) * -- cgit v1.2.3-55-g7522 From 8b32f2348a0441ef202562618b13d1bb494f3a47 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:18 +0800 Subject: sctp: remove the typedef sctp_addip_param_t This patch is to remove the typedef sctp_addip_param_t, and replace with struct sctp_addip_param in the places where it's using this typedef. It is to use sizeof(variable) instead of sizeof(type), and also fix some indent problems. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 8 ++++---- net/sctp/sm_make_chunk.c | 39 ++++++++++++++++++++------------------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 2a97639eb035..5b7d6d9d3fc5 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -629,10 +629,10 @@ struct sctp_fwdtsn_chunk { * The ASCONF Parameter Response is used in the ASCONF-ACK to * report status of ASCONF processing. 
*/ -typedef struct sctp_addip_param { - struct sctp_paramhdr param_hdr; - __be32 crr_id; -} sctp_addip_param_t; +struct sctp_addip_param { + struct sctp_paramhdr param_hdr; + __be32 crr_id; +}; typedef struct sctp_addiphdr { __be32 serial; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index e8e506522193..bab3354ecdc3 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2615,7 +2615,7 @@ do_addr_param: if (!net->sctp.addip_enable) goto fall_through; - addr_param = param.v + sizeof(sctp_addip_param_t); + addr_param = param.v + sizeof(struct sctp_addip_param); af = sctp_get_af_specific(param_type2af(addr_param->p.type)); if (af == NULL) @@ -2810,7 +2810,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, int addrcnt, __be16 flags) { - sctp_addip_param_t param; + struct sctp_addip_param param; struct sctp_chunk *retval; union sctp_addr_param addr_param; union sctp_addr *addr; @@ -2896,7 +2896,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, union sctp_addr *addr) { - sctp_addip_param_t param; + struct sctp_addip_param param; struct sctp_chunk *retval; int len = sizeof(param); union sctp_addr_param addrparam; @@ -2965,9 +2965,10 @@ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *as /* Add response parameters to an ASCONF_ACK chunk. */ static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id, - __be16 err_code, sctp_addip_param_t *asconf_param) + __be16 err_code, + struct sctp_addip_param *asconf_param) { - sctp_addip_param_t ack_param; + struct sctp_addip_param ack_param; struct sctp_errhdr err_param; int asconf_param_len = 0; int err_param_len = 0; @@ -3006,15 +3007,15 @@ static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id, /* Process a asconf parameter. */ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, - struct sctp_chunk *asconf, - sctp_addip_param_t *asconf_param) + struct sctp_chunk *asconf, + struct sctp_addip_param *asconf_param) { struct sctp_transport *peer; struct sctp_af *af; union sctp_addr addr; union sctp_addr_param *addr_param; - addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); + addr_param = (void *)asconf_param + sizeof(*asconf_param); if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP && asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP && @@ -3174,13 +3175,13 @@ bool sctp_verify_asconf(const struct sctp_association *asoc, if (addr_param_needed && !addr_param_seen) return false; length = ntohs(param.addip->param_hdr.length); - if (length < sizeof(sctp_addip_param_t) + + if (length < sizeof(struct sctp_addip_param) + sizeof(**errp)) return false; break; case SCTP_PARAM_SUCCESS_REPORT: case SCTP_PARAM_ADAPTATION_LAYER_IND: - if (length != sizeof(sctp_addip_param_t)) + if (length != sizeof(struct sctp_addip_param)) return false; break; default: @@ -3289,7 +3290,7 @@ done: /* Process a asconf parameter that is successfully acked. 
*/ static void sctp_asconf_param_success(struct sctp_association *asoc, - sctp_addip_param_t *asconf_param) + struct sctp_addip_param *asconf_param) { struct sctp_af *af; union sctp_addr addr; @@ -3298,7 +3299,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc, struct sctp_transport *transport; struct sctp_sockaddr_entry *saddr; - addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); + addr_param = (void *)asconf_param + sizeof(*asconf_param); /* We have checked the packet before, so we do not check again. */ af = sctp_get_af_specific(param_type2af(addr_param->p.type)); @@ -3349,10 +3350,10 @@ static void sctp_asconf_param_success(struct sctp_association *asoc, * specific success indication is present for the parameter. */ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, - sctp_addip_param_t *asconf_param, - int no_err) + struct sctp_addip_param *asconf_param, + int no_err) { - sctp_addip_param_t *asconf_ack_param; + struct sctp_addip_param *asconf_ack_param; struct sctp_errhdr *err_param; int length; int asconf_ack_len; @@ -3370,8 +3371,8 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, * the first asconf_ack parameter. */ length = sizeof(sctp_addiphdr_t); - asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data + - length); + asconf_ack_param = (struct sctp_addip_param *)(asconf_ack->skb->data + + length); asconf_ack_len -= length; while (asconf_ack_len > 0) { @@ -3380,7 +3381,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, case SCTP_PARAM_SUCCESS_REPORT: return SCTP_ERROR_NO_ERROR; case SCTP_PARAM_ERR_CAUSE: - length = sizeof(sctp_addip_param_t); + length = sizeof(*asconf_ack_param); err_param = (void *)asconf_ack_param + length; asconf_ack_len -= length; if (asconf_ack_len > 0) @@ -3407,7 +3408,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, { struct sctp_chunk *asconf = asoc->addip_last_asconf; union sctp_addr_param *addr_param; - sctp_addip_param_t *asconf_param; + struct sctp_addip_param *asconf_param; int length = 0; int asconf_len = asconf->skb->len; int all_param_pass = 0; -- cgit v1.2.3-55-g7522 From 65205cc465e9b37abbdbb3d595c46081b97e35bc Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:19 +0800 Subject: sctp: remove the typedef sctp_addiphdr_t This patch is to remove the typedef sctp_addiphdr_t, and replace with struct sctp_addiphdr in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 6 +++--- net/sctp/sm_make_chunk.c | 14 +++++++------- net/sctp/sm_statefuns.c | 10 +++++----- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 5b7d6d9d3fc5..83dac9b0b4bd 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -634,14 +634,14 @@ struct sctp_addip_param { __be32 crr_id; }; -typedef struct sctp_addiphdr { +struct sctp_addiphdr { __be32 serial; __u8 params[0]; -} sctp_addiphdr_t; +}; typedef struct sctp_addip_chunk { struct sctp_chunkhdr chunk_hdr; - sctp_addiphdr_t addip_hdr; + struct sctp_addiphdr addip_hdr; } sctp_addip_chunk_t; /* AUTH diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index bab3354ecdc3..0e71e5054ead 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2752,7 +2752,7 @@ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc, union sctp_addr *addr, int vparam_len) { - sctp_addiphdr_t asconf; + struct sctp_addiphdr asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; union sctp_addr_param addrparam; @@ -2945,7 +2945,7 @@ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc, __u32 serial, int vparam_len) { - sctp_addiphdr_t asconf; + struct sctp_addiphdr asconf; struct sctp_chunk *retval; int length = sizeof(asconf) + vparam_len; @@ -3210,7 +3210,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr; bool all_param_pass = true; union sctp_params param; - sctp_addiphdr_t *hdr; + struct sctp_addiphdr *hdr; union sctp_addr_param *addr_param; struct sctp_chunk *asconf_ack; __be16 err_code; @@ -3220,11 +3220,11 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(struct sctp_chunkhdr); - hdr = (sctp_addiphdr_t *)asconf->skb->data; + hdr = (struct sctp_addiphdr *)asconf->skb->data; serial = ntohl(hdr->serial); /* Skip the addiphdr and store a pointer to address parameter. */ - length = sizeof(sctp_addiphdr_t); + length = sizeof(*hdr); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); chunk_len -= length; @@ -3370,7 +3370,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, /* Skip the addiphdr from the asconf_ack chunk and store a pointer to * the first asconf_ack parameter. */ - length = sizeof(sctp_addiphdr_t); + length = sizeof(struct sctp_addiphdr); asconf_ack_param = (struct sctp_addip_param *)(asconf_ack->skb->data + length); asconf_ack_len -= length; @@ -3435,7 +3435,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, * failures are indicated, then all request(s) are considered * successful. */ - if (asconf_ack->skb->len == sizeof(sctp_addiphdr_t)) + if (asconf_ack->skb->len == sizeof(struct sctp_addiphdr)) all_param_pass = 1; /* Process the TLVs contained in the last sent ASCONF chunk. 
*/ diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index e13c83f9a6ee..d722f380b36e 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3613,7 +3613,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, struct sctp_chunk *chunk = arg; struct sctp_chunk *asconf_ack = NULL; struct sctp_paramhdr *err_param = NULL; - sctp_addiphdr_t *hdr; + struct sctp_addiphdr *hdr; __u32 serial; if (!sctp_vtag_verify(chunk, asoc)) { @@ -3636,7 +3636,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); - hdr = (sctp_addiphdr_t *)chunk->skb->data; + hdr = (struct sctp_addiphdr *)chunk->skb->data; serial = ntohl(hdr->serial); /* Verify the ASCONF chunk before processing it. */ @@ -3730,7 +3730,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, struct sctp_chunk *last_asconf = asoc->addip_last_asconf; struct sctp_chunk *abort; struct sctp_paramhdr *err_param = NULL; - sctp_addiphdr_t *addip_hdr; + struct sctp_addiphdr *addip_hdr; __u32 sent_serial, rcvd_serial; if (!sctp_vtag_verify(asconf_ack, asoc)) { @@ -3753,7 +3753,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); - addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; + addip_hdr = (struct sctp_addiphdr *)asconf_ack->skb->data; rcvd_serial = ntohl(addip_hdr->serial); /* Verify the ASCONF-ACK chunk before processing it. */ @@ -3762,7 +3762,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, (void *)err_param, commands); if (last_asconf) { - addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; + addip_hdr = (struct sctp_addiphdr *)last_asconf->subh.addip_hdr; sent_serial = ntohl(addip_hdr->serial); } else { sent_serial = asoc->addip_serial - 1; -- cgit v1.2.3-55-g7522 From 68d75469468620c37cd58dc352f1dcec8c3896b6 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:20 +0800 Subject: sctp: remove the typedef sctp_addip_chunk_t This patch is to remove the typedef sctp_addip_chunk_t, and replace with struct sctp_addip_chunk in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 4 ++-- net/sctp/input.c | 2 +- net/sctp/sm_make_chunk.c | 8 +++++--- net/sctp/sm_statefuns.c | 11 +++++++---- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 83dac9b0b4bd..e7b439ed6371 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -639,10 +639,10 @@ struct sctp_addiphdr { __u8 params[0]; }; -typedef struct sctp_addip_chunk { +struct sctp_addip_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_addiphdr addip_hdr; -} sctp_addip_chunk_t; +}; /* AUTH * Section 4.1 Authentication Chunk (AUTH) diff --git a/net/sctp/input.c b/net/sctp/input.c index 41eb2ec10460..92a07141fd07 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -1111,7 +1111,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup( __be16 peer_port, struct sctp_transport **transportp) { - sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch; + struct sctp_addip_chunk *asconf = (struct sctp_addip_chunk *)ch; struct sctp_af *af; union sctp_addr_param *param; union sctp_addr paddr; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 0e71e5054ead..ae54c6ecd26a 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -3140,10 +3140,11 @@ bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_chunk *chunk, bool addr_param_needed, struct sctp_paramhdr **errp) { - sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr; + struct sctp_addip_chunk *addip; union sctp_params param; bool addr_param_seen = false; + addip = (struct sctp_addip_chunk *)chunk->chunk_hdr; sctp_walk_params(param, addip, addip_hdr.params) { size_t length = ntohs(param.p->length); @@ -3207,7 +3208,7 @@ bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, struct sctp_chunk *asconf) { - sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr; + struct sctp_addip_chunk *addip; bool all_param_pass = true; union sctp_params param; struct sctp_addiphdr *hdr; @@ -3218,6 +3219,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, int chunk_len; __u32 serial; + addip = (struct sctp_addip_chunk *)asconf->chunk_hdr; chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(struct sctp_chunkhdr); hdr = (struct sctp_addiphdr *)asconf->skb->data; @@ -3419,7 +3421,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc, /* Skip the chunkhdr and addiphdr from the last asconf sent and store * a pointer to address parameter. */ - length = sizeof(sctp_addip_chunk_t); + length = sizeof(struct sctp_addip_chunk); addr_param = (union sctp_addr_param *)(asconf->skb->data + length); asconf_len -= length; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index d722f380b36e..9c235bb7aafe 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -3629,10 +3629,11 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, * described in [I-D.ietf-tsvwg-sctp-auth]. */ if (!net->sctp.addip_noauth && !chunk->auth) - return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, + commands); /* Make sure that the ASCONF ADDIP chunk has a valid length. 
*/ - if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t))) + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_addip_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); @@ -3746,10 +3747,12 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, * described in [I-D.ietf-tsvwg-sctp-auth]. */ if (!net->sctp.addip_noauth && !asconf_ack->auth) - return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, + commands); /* Make sure that the ADDIP chunk has a valid length. */ - if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t))) + if (!sctp_chunk_length_valid(asconf_ack, + sizeof(struct sctp_addip_chunk))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); -- cgit v1.2.3-55-g7522 From 96f7ef4d58100d7168e0c4d01a59fbd8589f5756 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:21 +0800 Subject: sctp: remove the typedef sctp_authhdr_t This patch is to remove the typedef sctp_authhdr_t, and replace with struct sctp_authhdr in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/linux/sctp.h | 6 +++--- net/sctp/sm_make_chunk.c | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index e7b439ed6371..b7603f5efebe 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -693,15 +693,15 @@ struct sctp_addip_chunk { * HMAC: n bytes (unsigned integer) This hold the result of the HMAC * calculation. */ -typedef struct sctp_authhdr { +struct sctp_authhdr { __be16 shkey_id; __be16 hmac_id; __u8 hmac[0]; -} sctp_authhdr_t; +}; typedef struct sctp_auth_chunk { struct sctp_chunkhdr chunk_hdr; - sctp_authhdr_t auth_hdr; + struct sctp_authhdr auth_hdr; } sctp_auth_chunk_t; struct sctp_infox { diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index ae54c6ecd26a..d17e8d1f2ed9 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1282,16 +1282,16 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc) return NULL; retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0, - hmac_desc->hmac_len + sizeof(sctp_authhdr_t), - GFP_ATOMIC); + hmac_desc->hmac_len + sizeof(auth_hdr), + GFP_ATOMIC); if (!retval) return NULL; auth_hdr.hmac_id = htons(hmac_desc->hmac_id); auth_hdr.shkey_id = htons(asoc->active_key_id); - retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(sctp_authhdr_t), - &auth_hdr); + retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(auth_hdr), + &auth_hdr); hmac = skb_put_zero(retval->skb, hmac_desc->hmac_len); -- cgit v1.2.3-55-g7522 From bb96dec74543bb3ceb4ac5caf39341dadb4cb559 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Thu, 3 Aug 2017 15:42:22 +0800 Subject: sctp: remove the typedef sctp_auth_chunk_t This patch is to remove the typedef sctp_auth_chunk_t, and replace with struct sctp_auth_chunk in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/linux/sctp.h | 4 ++-- net/sctp/chunk.c | 2 +- net/sctp/sm_statefuns.c | 9 +++++---- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/include/linux/sctp.h b/include/linux/sctp.h index b7603f5efebe..82b171e1aa0b 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h @@ -699,10 +699,10 @@ struct sctp_authhdr { __u8 hmac[0]; }; -typedef struct sctp_auth_chunk { +struct sctp_auth_chunk { struct sctp_chunkhdr chunk_hdr; struct sctp_authhdr auth_hdr; -} sctp_auth_chunk_t; +}; struct sctp_infox { struct sctp_info *sctpinfo; diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 681b181e7ae3..3afac275ee82 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -201,7 +201,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc); if (hmac_desc) - max_data -= SCTP_PAD4(sizeof(sctp_auth_chunk_t) + + max_data -= SCTP_PAD4(sizeof(struct sctp_auth_chunk) + hmac_desc->hmac_len); } diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 9c235bb7aafe..8af90a5f23cd 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -4093,7 +4093,7 @@ static sctp_ierror_t sctp_sf_authenticate(struct net *net, /* Pull in the auth header, so we can do some more verification */ auth_hdr = (struct sctp_authhdr *)chunk->skb->data; chunk->subh.auth_hdr = auth_hdr; - skb_pull(chunk->skb, sizeof(struct sctp_authhdr)); + skb_pull(chunk->skb, sizeof(*auth_hdr)); /* Make sure that we support the HMAC algorithm from the auth * chunk. @@ -4112,7 +4112,8 @@ static sctp_ierror_t sctp_sf_authenticate(struct net *net, /* Make sure that the length of the signature matches what * we expect. */ - sig_len = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_auth_chunk_t); + sig_len = ntohs(chunk->chunk_hdr->length) - + sizeof(struct sctp_auth_chunk); hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id)); if (sig_len != hmac->hmac_len) return SCTP_IERROR_PROTO_VIOLATION; @@ -4134,8 +4135,8 @@ static sctp_ierror_t sctp_sf_authenticate(struct net *net, memset(digest, 0, sig_len); sctp_auth_calculate_hmac(asoc, chunk->skb, - (struct sctp_auth_chunk *)chunk->chunk_hdr, - GFP_ATOMIC); + (struct sctp_auth_chunk *)chunk->chunk_hdr, + GFP_ATOMIC); /* Discard the packet if the digests do not match */ if (memcmp(save_digest, digest, sig_len)) { -- cgit v1.2.3-55-g7522 From 76ad4f0ee74758341e44a8871df6ca60906d2795 Mon Sep 17 00:00:00 2001 From: Salil Date: Wed, 2 Aug 2017 16:59:45 +0100 Subject: net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC This patch adds the support of Hisilicon Network Subsystem 3 Ethernet driver to hip08 family of SoCs. This driver includes basic Rx/Tx functionality. It also includes the client registration code with the HNAE3(Hisilicon Network Acceleration Engine 3) framework. This work provides the initial support to the hip08 SoC and would incrementally add features or enhancements. Signed-off-by: Daode Huang Signed-off-by: lipeng Signed-off-by: Salil Mehta Signed-off-by: Yisen Zhuang Signed-off-by: Wei Hu (Xavier) Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2848 ++++++++++++++++++++ .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 592 ++++ 2 files changed, 3440 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c new file mode 100644 index 000000000000..ad9481c7ceae --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -0,0 +1,2848 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hnae3.h" +#include "hns3_enet.h" + +const char hns3_driver_name[] = "hns3"; +const char hns3_driver_version[] = VERMAGIC_STRING; +static const char hns3_driver_string[] = + "Hisilicon Ethernet Network Driver for Hip08 Family"; +static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; +static struct hnae3_client client; + +/* hns3_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id hns3_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + /* required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); + +static irqreturn_t hns3_irq_handle(int irq, void *dev) +{ + struct hns3_enet_tqp_vector *tqp_vector = dev; + + napi_schedule(&tqp_vector->napi); + + return IRQ_HANDLED; +} + +static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + unsigned int i; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) + continue; + + /* release the irq resource */ + free_irq(tqp_vectors->vector_irq, tqp_vectors); + tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; + } +} + +static int hns3_nic_init_irq(struct hns3_nic_priv *priv) +{ + struct hns3_enet_tqp_vector *tqp_vectors; + int txrx_int_idx = 0; + int rx_int_idx = 0; + int tx_int_idx = 0; + unsigned int i; + int ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vectors = &priv->tqp_vector[i]; + + if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) + continue; + + if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "TxRx", + txrx_int_idx++); + txrx_int_idx++; + } else if (tqp_vectors->rx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", priv->netdev->name, "Rx", + rx_int_idx++); + } else if (tqp_vectors->tx_group.ring) { + snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1, + "%s-%s-%d", 
priv->netdev->name, "Tx", + tx_int_idx++); + } else { + /* Skip this unused q_vector */ + continue; + } + + tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; + + ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, + tqp_vectors->name, + tqp_vectors); + if (ret) { + netdev_err(priv->netdev, "request irq(%d) fail\n", + tqp_vectors->vector_irq); + return ret; + } + + tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; + } + + return 0; +} + +static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, + u32 mask_en) +{ + writel(mask_en, tqp_vector->mask_addr); +} + +static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) +{ + napi_enable(&tqp_vector->napi); + + /* enable vector */ + hns3_mask_vector_irq(tqp_vector, 1); +} + +static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* disable vector */ + hns3_mask_vector_irq(tqp_vector, 0); + + disable_irq(tqp_vector->vector_irq); + napi_disable(&tqp_vector->napi); +} + +static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector, + u32 gl_value) +{ + /* this defines the configuration for GL (Interrupt Gap Limiter) + * GL defines inter interrupt gap. + * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing + */ + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); + writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET); +} + +static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector, + u32 rl_value) +{ + /* this defines the configuration for RL (Interrupt Rate Limiter). + * Rl defines rate of interrupts i.e. number of interrupts-per-second + * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing + */ + writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); +} + +static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector) +{ + /* initialize the configuration for interrupt coalescing. + * 1. GL (Interrupt Gap Limiter) + * 2. RL (Interrupt Rate Limiter) + */ + + /* Default :enable interrupt coalesce */ + tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K; + tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K; + hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K); + /* for now we are disabling Interrupt RL - we + * will re-enable later + */ + hns3_set_vector_coalesc_rl(tqp_vector, 0); + tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW; + tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW; +} + +static int hns3_nic_net_up(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int i, j; + int ret; + + /* get irq resource for all vectors */ + ret = hns3_nic_init_irq(priv); + if (ret) { + netdev_err(netdev, "hns init irq failed! ret=%d\n", ret); + return ret; + } + + /* enable the vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_enable(&priv->tqp_vector[i]); + + /* start the ae_dev */ + ret = h->ae_algo->ops->start ? 
h->ae_algo->ops->start(h) : 0; + if (ret) + goto out_start_err; + + return 0; + +out_start_err: + for (j = i - 1; j >= 0; j--) + hns3_vector_disable(&priv->tqp_vector[j]); + + hns3_nic_uninit_irq(priv); + + return ret; +} + +static int hns3_nic_net_open(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret; + + netif_carrier_off(netdev); + + ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_tx_queues fail, ret=%d!\n", + ret); + return ret; + } + + ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps); + if (ret) { + netdev_err(netdev, + "netif_set_real_num_rx_queues fail, ret=%d!\n", ret); + return ret; + } + + ret = hns3_nic_net_up(netdev); + if (ret) { + netdev_err(netdev, + "hns net up fail, ret=%d!\n", ret); + return ret; + } + + return 0; +} + +static void hns3_nic_net_down(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + const struct hnae3_ae_ops *ops; + int i; + + /* stop ae_dev */ + ops = priv->ae_handle->ae_algo->ops; + if (ops->stop) + ops->stop(priv->ae_handle); + + /* disable vectors */ + for (i = 0; i < priv->vector_num; i++) + hns3_vector_disable(&priv->tqp_vector[i]); + + /* free irq resources */ + hns3_nic_uninit_irq(priv); +} + +static int hns3_nic_net_stop(struct net_device *netdev) +{ + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + + hns3_nic_net_down(netdev); + + return 0; +} + +void hns3_set_multicast_list(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + struct netdev_hw_addr *ha = NULL; + + if (h->ae_algo->ops->set_mc_addr) { + netdev_for_each_mc_addr(ha, netdev) + if (h->ae_algo->ops->set_mc_addr(h, ha->addr)) + netdev_err(netdev, "set multicast fail\n"); + } +} + +static int hns3_nic_uc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->add_uc_addr) + return h->ae_algo->ops->add_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_uc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->rm_uc_addr) + return h->ae_algo->ops->rm_uc_addr(h, addr); + + return 0; +} + +static int hns3_nic_mc_sync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->add_uc_addr) + return h->ae_algo->ops->add_mc_addr(h, addr); + + return 0; +} + +static int hns3_nic_mc_unsync(struct net_device *netdev, + const unsigned char *addr) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->rm_uc_addr) + return h->ae_algo->ops->rm_mc_addr(h, addr); + + return 0; +} + +void hns3_nic_set_rx_mode(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo->ops->set_promisc_mode) { + if (netdev->flags & IFF_PROMISC) + h->ae_algo->ops->set_promisc_mode(h, 1); + else + h->ae_algo->ops->set_promisc_mode(h, 0); + } + if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync)) + netdev_err(netdev, "sync uc address fail\n"); + if (netdev->flags & IFF_MULTICAST) + if 
(__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync)) + netdev_err(netdev, "sync mc address fail\n"); +} + +static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, + u16 *mss, u32 *type_cs_vlan_tso) +{ + u32 l4_offset, hdr_len; + union l3_hdr_info l3; + union l4_hdr_info l4; + u32 l4_paylen; + int ret; + + if (!skb_is_gso(skb)) + return 0; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* Software should clear the IPv4's checksum field when tso is + * needed. + */ + if (l3.v4->version == 4) + l3.v4->check = 0; + + /* tunnel packet.*/ + if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | + SKB_GSO_GRE_CSUM | + SKB_GSO_UDP_TUNNEL | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if ((!(skb_shinfo(skb)->gso_type & + SKB_GSO_PARTIAL)) && + (skb_shinfo(skb)->gso_type & + SKB_GSO_UDP_TUNNEL_CSUM)) { + /* Software should clear the udp's checksum + * field when tso is needed. + */ + l4.udp->check = 0; + } + /* reset l3&l4 pointers from outer to inner headers */ + l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* Software should clear the IPv4's checksum field when + * tso is needed. + */ + if (l3.v4->version == 4) + l3.v4->check = 0; + } + + /* normal or tunnel packet*/ + l4_offset = l4.hdr - skb->data; + hdr_len = (l4.tcp->doff * 4) + l4_offset; + + /* remove payload length from inner pseudo checksum when tso*/ + l4_paylen = skb->len - l4_offset; + csum_replace_by_diff(&l4.tcp->check, + (__force __wsum)htonl(l4_paylen)); + + /* find the txbd field values */ + *paylen = skb->len - hdr_len; + hnae_set_bit(*type_cs_vlan_tso, + HNS3_TXD_TSO_B, 1); + + /* get MSS for TSO */ + *mss = skb_shinfo(skb)->gso_size; + + return 0; +} + +static void hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, + u8 *il4_proto) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + unsigned char *l4_hdr; + unsigned char *exthdr; + u8 l4_proto_tmp; + __be16 frag_off; + + /* find outer header point */ + l3.hdr = skb_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (skb->protocol == htons(ETH_P_IPV6)) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (skb->protocol == htons(ETH_P_IP)) { + l4_proto_tmp = l3.v4->protocol; + } + + *ol4_proto = l4_proto_tmp; + + /* tunnel packet */ + if (!skb->encapsulation) { + *il4_proto = 0; + return; + } + + /* find inner header point */ + l3.hdr = skb_inner_network_header(skb); + l4_hdr = skb_inner_transport_header(skb); + + if (l3.v6->version == 6) { + exthdr = l3.hdr + sizeof(*l3.v6); + l4_proto_tmp = l3.v6->nexthdr; + if (l4_hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto_tmp, &frag_off); + } else if (l3.v4->version == 4) { + l4_proto_tmp = l3.v4->protocol; + } + + *il4_proto = l4_proto_tmp; +} + +static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + union { + struct tcphdr *tcp; + struct udphdr *udp; + struct gre_base_hdr *gre; + unsigned char *hdr; + } l4; + unsigned char *l2_hdr; + u8 l4_proto = ol4_proto; + u32 ol2_len; + u32 ol3_len; + u32 ol4_len; + u32 l2_len; + u32 l3_len; + + l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); + + /* compute L2 header size for 
normal packet, defined in 2 Bytes */ + l2_len = l3.hdr - skb->data; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* tunnel packet*/ + if (skb->encapsulation) { + /* compute OL2 header size, defined in 2 Bytes */ + ol2_len = l2_len; + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, ol2_len >> 1); + + /* compute OL3 header size, defined in 4 Bytes */ + ol3_len = l4.hdr - l3.hdr; + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, ol3_len >> 2); + + /* MAC in UDP, MAC in GRE (0x6558)*/ + if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) { + /* switch MAC header ptr from outer to inner header.*/ + l2_hdr = skb_inner_mac_header(skb); + + /* compute OL4 header size, defined in 4 Bytes. */ + ol4_len = l2_hdr - l4.hdr; + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, ol4_len >> 2); + + /* switch IP header ptr from outer to inner header */ + l3.hdr = skb_inner_network_header(skb); + + /* compute inner l2 header size, defined in 2 Bytes. */ + l2_len = l3.hdr - l2_hdr; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M, + HNS3_TXD_L2LEN_S, l2_len >> 1); + } else { + /* skb packet types not supported by hardware, + * txbd len fild doesn't be filled. + */ + return; + } + + /* switch L4 header pointer from outer to inner */ + l4.hdr = skb_inner_transport_header(skb); + + l4_proto = il4_proto; + } + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M, + HNS3_TXD_L3LEN_S, l3_len >> 2); + + /* compute inner(/normal) L4 header size, defined in 4 Bytes */ + switch (l4_proto) { + case IPPROTO_TCP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, l4.tcp->doff); + break; + case IPPROTO_SCTP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2)); + break; + case IPPROTO_UDP: + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M, + HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2)); + break; + default: + /* skb packet types not supported by hardware, + * txbd len fild doesn't be filled. + */ + return; + } +} + +static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } l3; + u32 l4_proto = ol4_proto; + + l3.hdr = skb_network_header(skb); + + /* define OL3 type and tunnel type(OL4).*/ + if (skb->encapsulation) { + /* define outer network header type.*/ + if (skb->protocol == htons(ETH_P_IP)) { + if (skb_is_gso(skb)) + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); + else + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); + + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M, + HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6); + } + + /* define tunnel type(OL4).*/ + switch (l4_proto) { + case IPPROTO_UDP: + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); + break; + case IPPROTO_GRE: + hnae_set_field(*ol_type_vlan_len_msec, + HNS3_TXD_TUNTYPE_M, + HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); + break; + default: + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. 
+ */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. + */ + skb_checksum_help(skb); + return 0; + } + + l3.hdr = skb_inner_network_header(skb); + l4_proto = il4_proto; + } + + if (l3.v4->version == 4) { + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV4); + + /* the stack computes the IP header already, the only time we + * need the hardware to recompute it is in the case of TSO. + */ + if (skb_is_gso(skb)) + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1); + + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + } else if (l3.v6->version == 6) { + hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M, + HNS3_TXD_L3T_S, HNS3_L3T_IPV6); + hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); + } + + switch (l4_proto) { + case IPPROTO_TCP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_TCP); + break; + case IPPROTO_UDP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_UDP); + break; + case IPPROTO_SCTP: + hnae_set_field(*type_cs_vlan_tso, + HNS3_TXD_L4T_M, + HNS3_TXD_L4T_S, + HNS3_L4T_SCTP); + break; + default: + /* drop the skb tunnel packet if hardware don't support, + * because hardware can't calculate csum when TSO. + */ + if (skb_is_gso(skb)) + return -EDOM; + + /* the stack computes the IP header already, + * driver calculate l4 checksum when not TSO. + */ + skb_checksum_help(skb); + return 0; + } + + return 0; +} + +static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end) +{ + /* Config bd buffer end */ + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M, + HNS3_TXD_BDTYPE_M, 0); + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end); + hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1); + hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 1); +} + +static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; + struct hns3_desc *desc = &ring->desc[ring->next_to_use]; + u32 ol_type_vlan_len_msec = 0; + u16 bdtp_fe_sc_vld_ra_ri = 0; + u32 type_cs_vlan_tso = 0; + struct sk_buff *skb; + u32 paylen = 0; + u16 mss = 0; + __be16 protocol; + u8 ol4_proto; + u8 il4_proto; + int ret; + + /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ + desc_cb->priv = priv; + desc_cb->length = size; + desc_cb->dma = dma; + desc_cb->type = type; + + /* now, fill the descriptor */ + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16((u16)size); + hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); + desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + + if (type == DESC_TYPE_SKB) { + skb = (struct sk_buff *)priv; + paylen = cpu_to_le16(skb->len); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + skb_reset_mac_len(skb); + protocol = skb->protocol; + + /* vlan packet*/ + if (protocol == htons(ETH_P_8021Q)) { + protocol = vlan_get_protocol(skb); + skb->protocol = protocol; + } + hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); + if (ret) + return ret; + + ret = hns3_set_tso(skb, &paylen, &mss, + &type_cs_vlan_tso); + if (ret) + return ret; + } + + /* Set txbd */ + 
desc->tx.ol_type_vlan_len_msec = + cpu_to_le32(ol_type_vlan_len_msec); + desc->tx.type_cs_vlan_tso_len = + cpu_to_le32(type_cs_vlan_tso); + desc->tx.paylen = cpu_to_le16(paylen); + desc->tx.mss = cpu_to_le16(mss); + } + + /* move ring pointer to next.*/ + ring_ptr_move_fw(ring, next_to_use); + + return 0; +} + +static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type) +{ + unsigned int frag_buf_num; + unsigned int k; + int sizeoflast; + int ret; + + frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + sizeoflast = size % HNS3_MAX_BD_SIZE; + sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; + + /* When the frag size is bigger than hardware, split this frag */ + for (k = 0; k < frag_buf_num; k++) { + ret = hns3_fill_desc(ring, priv, + (k == frag_buf_num - 1) ? + sizeoflast : HNS3_MAX_BD_SIZE, + dma + HNS3_MAX_BD_SIZE * k, + frag_end && (k == frag_buf_num - 1) ? 1 : 0, + (type == DESC_TYPE_SKB && !k) ? + DESC_TYPE_SKB : DESC_TYPE_PAGE); + if (ret) + return ret; + } + + return 0; +} + +static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, + struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = *out_skb; + struct skb_frag_struct *frag; + int bdnum_for_frag; + int frag_num; + int buf_num; + int size; + int i; + + size = skb_headlen(skb); + buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + + frag_num = skb_shinfo(skb)->nr_frags; + for (i = 0; i < frag_num; i++) { + frag = &skb_shinfo(skb)->frags[i]; + size = skb_frag_size(frag); + bdnum_for_frag = + (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; + if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG) + return -ENOMEM; + + buf_num += bdnum_for_frag; + } + + if (buf_num > ring_space(ring)) + return -EBUSY; + + *bnum = buf_num; + return 0; +} + +static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, + struct hns3_enet_ring *ring) +{ + struct sk_buff *skb = *out_skb; + int buf_num; + + /* No. 
of segments (plus a header) */ + buf_num = skb_shinfo(skb)->nr_frags + 1; + + if (buf_num > ring_space(ring)) + return -EBUSY; + + *bnum = buf_num; + + return 0; +} + +static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig) +{ + struct device *dev = ring_to_dev(ring); + unsigned int i; + + for (i = 0; i < ring->desc_num; i++) { + /* check if this is where we started */ + if (ring->next_to_use == next_to_use_orig) + break; + + /* unmap the descriptor dma address */ + if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) + dma_unmap_single(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + else + dma_unmap_page(dev, + ring->desc_cb[ring->next_to_use].dma, + ring->desc_cb[ring->next_to_use].length, + DMA_TO_DEVICE); + + /* rollback one */ + ring_ptr_move_bw(ring, next_to_use); + } +} + +static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_nic_ring_data *ring_data = + &tx_ring_data(priv, skb->queue_mapping); + struct hns3_enet_ring *ring = ring_data->ring; + struct device *dev = priv->dev; + struct netdev_queue *dev_queue; + struct skb_frag_struct *frag; + int next_to_use_head; + int next_to_use_frag; + dma_addr_t dma; + int buf_num; + int seg_num; + int size; + int ret; + int i; + + /* Prefetch the data used later */ + prefetch(skb->data); + + switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { + case -EBUSY: + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_busy++; + u64_stats_update_end(&ring->syncp); + + goto out_net_tx_busy; + case -ENOMEM: + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + netdev_err(netdev, "no memory to xmit!\n"); + + goto out_err_tx_ok; + default: + break; + } + + /* No. of segments (plus a header) */ + seg_num = skb_shinfo(skb)->nr_frags + 1; + /* Fill the first part */ + size = skb_headlen(skb); + + next_to_use_head = ring->next_to_use; + + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) { + netdev_err(netdev, "TX head DMA map failed\n"); + ring->stats.sw_err_cnt++; + goto out_err_tx_ok; + } + + ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0, + DESC_TYPE_SKB); + if (ret) + goto head_dma_map_err; + + next_to_use_frag = ring->next_to_use; + /* Fill the fragments */ + for (i = 1; i < seg_num; i++) { + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma)) { + netdev_err(netdev, "TX frag(%d) DMA map failed\n", i); + ring->stats.sw_err_cnt++; + goto frag_dma_map_err; + } + ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma, + seg_num - 1 == i ? 
1 : 0, + DESC_TYPE_PAGE); + + if (ret) + goto frag_dma_map_err; + } + + /* Complete translate all packets */ + dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index); + netdev_tx_sent_queue(dev_queue, skb->len); + + wmb(); /* Commit all data before submit */ + + hnae_queue_xmit(ring->tqp, buf_num); + + return NETDEV_TX_OK; + +frag_dma_map_err: + hns_nic_dma_unmap(ring, next_to_use_frag); + +head_dma_map_err: + hns_nic_dma_unmap(ring, next_to_use_head); + +out_err_tx_ok: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + +out_net_tx_busy: + netif_stop_subqueue(netdev, ring_data->queue_index); + smp_mb(); /* Commit all data before submit */ + + return NETDEV_TX_BUSY; +} + +static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + struct sockaddr *mac_addr = p; + int ret; + + if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) + return -EADDRNOTAVAIL; + + ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data); + if (ret) { + netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret); + return ret; + } + + ether_addr_copy(netdev->dev_addr, mac_addr->sa_data); + + return 0; +} + +static int hns3_nic_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if (features & (NETIF_F_TSO | NETIF_F_TSO6)) { + priv->ops.fill_desc = hns3_fill_desc_tso; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; + } else { + priv->ops.fill_desc = hns3_fill_desc; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; + } + + netdev->features = features; + return 0; +} + +static void +hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct hns3_enet_ring *ring; + unsigned int start; + unsigned int idx; + u64 tx_bytes = 0; + u64 rx_bytes = 0; + u64 tx_pkts = 0; + u64 rx_pkts = 0; + + for (idx = 0; idx < queue_num; idx++) { + /* fetch the tx stats */ + ring = priv->ring_data[idx].ring; + do { + tx_bytes += ring->stats.tx_bytes; + tx_pkts += ring->stats.tx_pkts; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + + /* fetch the rx stats */ + ring = priv->ring_data[idx + queue_num].ring; + do { + rx_bytes += ring->stats.rx_bytes; + rx_pkts += ring->stats.rx_pkts; + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); + } + + stats->tx_bytes = tx_bytes; + stats->tx_packets = tx_pkts; + stats->rx_bytes = rx_bytes; + stats->rx_packets = rx_pkts; + + stats->rx_errors = netdev->stats.rx_errors; + stats->multicast = netdev->stats.multicast; + stats->rx_length_errors = netdev->stats.rx_length_errors; + stats->rx_crc_errors = netdev->stats.rx_crc_errors; + stats->rx_missed_errors = netdev->stats.rx_missed_errors; + + stats->tx_errors = netdev->stats.tx_errors; + stats->rx_dropped = netdev->stats.rx_dropped; + stats->tx_dropped = netdev->stats.tx_dropped; + stats->collisions = netdev->stats.collisions; + stats->rx_over_errors = netdev->stats.rx_over_errors; + stats->rx_frame_errors = netdev->stats.rx_frame_errors; + stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; + stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; + stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; + stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; + stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; + stats->tx_window_errors = 
netdev->stats.tx_window_errors; + stats->rx_compressed = netdev->stats.rx_compressed; + stats->tx_compressed = netdev->stats.tx_compressed; +} + +static void hns3_add_tunnel_port(struct net_device *netdev, u16 port, + enum hns3_udp_tnl_type type) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; + struct hnae3_handle *h = priv->ae_handle; + + if (udp_tnl->used && udp_tnl->dst_port == port) { + udp_tnl->used++; + return; + } + + if (udp_tnl->used) { + netdev_warn(netdev, + "UDP tunnel [%d], port [%d] offload\n", type, port); + return; + } + + udp_tnl->dst_port = port; + udp_tnl->used = 1; + /* TBD send command to hardware to add port */ + if (h->ae_algo->ops->add_tunnel_udp) + h->ae_algo->ops->add_tunnel_udp(h, port); +} + +static void hns3_del_tunnel_port(struct net_device *netdev, u16 port, + enum hns3_udp_tnl_type type) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type]; + struct hnae3_handle *h = priv->ae_handle; + + if (!udp_tnl->used || udp_tnl->dst_port != port) { + netdev_warn(netdev, + "Invalid UDP tunnel port %d\n", port); + return; + } + + udp_tnl->used--; + if (udp_tnl->used) + return; + + udp_tnl->dst_port = 0; + /* TBD send command to hardware to del port */ + if (h->ae_algo->ops->del_tunnel_udp) + h->ae_algo->ops->add_tunnel_udp(h, port); +} + +/* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports + * @netdev: This physical ports's netdev + * @ti: Tunnel information + */ +static void hns3_nic_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + u16 port_n = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); + break; + case UDP_TUNNEL_TYPE_GENEVE: + hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); + break; + default: + netdev_err(netdev, "unsupported tunnel type %d\n", ti->type); + break; + } +} + +static void hns3_nic_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + u16 port_n = ntohs(ti->port); + + switch (ti->type) { + case UDP_TUNNEL_TYPE_VXLAN: + hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN); + break; + case UDP_TUNNEL_TYPE_GENEVE: + hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE); + break; + default: + break; + } +} + +static int hns3_setup_tc(struct net_device *netdev, u8 tc) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + struct hnae3_knic_private_info *kinfo = &h->kinfo; + unsigned int i; + int ret; + + if (tc > HNAE3_MAX_TC) + return -EINVAL; + + if (kinfo->num_tc == tc) + return 0; + + if (!netdev) + return -EINVAL; + + if (!tc) { + netdev_reset_tc(netdev); + return 0; + } + + /* Set num_tc for netdev */ + ret = netdev_set_num_tc(netdev, tc); + if (ret) + return ret; + + /* Set per TC queues for the VSI */ + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (kinfo->tc_info[i].enable) + netdev_set_tc_queue(netdev, + kinfo->tc_info[i].tc, + kinfo->tc_info[i].tqp_count, + kinfo->tc_info[i].tqp_offset); + } + + return 0; +} + +static int hns3_nic_setup_tc(struct net_device *dev, u32 handle, + u32 chain_index, __be16 protocol, + struct tc_to_netdev *tc) +{ + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return hns3_setup_tc(dev, tc->mqprio->num_tc); +} + +static int hns3_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + 
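/* A minimal sketch of the conventional u64_stats reader pattern, reusing this
 * driver's own field names (illustration only, not text from the patch): the
 * fetch loops in hns3_nic_get_stats64() above pass `start` to
 * u64_stats_fetch_retry_irq() without a prior u64_stats_fetch_begin_irq(),
 * so `start` is never written before it is read. The usual pairing is:
 *
 *	do {
 *		start = u64_stats_fetch_begin_irq(&ring->syncp);
 *		tx_bytes = ring->stats.tx_bytes;
 *		tx_pkts  = ring->stats.tx_pkts;
 *	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 */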
struct hnae3_handle *h = priv->ae_handle; + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); + + return ret; +} + +static int hns3_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret = -EIO; + + if (h->ae_algo->ops->set_vlan_filter) + ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); + + return ret; +} + +static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, + u8 qos, __be16 vlan_proto) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + int ret = -EIO; + + if (h->ae_algo->ops->set_vf_vlan_filter) + ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, + qos, vlan_proto); + + return ret; +} + +static const struct net_device_ops hns3_nic_netdev_ops = { + .ndo_open = hns3_nic_net_open, + .ndo_stop = hns3_nic_net_stop, + .ndo_start_xmit = hns3_nic_net_xmit, + .ndo_set_mac_address = hns3_nic_net_set_mac_address, + .ndo_set_features = hns3_nic_set_features, + .ndo_get_stats64 = hns3_nic_get_stats64, + .ndo_setup_tc = hns3_nic_setup_tc, + .ndo_set_rx_mode = hns3_nic_set_rx_mode, + .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add, + .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del, + .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid, + .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, +}; + +/* hns3_probe - Device initialization routine + * @pdev: PCI device information struct + * @ent: entry in hns3_pci_tbl + * + * hns3_probe initializes a PF identified by a pci_dev structure. + * The OS initialization, configuring of the PF private structure, + * and a hardware reset occur. 
+ * + * Returns 0 on success, negative on failure + */ +static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct hnae3_ae_dev *ae_dev; + int ret; + + ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), + GFP_KERNEL); + if (!ae_dev) { + ret = -ENOMEM; + return ret; + } + + ae_dev->pdev = pdev; + ae_dev->dev_type = HNAE3_DEV_KNIC; + pci_set_drvdata(pdev, ae_dev); + + return hnae3_register_ae_dev(ae_dev); +} + +/* hns3_remove - Device removal routine + * @pdev: PCI device information struct + */ +static void hns3_remove(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + hnae3_unregister_ae_dev(ae_dev); + + devm_kfree(&pdev->dev, ae_dev); + + pci_set_drvdata(pdev, NULL); +} + +static struct pci_driver hns3_driver = { + .name = hns3_driver_name, + .id_table = hns3_pci_tbl, + .probe = hns3_probe, + .remove = hns3_remove, +}; + +/* set default feature to hns3 */ +static void hns3_set_default_feature(struct net_device *netdev) +{ + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; + + netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; + + netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->vlan_features |= + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | + NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | + NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; +} + +static int hns3_alloc_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + unsigned int order = hnae_page_order(ring); + struct page *p; + + p = dev_alloc_pages(order); + if (!p) + return -ENOMEM; + + cb->priv = p; + cb->page_offset = 0; + cb->reuse_flag = 0; + cb->buf = page_address(p); + cb->length = hnae_page_size(ring); + cb->type = DESC_TYPE_PAGE; + + memset(cb->buf, 0, cb->length); + + return 0; +} + +static void hns3_free_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type == DESC_TYPE_SKB) + dev_kfree_skb_any((struct sk_buff *)cb->priv); + else if (!HNAE3_IS_TX_RING(ring)) + put_page((struct page *)cb->priv); + memset(cb, 0, sizeof(*cb)); +} + +static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) +{ + cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, + cb->length, ring_to_dma_dir(ring)); + + if (dma_mapping_error(ring_to_dev(ring), cb->dma)) + return -EIO; + + return 0; +} + +static void hns3_unmap_buffer(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + if (cb->type == DESC_TYPE_SKB) + dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); + else + dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, + ring_to_dma_dir(ring)); +} + +static void 
hns3_buffer_detach(struct hns3_enet_ring *ring, int i) +{ + hns3_unmap_buffer(ring, &ring->desc_cb[i]); + ring->desc[i].addr = 0; +} + +static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i) +{ + struct hns3_desc_cb *cb = &ring->desc_cb[i]; + + if (!ring->desc_cb[i].dma) + return; + + hns3_buffer_detach(ring, i); + hns3_free_buffer(ring, cb); +} + +static void hns3_free_buffers(struct hns3_enet_ring *ring) +{ + int i; + + for (i = 0; i < ring->desc_num; i++) + hns3_free_buffer_detach(ring, i); +} + +/* free desc along with its attached buffer */ +static void hns3_free_desc(struct hns3_enet_ring *ring) +{ + hns3_free_buffers(ring); + + dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, + ring->desc_num * sizeof(ring->desc[0]), + DMA_BIDIRECTIONAL); + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; +} + +static int hns3_alloc_desc(struct hns3_enet_ring *ring) +{ + int size = ring->desc_num * sizeof(ring->desc[0]); + + ring->desc = kzalloc(size, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc, + size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) { + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; + return -ENOMEM; + } + + return 0; +} + +static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring, + struct hns3_desc_cb *cb) +{ + int ret; + + ret = hns3_alloc_buffer(ring, cb); + if (ret) + goto out; + + ret = hns3_map_buffer(ring, cb); + if (ret) + goto out_with_buf; + + return 0; + +out_with_buf: + hns3_free_buffers(ring); +out: + return ret; +} + +static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i) +{ + int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]); + + if (ret) + return ret; + + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + + return 0; +} + +/* Allocate memory for raw pkg, and map with dma */ +static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) +{ + int i, j, ret; + + for (i = 0; i < ring->desc_num; i++) { + ret = hns3_alloc_buffer_attach(ring, i); + if (ret) + goto out_buffer_fail; + } + + return 0; + +out_buffer_fail: + for (j = i - 1; j >= 0; j--) + hns3_free_buffer_detach(ring, j); + return ret; +} + +/* detach a in-used buffer and replace with a reserved one */ +static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, + struct hns3_desc_cb *res_cb) +{ + hns3_map_buffer(ring, &ring->desc_cb[i]); + ring->desc_cb[i] = *res_cb; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); +} + +static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) +{ + ring->desc_cb[i].reuse_flag = 0; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); +} + +static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, + int *pkts) +{ + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + + (*pkts) += (desc_cb->type == DESC_TYPE_SKB); + (*bytes) += desc_cb->length; + /* desc_cb will be cleaned, after hnae_free_buffer_detach*/ + hns3_free_buffer_detach(ring, ring->next_to_clean); + + ring_ptr_move_fw(ring, next_to_clean); +} + +static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) +{ + int u = ring->next_to_use; + int c = ring->next_to_clean; + + if (unlikely(h > ring->desc_num)) + return 0; + + return u > c ? 
(h > c && h <= u) : (h > c || h <= u); +} + +int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) +{ + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct netdev_queue *dev_queue; + int bytes, pkts; + int head; + + head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); + rmb(); /* Make sure head is ready before touch any data */ + + if (is_ring_empty(ring) || head == ring->next_to_clean) + return 0; /* no data to poll */ + + if (!is_valid_clean_head(ring, head)) { + netdev_err(netdev, "wrong head (%d, %d-%d)\n", head, + ring->next_to_use, ring->next_to_clean); + + u64_stats_update_begin(&ring->syncp); + ring->stats.io_err_cnt++; + u64_stats_update_end(&ring->syncp); + return -EIO; + } + + bytes = 0; + pkts = 0; + while (head != ring->next_to_clean && budget) { + hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); + /* Issue prefetch for next Tx descriptor */ + prefetch(&ring->desc_cb[ring->next_to_clean]); + budget--; + } + + ring->tqp_vector->tx_group.total_bytes += bytes; + ring->tqp_vector->tx_group.total_packets += pkts; + + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_bytes += bytes; + ring->stats.tx_pkts += pkts; + u64_stats_update_end(&ring->syncp); + + dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); + netdev_tx_completed_queue(dev_queue, pkts, bytes); + + if (unlikely(pkts && netif_carrier_ok(netdev) && + (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. + */ + smp_mb(); + if (netif_tx_queue_stopped(dev_queue)) { + netif_tx_wake_queue(dev_queue); + ring->stats.restart_queue++; + } + } + + return !!budget; +} + +static int hns3_desc_unused(struct hns3_enet_ring *ring) +{ + int ntc = ring->next_to_clean; + int ntu = ring->next_to_use; + + return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; +} + +static void +hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) +{ + struct hns3_desc_cb *desc_cb; + struct hns3_desc_cb res_cbs; + int i, ret; + + for (i = 0; i < cleand_count; i++) { + desc_cb = &ring->desc_cb[ring->next_to_use]; + if (desc_cb->reuse_flag) { + u64_stats_update_begin(&ring->syncp); + ring->stats.reuse_pg_cnt++; + u64_stats_update_end(&ring->syncp); + + hns3_reuse_buffer(ring, ring->next_to_use); + } else { + ret = hns3_reserve_buffer_map(ring, &res_cbs); + if (ret) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + + netdev_err(ring->tqp->handle->kinfo.netdev, + "hnae reserve buffer map failed.\n"); + break; + } + hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); + } + + ring_ptr_move_fw(ring, next_to_use); + } + + wmb(); /* Make all data has been write before submit */ + writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); +} + +/* hns3_nic_get_headlen - determine size of header for LRO/GRO + * @data: pointer to the start of the headers + * @max: total length of section to find headers in + * + * This function is meant to determine the length of headers that will + * be recognized by hardware for LRO, GRO, and RSC offloads. The main + * motivation of doing this is to only perform one pull for IPv4 TCP + * packets so that we can do basic things like calculating the gso_size + * based on the average data per packet. 
+ */ +static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag, + unsigned int max_size) +{ + unsigned char *network; + u8 hlen; + + /* This should never happen, but better safe than sorry */ + if (max_size < ETH_HLEN) + return max_size; + + /* Initialize network frame pointer */ + network = data; + + /* Set first protocol and move network header forward */ + network += ETH_HLEN; + + /* Handle any vlan tag if present */ + if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S) + == HNS3_RX_FLAG_VLAN_PRESENT) { + if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN)) + return max_size; + + network += VLAN_HLEN; + } + + /* Handle L3 protocols */ + if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) + == HNS3_RX_FLAG_L3ID_IPV4) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct iphdr))) + return max_size; + + /* Access ihl as a u8 to avoid unaligned access on ia64 */ + hlen = (network[0] & 0x0F) << 2; + + /* Verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct iphdr)) + return network - data; + + /* Record next protocol if header is present */ + } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S) + == HNS3_RX_FLAG_L3ID_IPV6) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct ipv6hdr))) + return max_size; + + /* Record next protocol */ + hlen = sizeof(struct ipv6hdr); + } else { + return network - data; + } + + /* Relocate pointer to start of L4 header */ + network += hlen; + + /* Finally sort out TCP/UDP */ + if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) + == HNS3_RX_FLAG_L4ID_TCP) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct tcphdr))) + return max_size; + + /* Access doff as a u8 to avoid unaligned access on ia64 */ + hlen = (network[12] & 0xF0) >> 2; + + /* Verify hlen meets minimum size requirements */ + if (hlen < sizeof(struct tcphdr)) + return network - data; + + network += hlen; + } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S) + == HNS3_RX_FLAG_L4ID_UDP) { + if ((typeof(max_size))(network - data) > + (max_size - sizeof(struct udphdr))) + return max_size; + + network += sizeof(struct udphdr); + } + + /* If everything has gone correctly network should be the + * data section of the packet and will be the end of the header. + * If not then it probably represents the end of the last recognized + * header. 
+ */ + if ((typeof(max_size))(network - data) < max_size) + return network - data; + else + return max_size; +} + +static void hns3_nic_reuse_page(struct sk_buff *skb, int i, + struct hns3_enet_ring *ring, int pull_len, + struct hns3_desc_cb *desc_cb) +{ + struct hns3_desc *desc; + int truesize, size; + int last_offset; + bool twobufs; + + twobufs = ((PAGE_SIZE < 8192) && + hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048); + + desc = &ring->desc[ring->next_to_clean]; + size = le16_to_cpu(desc->rx.size); + + if (twobufs) { + truesize = hnae_buf_size(ring); + } else { + truesize = ALIGN(size, L1_CACHE_BYTES); + last_offset = hnae_page_size(ring) - hnae_buf_size(ring); + } + + skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, + size - pull_len, truesize - pull_len); + + /* Avoid re-using remote pages,flag default unreuse */ + if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) + return; + + if (twobufs) { + /* If we are only owner of page we can reuse it */ + if (likely(page_count(desc_cb->priv) == 1)) { + /* Flip page offset to other buffer */ + desc_cb->page_offset ^= truesize; + + desc_cb->reuse_flag = 1; + /* bump ref count on page before it is given*/ + get_page(desc_cb->priv); + } + return; + } + + /* Move offset up to the next cache line */ + desc_cb->page_offset += truesize; + + if (desc_cb->page_offset <= last_offset) { + desc_cb->reuse_flag = 1; + /* Bump ref count on page before it is given*/ + get_page(desc_cb->priv); + } +} + +static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, + struct hns3_desc *desc) +{ + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + int l3_type, l4_type; + u32 bd_base_info; + int ol4_type; + u32 l234info; + + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + + skb->ip_summed = CHECKSUM_NONE; + + skb_checksum_none_assert(skb); + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + /* check if hardware has done checksum */ + if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) + return; + + if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) || + hnae_get_bit(l234info, HNS3_RXD_L4E_B) || + hnae_get_bit(l234info, HNS3_RXD_OL3E_B) || + hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) { + netdev_err(netdev, "L3/L4 error pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.l3l4_csum_err++; + u64_stats_update_end(&ring->syncp); + + return; + } + + l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); + + ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); + switch (ol4_type) { + case HNS3_OL4_TYPE_MAC_IN_UDP: + case HNS3_OL4_TYPE_NVGRE: + skb->csum_level = 1; + case HNS3_OL4_TYPE_NO_TUN: + /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */ + if (l3_type == HNS3_L3_TYPE_IPV4 || + (l3_type == HNS3_L3_TYPE_IPV6 && + (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + break; + } +} + +static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, + struct sk_buff **out_skb, int *out_bnum) +{ + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct hns3_desc_cb *desc_cb; + struct hns3_desc *desc; + struct sk_buff *skb; + unsigned char *va; + u32 bd_base_info; + int pull_len; + u32 l234info; + int length; + int bnum; + + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + + prefetch(desc); + + 
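/* The remainder of hns3_handle_rx_bd() below builds the skb from the
 * descriptor just prefetched: frames of at most HNS3_RX_HEAD_SIZE bytes are
 * copied straight into the skb linear area so the page buffer can be reused,
 * while larger frames pull only the headers and attach the remaining buffers
 * as page fragments, walking descriptors until the frame-end (FE) bit is
 * seen. Invalid, truncated or L2-error frames are dropped and the matching
 * stats counter is bumped.
 */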
length = le16_to_cpu(desc->rx.pkt_len); + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + + /* Check valid BD */ + if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + return -EFAULT; + + va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; + + /* Prefetch first cache line of first page + * Idea is to cache few bytes of the header of the packet. Our L1 Cache + * line size is 64B so need to prefetch twice to make it 128B. But in + * actual we can have greater size of caches with 128B Level 1 cache + * lines. In such a case, single fetch would suffice to cache in the + * relevant part of the header. + */ + prefetch(va); +#if L1_CACHE_BYTES < 128 + prefetch(va + L1_CACHE_BYTES); +#endif + + skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, + HNS3_RX_HEAD_SIZE); + if (unlikely(!skb)) { + netdev_err(netdev, "alloc rx skb fail\n"); + + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + + return -ENOMEM; + } + + prefetchw(skb->data); + + bnum = 1; + if (length <= HNS3_RX_HEAD_SIZE) { + memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); + + /* We can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) + desc_cb->reuse_flag = 1; + else /* This page cannot be reused so discard it */ + put_page(desc_cb->priv); + + ring_ptr_move_fw(ring, next_to_clean); + } else { + u64_stats_update_begin(&ring->syncp); + ring->stats.seg_pkt_cnt++; + u64_stats_update_end(&ring->syncp); + + pull_len = hns3_nic_get_headlen(va, l234info, + HNS3_RX_HEAD_SIZE); + memcpy(__skb_put(skb, pull_len), va, + ALIGN(pull_len, sizeof(long))); + + hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + + while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + bnum++; + } + } + + *out_bnum = bnum; + + if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) { + netdev_err(netdev, "no valid bd,%016llx,%016llx\n", + ((u64 *)desc)[0], ((u64 *)desc)[1]); + u64_stats_update_begin(&ring->syncp); + ring->stats.non_vld_descs++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EINVAL; + } + + if (unlikely((!desc->rx.pkt_len) || + hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) { + netdev_err(netdev, "truncated pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.err_pkt_len++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EFAULT; + } + + if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) { + netdev_err(netdev, "L2 error pkt\n"); + u64_stats_update_begin(&ring->syncp); + ring->stats.l2_err++; + u64_stats_update_end(&ring->syncp); + + dev_kfree_skb_any(skb); + return -EFAULT; + } + + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_pkts++; + ring->stats.rx_bytes += skb->len; + u64_stats_update_end(&ring->syncp); + + ring->tqp_vector->rx_group.total_bytes += skb->len; + + hns3_rx_checksum(ring, skb, desc); + return 0; +} + +static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget) +{ +#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + int recv_pkts, recv_bds, clean_count, err; + int unused_count = hns3_desc_unused(ring); + struct sk_buff *skb = NULL; 
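/* RX poll loop below: the RING_FBDNUM register reports how many buffer
 * descriptors the hardware has filled; at most `budget` packets are handled
 * per call, and RX buffers are replenished in batches of
 * RCB_NOF_ALLOC_RX_BUFF_ONCE while descriptors are being consumed, with a
 * final top-up before returning.
 */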
+ int num, bnum = 0; + + num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); + rmb(); /* Make sure num taken effect before the other data is touched */ + + recv_pkts = 0, recv_bds = 0, clean_count = 0; + num -= unused_count; + + while (recv_pkts < budget && recv_bds < num) { + /* Reuse or realloc buffers */ + if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) { + hns3_nic_alloc_rx_buffers(ring, + clean_count + unused_count); + clean_count = 0; + unused_count = hns3_desc_unused(ring); + } + + /* Poll one pkt */ + err = hns3_handle_rx_bd(ring, &skb, &bnum); + if (unlikely(!skb)) /* This fault cannot be repaired */ + goto out; + + recv_bds += bnum; + clean_count += bnum; + if (unlikely(err)) { /* Do jump the err */ + recv_pkts++; + continue; + } + + /* Do update ip stack process */ + skb->protocol = eth_type_trans(skb, netdev); + (void)napi_gro_receive(&ring->tqp_vector->napi, skb); + + recv_pkts++; + } + +out: + /* Make all data has been write before submit */ + if (clean_count + unused_count > 0) + hns3_nic_alloc_rx_buffers(ring, + clean_count + unused_count); + + return recv_pkts; +} + +static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group) +{ +#define HNS3_RX_ULTRA_PACKET_RATE 40000 + enum hns3_flow_level_range new_flow_level; + struct hns3_enet_tqp_vector *tqp_vector; + int packets_per_secs; + int bytes_per_usecs; + u16 new_int_gl; + int usecs; + + if (!ring_group->int_gl) + return false; + + if (ring_group->total_packets == 0) { + ring_group->int_gl = HNS3_INT_GL_50K; + ring_group->flow_level = HNS3_FLOW_LOW; + return true; + } + + /* Simple throttlerate management + * 0-10MB/s lower (50000 ints/s) + * 10-20MB/s middle (20000 ints/s) + * 20-1249MB/s high (18000 ints/s) + * > 40000pps ultra (8000 ints/s) + */ + new_flow_level = ring_group->flow_level; + new_int_gl = ring_group->int_gl; + tqp_vector = ring_group->ring->tqp_vector; + usecs = (ring_group->int_gl << 1); + bytes_per_usecs = ring_group->total_bytes / usecs; + /* 1000000 microseconds */ + packets_per_secs = ring_group->total_packets * 1000000 / usecs; + + switch (new_flow_level) { + case HNS3_FLOW_LOW: + if (bytes_per_usecs > 10) + new_flow_level = HNS3_FLOW_MID; + break; + case HNS3_FLOW_MID: + if (bytes_per_usecs > 20) + new_flow_level = HNS3_FLOW_HIGH; + else if (bytes_per_usecs <= 10) + new_flow_level = HNS3_FLOW_LOW; + break; + case HNS3_FLOW_HIGH: + case HNS3_FLOW_ULTRA: + default: + if (bytes_per_usecs <= 20) + new_flow_level = HNS3_FLOW_MID; + break; + } + + if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) && + (&tqp_vector->rx_group == ring_group)) + new_flow_level = HNS3_FLOW_ULTRA; + + switch (new_flow_level) { + case HNS3_FLOW_LOW: + new_int_gl = HNS3_INT_GL_50K; + break; + case HNS3_FLOW_MID: + new_int_gl = HNS3_INT_GL_20K; + break; + case HNS3_FLOW_HIGH: + new_int_gl = HNS3_INT_GL_18K; + break; + case HNS3_FLOW_ULTRA: + new_int_gl = HNS3_INT_GL_8K; + break; + default: + break; + } + + ring_group->total_bytes = 0; + ring_group->total_packets = 0; + ring_group->flow_level = new_flow_level; + if (new_int_gl != ring_group->int_gl) { + ring_group->int_gl = new_int_gl; + return true; + } + return false; +} + +static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) +{ + u16 rx_int_gl, tx_int_gl; + bool rx, tx; + + rx = hns3_get_new_int_gl(&tqp_vector->rx_group); + tx = hns3_get_new_int_gl(&tqp_vector->tx_group); + rx_int_gl = tqp_vector->rx_group.int_gl; + tx_int_gl = tqp_vector->tx_group.int_gl; + if (rx && tx) { + if (rx_int_gl > tx_int_gl) { + 
tqp_vector->tx_group.int_gl = rx_int_gl; + tqp_vector->tx_group.flow_level = + tqp_vector->rx_group.flow_level; + hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl); + } else { + tqp_vector->rx_group.int_gl = tx_int_gl; + tqp_vector->rx_group.flow_level = + tqp_vector->tx_group.flow_level; + hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl); + } + } +} + +static int hns3_nic_common_poll(struct napi_struct *napi, int budget) +{ + struct hns3_enet_ring *ring; + int rx_pkt_total = 0; + + struct hns3_enet_tqp_vector *tqp_vector = + container_of(napi, struct hns3_enet_tqp_vector, napi); + bool clean_complete = true; + int rx_budget; + + /* Since the actual Tx work is minimal, we can give the Tx a larger + * budget and be more aggressive about cleaning up the Tx descriptors. + */ + hns3_for_each_ring(ring, tqp_vector->tx_group) { + if (!hns3_clean_tx_ring(ring, budget)) + clean_complete = false; + } + + /* make sure rx ring budget not smaller than 1 */ + rx_budget = max(budget / tqp_vector->num_tqps, 1); + + hns3_for_each_ring(ring, tqp_vector->rx_group) { + int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget); + + if (rx_cleaned >= rx_budget) + clean_complete = false; + + rx_pkt_total += rx_cleaned; + } + + tqp_vector->rx_group.total_packets += rx_pkt_total; + + if (!clean_complete) + return budget; + + napi_complete(napi); + hns3_update_new_int_gl(tqp_vector); + hns3_mask_vector_irq(tqp_vector, 1); + + return rx_pkt_total; +} + +static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *cur_chain = head; + struct hnae3_ring_chain_node *chain; + struct hns3_enet_ring *tx_ring; + struct hns3_enet_ring *rx_ring; + + tx_ring = tqp_vector->tx_group.ring; + if (tx_ring) { + cur_chain->tqp_index = tx_ring->tqp->tqp_index; + hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + + cur_chain->next = NULL; + + while (tx_ring->next) { + tx_ring = tx_ring->next; + + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), + GFP_KERNEL); + if (!chain) + return -ENOMEM; + + cur_chain->next = chain; + chain->tqp_index = tx_ring->tqp->tqp_index; + hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_TX); + + cur_chain = chain; + } + } + + rx_ring = tqp_vector->rx_group.ring; + if (!tx_ring && rx_ring) { + cur_chain->next = NULL; + cur_chain->tqp_index = rx_ring->tqp->tqp_index; + hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + + rx_ring = rx_ring->next; + } + + while (rx_ring) { + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); + if (!chain) + return -ENOMEM; + + cur_chain->next = chain; + chain->tqp_index = rx_ring->tqp->tqp_index; + hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B, + HNAE3_RING_TYPE_RX); + cur_chain = chain; + + rx_ring = rx_ring->next; + } + + return 0; +} + +static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector, + struct hnae3_ring_chain_node *head) +{ + struct pci_dev *pdev = tqp_vector->handle->pdev; + struct hnae3_ring_chain_node *chain_tmp, *chain; + + chain = head->next; + + while (chain) { + chain_tmp = chain->next; + devm_kfree(&pdev->dev, chain); + chain = chain_tmp; + } +} + +static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group, + struct hns3_enet_ring *ring) +{ + ring->next = group->ring; + group->ring = ring; + + group->count++; +} + +static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node 
vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_vector_info *vector; + struct pci_dev *pdev = h->pdev; + u16 tqp_num = h->kinfo.num_tqps; + u16 vector_num; + int ret = 0; + u16 i; + + /* RSS size, cpu online and vector_num should be the same */ + /* Should consider 2p/4p later */ + vector_num = min_t(u16, num_online_cpus(), tqp_num); + vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), + GFP_KERNEL); + if (!vector) + return -ENOMEM; + + vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); + + priv->vector_num = vector_num; + priv->tqp_vector = (struct hns3_enet_tqp_vector *) + devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), + GFP_KERNEL); + if (!priv->tqp_vector) + return -ENOMEM; + + for (i = 0; i < tqp_num; i++) { + u16 vector_i = i % vector_num; + + tqp_vector = &priv->tqp_vector[vector_i]; + + hns3_add_ring_to_group(&tqp_vector->tx_group, + priv->ring_data[i].ring); + + hns3_add_ring_to_group(&tqp_vector->rx_group, + priv->ring_data[i + tqp_num].ring); + + tqp_vector->idx = vector_i; + tqp_vector->mask_addr = vector[vector_i].io_addr; + tqp_vector->vector_irq = vector[vector_i].vector; + tqp_vector->num_tqps++; + + priv->ring_data[i].ring->tqp_vector = tqp_vector; + priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector; + } + + for (i = 0; i < vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + tqp_vector->rx_group.total_bytes = 0; + tqp_vector->rx_group.total_packets = 0; + tqp_vector->tx_group.total_bytes = 0; + tqp_vector->tx_group.total_packets = 0; + hns3_vector_gl_rl_init(tqp_vector); + tqp_vector->handle = h; + + ret = hns3_get_vector_ring_chain(tqp_vector, + &vector_ring_chain); + if (ret) + goto out; + + ret = h->ae_algo->ops->map_ring_to_vector(h, + tqp_vector->vector_irq, &vector_ring_chain); + if (ret) + goto out; + + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + + netif_napi_add(priv->netdev, &tqp_vector->napi, + hns3_nic_common_poll, NAPI_POLL_WEIGHT); + } + +out: + devm_kfree(&pdev->dev, vector); + return ret; +} + +static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) +{ + struct hnae3_ring_chain_node vector_ring_chain; + struct hnae3_handle *h = priv->ae_handle; + struct hns3_enet_tqp_vector *tqp_vector; + struct pci_dev *pdev = h->pdev; + int i, ret; + + for (i = 0; i < priv->vector_num; i++) { + tqp_vector = &priv->tqp_vector[i]; + + ret = hns3_get_vector_ring_chain(tqp_vector, + &vector_ring_chain); + if (ret) + return ret; + + ret = h->ae_algo->ops->unmap_ring_from_vector(h, + tqp_vector->vector_irq, &vector_ring_chain); + if (ret) + return ret; + + hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); + + if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { + (void)irq_set_affinity_hint( + priv->tqp_vector[i].vector_irq, + NULL); + devm_free_irq(&pdev->dev, + priv->tqp_vector[i].vector_irq, + &priv->tqp_vector[i]); + } + + priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; + + netif_napi_del(&priv->tqp_vector[i].napi); + } + + devm_kfree(&pdev->dev, priv->tqp_vector); + + return 0; +} + +static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, + int ring_type) +{ + struct hns3_nic_ring_data *ring_data = priv->ring_data; + int queue_num = priv->ae_handle->kinfo.num_tqps; + struct pci_dev *pdev = priv->ae_handle->pdev; + struct hns3_enet_ring *ring; + + ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL); + if (!ring) + return -ENOMEM; + + if (ring_type 
== HNAE3_RING_TYPE_TX) { + ring_data[q->tqp_index].ring = ring; + ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET; + } else { + ring_data[q->tqp_index + queue_num].ring = ring; + ring->io_base = q->io_base; + } + + hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); + + ring_data[q->tqp_index].queue_index = q->tqp_index; + + ring->tqp = q; + ring->desc = NULL; + ring->desc_cb = NULL; + ring->dev = priv->dev; + ring->desc_dma_addr = 0; + ring->buf_size = q->buf_size; + ring->desc_num = q->desc_num; + ring->next_to_use = 0; + ring->next_to_clean = 0; + + return 0; +} + +static int hns3_queue_to_ring(struct hnae3_queue *tqp, + struct hns3_nic_priv *priv) +{ + int ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX); + if (ret) + return ret; + + ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX); + if (ret) + return ret; + + return 0; +} + +static int hns3_get_ring_config(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + struct pci_dev *pdev = h->pdev; + int i, ret; + + priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps * + sizeof(*priv->ring_data) * 2, + GFP_KERNEL); + if (!priv->ring_data) + return -ENOMEM; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv); + if (ret) + goto err; + } + + return 0; +err: + devm_kfree(&pdev->dev, priv->ring_data); + return ret; +} + +static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) +{ + int ret; + + if (ring->desc_num <= 0 || ring->buf_size <= 0) + return -EINVAL; + + ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), + GFP_KERNEL); + if (!ring->desc_cb) { + ret = -ENOMEM; + goto out; + } + + ret = hns3_alloc_desc(ring); + if (ret) + goto out_with_desc_cb; + + if (!HNAE3_IS_TX_RING(ring)) { + ret = hns3_alloc_ring_buffers(ring); + if (ret) + goto out_with_desc; + } + + return 0; + +out_with_desc: + hns3_free_desc(ring); +out_with_desc_cb: + kfree(ring->desc_cb); + ring->desc_cb = NULL; +out: + return ret; +} + +static void hns3_fini_ring(struct hns3_enet_ring *ring) +{ + hns3_free_desc(ring); + kfree(ring->desc_cb); + ring->desc_cb = NULL; + ring->next_to_clean = 0; + ring->next_to_use = 0; +} + +int hns3_buf_size2type(u32 buf_size) +{ + int bd_size_type; + + switch (buf_size) { + case 512: + bd_size_type = HNS3_BD_SIZE_512_TYPE; + break; + case 1024: + bd_size_type = HNS3_BD_SIZE_1024_TYPE; + break; + case 2048: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + break; + case 4096: + bd_size_type = HNS3_BD_SIZE_4096_TYPE; + break; + default: + bd_size_type = HNS3_BD_SIZE_2048_TYPE; + } + + return bd_size_type; +} + +static void hns3_init_ring_hw(struct hns3_enet_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + struct hnae3_queue *q = ring->tqp; + + if (!HNAE3_IS_TX_RING(ring)) { + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + + } else { + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG, + (u32)dma); + hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + + hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG, + hns3_buf_size2type(ring->buf_size)); + hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG, + ring->desc_num / 8 - 1); + } +} + +static int hns3_init_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = 
priv->ae_handle; + int ring_num = h->kinfo.num_tqps * 2; + int i, j; + int ret; + + for (i = 0; i < ring_num; i++) { + ret = hns3_alloc_ring_memory(priv->ring_data[i].ring); + if (ret) { + dev_err(priv->dev, + "Alloc ring memory fail! ret=%d\n", ret); + goto out_when_alloc_ring_memory; + } + + hns3_init_ring_hw(priv->ring_data[i].ring); + + u64_stats_init(&priv->ring_data[i].ring->syncp); + } + + return 0; + +out_when_alloc_ring_memory: + for (j = i - 1; j >= 0; j--) + hns3_fini_ring(priv->ring_data[i].ring); + + return -ENOMEM; +} + +static int hns3_uninit_all_ring(struct hns3_nic_priv *priv) +{ + struct hnae3_handle *h = priv->ae_handle; + int i; + + for (i = 0; i < h->kinfo.num_tqps; i++) { + if (h->ae_algo->ops->reset_queue) + h->ae_algo->ops->reset_queue(h, i); + + hns3_fini_ring(priv->ring_data[i].ring); + hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring); + } + + return 0; +} + +/* Set mac addr if it is configured. or leave it to the AE driver */ +static void hns3_init_mac_addr(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u8 mac_addr_temp[ETH_ALEN]; + + if (h->ae_algo->ops->get_mac_addr) { + h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); + ether_addr_copy(netdev->dev_addr, mac_addr_temp); + } + + /* Check if the MAC address is valid, if not get a random one */ + if (!is_valid_ether_addr(netdev->dev_addr)) { + eth_hw_addr_random(netdev); + dev_warn(priv->dev, "using random MAC address %pM\n", + netdev->dev_addr); + /* Also copy this new MAC address into hdev */ + if (h->ae_algo->ops->set_mac_addr) + h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr); + } +} + +static void hns3_nic_set_priv_ops(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + if ((netdev->features & NETIF_F_TSO) || + (netdev->features & NETIF_F_TSO6)) { + priv->ops.fill_desc = hns3_fill_desc_tso; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; + } else { + priv->ops.fill_desc = hns3_fill_desc; + priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; + } +} + +static int hns3_client_init(struct hnae3_handle *handle) +{ + struct pci_dev *pdev = handle->pdev; + struct hns3_nic_priv *priv; + struct net_device *netdev; + int ret; + + netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), + handle->kinfo.num_tqps); + if (!netdev) + return -ENOMEM; + + priv = netdev_priv(netdev); + priv->dev = &pdev->dev; + priv->netdev = netdev; + priv->ae_handle = handle; + + handle->kinfo.netdev = netdev; + handle->priv = (void *)priv; + + hns3_init_mac_addr(netdev); + + hns3_set_default_feature(netdev); + + netdev->watchdog_timeo = HNS3_TX_TIMEOUT; + netdev->priv_flags |= IFF_UNICAST_FLT; + netdev->netdev_ops = &hns3_nic_netdev_ops; + SET_NETDEV_DEV(netdev, &pdev->dev); + hns3_ethtool_set_ops(netdev); + hns3_nic_set_priv_ops(netdev); + + /* Carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + ret = hns3_get_ring_config(priv); + if (ret) { + ret = -ENOMEM; + goto out_get_ring_cfg; + } + + ret = hns3_nic_init_vector_data(priv); + if (ret) { + ret = -ENOMEM; + goto out_init_vector_data; + } + + ret = hns3_init_all_ring(priv); + if (ret) { + ret = -ENOMEM; + goto out_init_ring_data; + } + + ret = register_netdev(netdev); + if (ret) { + dev_err(priv->dev, "probe register netdev fail!\n"); + goto out_reg_netdev_fail; + } + + return ret; + +out_reg_netdev_fail: +out_init_ring_data: + (void)hns3_nic_uninit_vector_data(priv); + priv->ring_data = NULL; 
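/* A small corrective sketch, on the assumption that the unwind loop in
 * hns3_init_all_ring() above meant to free the rings it had already set up:
 * as written it frees priv->ring_data[i].ring (the index that failed) on
 * every pass of the backwards loop. The intended form is presumably:
 *
 *	for (j = i - 1; j >= 0; j--)
 *		hns3_fini_ring(priv->ring_data[j].ring);
 */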
+out_init_vector_data: +out_get_ring_cfg: + priv->ae_handle = NULL; + free_netdev(netdev); + return ret; +} + +static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) +{ + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + int ret; + + if (netdev->reg_state != NETREG_UNINITIALIZED) + unregister_netdev(netdev); + + ret = hns3_nic_uninit_vector_data(priv); + if (ret) + netdev_err(netdev, "uninit vector error\n"); + + ret = hns3_uninit_all_ring(priv); + if (ret) + netdev_err(netdev, "uninit ring error\n"); + + priv->ring_data = NULL; + + free_netdev(netdev); +} + +static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) +{ + struct net_device *netdev = handle->kinfo.netdev; + + if (!netdev) + return; + + if (linkup) { + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + netdev_info(netdev, "link up\n"); + } else { + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + netdev_info(netdev, "link down\n"); + } +} + +const struct hnae3_client_ops client_ops = { + .init_instance = hns3_client_init, + .uninit_instance = hns3_client_uninit, + .link_status_change = hns3_link_status_change, +}; + +/* hns3_init_module - Driver registration routine + * hns3_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init hns3_init_module(void) +{ + int ret; + + pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); + pr_info("%s: %s\n", hns3_driver_name, hns3_copyright); + + client.type = HNAE3_CLIENT_KNIC; + snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s", + hns3_driver_name); + + client.ops = &client_ops; + + ret = hnae3_register_client(&client); + if (ret) + return ret; + + ret = pci_register_driver(&hns3_driver); + if (ret) + hnae3_unregister_client(&client); + + return ret; +} +module_init(hns3_init_module); + +/* hns3_exit_module - Driver exit cleanup routine + * hns3_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit hns3_exit_module(void) +{ + pci_unregister_driver(&hns3_driver); + hnae3_unregister_client(&client); +} +module_exit(hns3_exit_module); + +MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("pci:hns-nic"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h new file mode 100644 index 000000000000..a6e8f15a4669 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h @@ -0,0 +1,592 @@ +/* + * Copyright (c) 2016 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __HNS3_ENET_H +#define __HNS3_ENET_H + +#include "hnae3.h" + +extern const char hns3_driver_version[]; + +enum hns3_nic_state { + HNS3_NIC_STATE_TESTING, + HNS3_NIC_STATE_RESETTING, + HNS3_NIC_STATE_REINITING, + HNS3_NIC_STATE_DOWN, + HNS3_NIC_STATE_DISABLED, + HNS3_NIC_STATE_REMOVING, + HNS3_NIC_STATE_SERVICE_INITED, + HNS3_NIC_STATE_SERVICE_SCHED, + HNS3_NIC_STATE2_RESET_REQUESTED, + HNS3_NIC_STATE_MAX +}; + +#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000 +#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004 +#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008 +#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C +#define HNS3_RING_RX_RING_TAIL_REG 0x00018 +#define HNS3_RING_RX_RING_HEAD_REG 0x0001C +#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020 +#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C + +#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040 +#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044 +#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048 +#define HNS3_RING_TX_RING_BD_LEN_REG 0x0004C +#define HNS3_RING_TX_RING_TAIL_REG 0x00058 +#define HNS3_RING_TX_RING_HEAD_REG 0x0005C +#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 +#define HNS3_RING_TX_RING_OFFSET_REG 0x00064 +#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C + +#define HNS3_RING_PREFETCH_EN_REG 0x0007C +#define HNS3_RING_CFG_VF_NUM_REG 0x00080 +#define HNS3_RING_ASID_REG 0x0008C +#define HNS3_RING_RX_VM_REG 0x00090 +#define HNS3_RING_T0_BE_RST 0x00094 +#define HNS3_RING_COULD_BE_RST 0x00098 +#define HNS3_RING_WRR_WEIGHT_REG 0x0009c + +#define HNS3_RING_INTMSK_RXWL_REG 0x000A0 +#define HNS3_RING_INTSTS_RX_RING_REG 0x000A4 +#define HNS3_RX_RING_INT_STS_REG 0x000A8 +#define HNS3_RING_INTMSK_TXWL_REG 0x000AC +#define HNS3_RING_INTSTS_TX_RING_REG 0x000B0 +#define HNS3_TX_RING_INT_STS_REG 0x000B4 +#define HNS3_RING_INTMSK_RX_OVERTIME_REG 0x000B8 +#define HNS3_RING_INTSTS_RX_OVERTIME_REG 0x000BC +#define HNS3_RING_INTMSK_TX_OVERTIME_REG 0x000C4 +#define HNS3_RING_INTSTS_TX_OVERTIME_REG 0x000C8 + +#define HNS3_RING_MB_CTRL_REG 0x00100 +#define HNS3_RING_MB_DATA_BASE_REG 0x00200 + +#define HNS3_TX_REG_OFFSET 0x40 + +#define HNS3_RX_HEAD_SIZE 256 + +#define HNS3_TX_TIMEOUT (5 * HZ) +#define HNS3_RING_NAME_LEN 16 +#define HNS3_BUFFER_SIZE_2048 2048 +#define HNS3_RING_MAX_PENDING 32768 + +#define HNS3_BD_SIZE_512_TYPE 0 +#define HNS3_BD_SIZE_1024_TYPE 1 +#define HNS3_BD_SIZE_2048_TYPE 2 +#define HNS3_BD_SIZE_4096_TYPE 3 + +#define HNS3_RX_FLAG_VLAN_PRESENT 0x1 +#define HNS3_RX_FLAG_L3ID_IPV4 0x0 +#define HNS3_RX_FLAG_L3ID_IPV6 0x1 +#define HNS3_RX_FLAG_L4ID_UDP 0x0 +#define HNS3_RX_FLAG_L4ID_TCP 0x1 + +#define HNS3_RXD_DMAC_S 0 +#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S) +#define HNS3_RXD_VLAN_S 2 +#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S) +#define HNS3_RXD_L3ID_S 4 +#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S) +#define HNS3_RXD_L4ID_S 8 +#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S) +#define HNS3_RXD_FRAG_B 12 +#define HNS3_RXD_L2E_B 16 +#define HNS3_RXD_L3E_B 17 +#define HNS3_RXD_L4E_B 18 +#define HNS3_RXD_TRUNCAT_B 19 +#define HNS3_RXD_HOI_B 20 +#define HNS3_RXD_DOI_B 21 +#define HNS3_RXD_OL3E_B 22 +#define HNS3_RXD_OL4E_B 23 + +#define HNS3_RXD_ODMAC_S 0 +#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) +#define HNS3_RXD_OVLAN_S 2 +#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S) +#define HNS3_RXD_OL3ID_S 4 +#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S) +#define HNS3_RXD_OL4ID_S 8 +#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S) +#define HNS3_RXD_FBHI_S 12 +#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S) 
+#define HNS3_RXD_FBLI_S 14 +#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S) + +#define HNS3_RXD_BDTYPE_S 0 +#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S) +#define HNS3_RXD_VLD_B 4 +#define HNS3_RXD_UDP0_B 5 +#define HNS3_RXD_EXTEND_B 7 +#define HNS3_RXD_FE_B 8 +#define HNS3_RXD_LUM_B 9 +#define HNS3_RXD_CRCP_B 10 +#define HNS3_RXD_L3L4P_B 11 +#define HNS3_RXD_TSIND_S 12 +#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) +#define HNS3_RXD_LKBK_B 15 +#define HNS3_RXD_HDL_S 16 +#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S) +#define HNS3_RXD_HSIND_B 31 + +#define HNS3_TXD_L3T_S 0 +#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) +#define HNS3_TXD_L4T_S 2 +#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S) +#define HNS3_TXD_L3CS_B 4 +#define HNS3_TXD_L4CS_B 5 +#define HNS3_TXD_VLAN_B 6 +#define HNS3_TXD_TSO_B 7 + +#define HNS3_TXD_L2LEN_S 8 +#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S) +#define HNS3_TXD_L3LEN_S 16 +#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S) +#define HNS3_TXD_L4LEN_S 24 +#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S) + +#define HNS3_TXD_OL3T_S 0 +#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S) +#define HNS3_TXD_OVLAN_B 2 +#define HNS3_TXD_MACSEC_B 3 +#define HNS3_TXD_TUNTYPE_S 4 +#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S) + +#define HNS3_TXD_BDTYPE_S 0 +#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S) +#define HNS3_TXD_FE_B 4 +#define HNS3_TXD_SC_S 5 +#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S) +#define HNS3_TXD_EXTEND_B 7 +#define HNS3_TXD_VLD_B 8 +#define HNS3_TXD_RI_B 9 +#define HNS3_TXD_RA_B 10 +#define HNS3_TXD_TSYN_B 11 +#define HNS3_TXD_DECTTL_S 12 +#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S) + +#define HNS3_TXD_MSS_S 0 +#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S) + +#define HNS3_VECTOR_TX_IRQ BIT_ULL(0) +#define HNS3_VECTOR_RX_IRQ BIT_ULL(1) + +#define HNS3_VECTOR_NOT_INITED 0 +#define HNS3_VECTOR_INITED 1 + +#define HNS3_MAX_BD_SIZE 65535 +#define HNS3_MAX_BD_PER_FRAG 8 +#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS + +#define HNS3_VECTOR_GL0_OFFSET 0x100 +#define HNS3_VECTOR_GL1_OFFSET 0x200 +#define HNS3_VECTOR_GL2_OFFSET 0x300 +#define HNS3_VECTOR_RL_OFFSET 0x900 +#define HNS3_VECTOR_RL_EN_B 6 + +enum hns3_pkt_l3t_type { + HNS3_L3T_NONE, + HNS3_L3T_IPV6, + HNS3_L3T_IPV4, + HNS3_L3T_RESERVED +}; + +enum hns3_pkt_l4t_type { + HNS3_L4T_UNKNOWN, + HNS3_L4T_TCP, + HNS3_L4T_UDP, + HNS3_L4T_SCTP +}; + +enum hns3_pkt_ol3t_type { + HNS3_OL3T_NONE, + HNS3_OL3T_IPV6, + HNS3_OL3T_IPV4_NO_CSUM, + HNS3_OL3T_IPV4_CSUM +}; + +enum hns3_pkt_tun_type { + HNS3_TUN_NONE, + HNS3_TUN_MAC_IN_UDP, + HNS3_TUN_NVGRE, + HNS3_TUN_OTHER +}; + +/* hardware spec ring buffer format */ +struct __packed hns3_desc { + __le64 addr; + union { + struct { + __le16 vlan_tag; + __le16 send_size; + union { + __le32 type_cs_vlan_tso_len; + struct { + __u8 type_cs_vlan_tso; + __u8 l2_len; + __u8 l3_len; + __u8 l4_len; + }; + }; + __le16 outer_vlan_tag; + __le16 tv; + + union { + __le32 ol_type_vlan_len_msec; + struct { + __u8 ol_type_vlan_msec; + __u8 ol2_len; + __u8 ol3_len; + __u8 ol4_len; + }; + }; + + __le32 paylen; + __le16 bdtp_fe_sc_vld_ra_ri; + __le16 mss; + } tx; + + struct { + __le32 l234_info; + __le16 pkt_len; + __le16 size; + + __le32 rss_hash; + __le16 fd_id; + __le16 vlan_tag; + + union { + __le32 ol_info; + struct { + __le16 o_dm_vlan_id_fb; + __le16 ot_vlan_tag; + }; + }; + + __le32 bd_base_info; + } rx; + }; +}; + +struct hns3_desc_cb { + dma_addr_t dma; /* dma address of this desc */ + void *buf; /* cpu addr for a desc */ + + /* priv 
data for the desc, e.g. skb when use with ip stack*/ + void *priv; + u16 page_offset; + u16 reuse_flag; + + u16 length; /* length of the buffer */ + + /* desc type, used by the ring user to mark the type of the priv data */ + u16 type; +}; + +enum hns3_pkt_l3type { + HNS3_L3_TYPE_IPV4, + HNS3_L3_TYPE_IPV6, + HNS3_L3_TYPE_ARP, + HNS3_L3_TYPE_RARP, + HNS3_L3_TYPE_IPV4_OPT, + HNS3_L3_TYPE_IPV6_EXT, + HNS3_L3_TYPE_LLDP, + HNS3_L3_TYPE_BPDU, + HNS3_L3_TYPE_MAC_PAUSE, + HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ + + /* reserved for 0xA~0xB*/ + + HNS3_L3_TYPE_CNM = 0xc, + + /* reserved for 0xD~0xE*/ + + HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_l4type { + HNS3_L4_TYPE_UDP, + HNS3_L4_TYPE_TCP, + HNS3_L4_TYPE_GRE, + HNS3_L4_TYPE_SCTP, + HNS3_L4_TYPE_IGMP, + HNS3_L4_TYPE_ICMP, + + /* reserved for 0x6~0xE */ + + HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_ol3type { + HNS3_OL3_TYPE_IPV4 = 0, + HNS3_OL3_TYPE_IPV6, + /* reserved for 0x2~0x3 */ + HNS3_OL3_TYPE_IPV4_OPT = 4, + HNS3_OL3_TYPE_IPV6_EXT, + + /* reserved for 0x6~0xE*/ + + HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */ +}; + +enum hns3_pkt_ol4type { + HNS3_OL4_TYPE_NO_TUN, + HNS3_OL4_TYPE_MAC_IN_UDP, + HNS3_OL4_TYPE_NVGRE, + HNS3_OL4_TYPE_UNKNOWN +}; + +struct ring_stats { + u64 io_err_cnt; + u64 sw_err_cnt; + u64 seg_pkt_cnt; + union { + struct { + u64 tx_pkts; + u64 tx_bytes; + u64 tx_err_cnt; + u64 restart_queue; + u64 tx_busy; + }; + struct { + u64 rx_pkts; + u64 rx_bytes; + u64 rx_err_cnt; + u64 reuse_pg_cnt; + u64 err_pkt_len; + u64 non_vld_descs; + u64 err_bd_num; + u64 l2_err; + u64 l3l4_csum_err; + }; + }; +}; + +struct hns3_enet_ring { + u8 __iomem *io_base; /* base io address for the ring */ + struct hns3_desc *desc; /* dma map address space */ + struct hns3_desc_cb *desc_cb; + struct hns3_enet_ring *next; + struct hns3_enet_tqp_vector *tqp_vector; + struct hnae3_queue *tqp; + char ring_name[HNS3_RING_NAME_LEN]; + struct device *dev; /* will be used for DMA mapping of descriptors */ + + /* statistic */ + struct ring_stats stats; + struct u64_stats_sync syncp; + + dma_addr_t desc_dma_addr; + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 desc_num; /* total number of desc */ + u16 max_desc_num_per_pkt; + u16 max_raw_data_sz_per_desc; + u16 max_pkt_size; + int next_to_use; /* idx of next spare desc */ + + /* idx of lastest sent desc, the ring is empty when equal to + * next_to_use + */ + int next_to_clean; + + u32 flag; /* ring attribute */ + int irq_init_flag; + + int numa_node; + cpumask_t affinity_mask; +}; + +struct hns_queue; + +struct hns3_nic_ring_data { + struct hns3_enet_ring *ring; + struct napi_struct napi; + int queue_index; + int (*poll_one)(struct hns3_nic_ring_data *, int, void *); + void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *); + void (*fini_process)(struct hns3_nic_ring_data *); +}; + +struct hns3_nic_ops { + int (*fill_desc)(struct hns3_enet_ring *ring, void *priv, + int size, dma_addr_t dma, int frag_end, + enum hns_desc_type type); + int (*maybe_stop_tx)(struct sk_buff **out_skb, + int *bnum, struct hns3_enet_ring *ring); + void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum); +}; + +enum hns3_flow_level_range { + HNS3_FLOW_LOW = 0, + HNS3_FLOW_MID = 1, + HNS3_FLOW_HIGH = 2, + HNS3_FLOW_ULTRA = 3, +}; + +enum hns3_link_mode_bits { + HNS3_LM_FIBRE_BIT = BIT(0), + HNS3_LM_AUTONEG_BIT = BIT(1), + HNS3_LM_TP_BIT = BIT(2), + HNS3_LM_PAUSE_BIT = BIT(3), + HNS3_LM_BACKPLANE_BIT = BIT(4), + HNS3_LM_10BASET_HALF_BIT = BIT(5), + 
HNS3_LM_10BASET_FULL_BIT = BIT(6), + HNS3_LM_100BASET_HALF_BIT = BIT(7), + HNS3_LM_100BASET_FULL_BIT = BIT(8), + HNS3_LM_1000BASET_FULL_BIT = BIT(9), + HNS3_LM_10000BASEKR_FULL_BIT = BIT(10), + HNS3_LM_25000BASEKR_FULL_BIT = BIT(11), + HNS3_LM_40000BASELR4_FULL_BIT = BIT(12), + HNS3_LM_50000BASEKR2_FULL_BIT = BIT(13), + HNS3_LM_100000BASEKR4_FULL_BIT = BIT(14), + HNS3_LM_COUNT = 15 +}; + +#define HNS3_INT_GL_50K 0x000A +#define HNS3_INT_GL_20K 0x0019 +#define HNS3_INT_GL_18K 0x001B +#define HNS3_INT_GL_8K 0x003E + +struct hns3_enet_ring_group { + /* array of pointers to rings */ + struct hns3_enet_ring *ring; + u64 total_bytes; /* total bytes processed this group */ + u64 total_packets; /* total packets processed this group */ + u16 count; + enum hns3_flow_level_range flow_level; + u16 int_gl; +}; + +struct hns3_enet_tqp_vector { + struct hnae3_handle *handle; + u8 __iomem *mask_addr; + int vector_irq; + int irq_init_flag; + + u16 idx; /* index in the TQP vector array per handle. */ + + struct napi_struct napi; + + struct hns3_enet_ring_group rx_group; + struct hns3_enet_ring_group tx_group; + + u16 num_tqps; /* total number of tqps in TQP vector */ + + cpumask_t affinity_mask; + char name[HNAE3_INT_NAME_LEN]; + + /* when 0 should adjust interrupt coalesce parameter */ + u8 int_adapt_down; +} ____cacheline_internodealigned_in_smp; + +enum hns3_udp_tnl_type { + HNS3_UDP_TNL_VXLAN, + HNS3_UDP_TNL_GENEVE, + HNS3_UDP_TNL_MAX, +}; + +struct hns3_udp_tunnel { + u16 dst_port; + int used; +}; + +struct hns3_nic_priv { + struct hnae3_handle *ae_handle; + u32 enet_ver; + u32 port_id; + struct net_device *netdev; + struct device *dev; + struct hns3_nic_ops ops; + + /** + * the cb for nic to manage the ring buffer, the first half of the + * array is for tx_ring and vice versa for the second half + */ + struct hns3_nic_ring_data *ring_data; + struct hns3_enet_tqp_vector *tqp_vector; + u16 vector_num; + + /* The most recently read link state */ + int link; + u64 tx_timeout_count; + + unsigned long state; + + struct timer_list service_timer; + + struct work_struct service_task; + + struct notifier_block notifier_block; + /* Vxlan/Geneve information */ + struct hns3_udp_tunnel udp_tnl[HNS3_UDP_TNL_MAX]; +}; + +union l3_hdr_info { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; +}; + +union l4_hdr_info { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; +}; + +/* the distance between [begin, end) in a ring buffer + * note: there is a unuse slot between the begin and the end + */ +static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end) +{ + return (end - begin + ring->desc_num) % ring->desc_num; +} + +static inline int ring_space(struct hns3_enet_ring *ring) +{ + return ring->desc_num - + ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; +} + +static inline int is_ring_empty(struct hns3_enet_ring *ring) +{ + return ring->next_to_use == ring->next_to_clean; +} + +static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + writel(value, reg_addr + reg); +} + +#define hns3_write_dev(a, reg, value) \ + hns3_write_reg((a)->io_base, (reg), (value)) + +#define hnae_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ + (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) + +#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) + +#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? 
\ + DMA_TO_DEVICE : DMA_FROM_DEVICE) + +#define tx_ring_data(priv, idx) ((priv)->ring_data[idx]) + +#define hnae_buf_size(_ring) ((_ring)->buf_size) +#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring))) +#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring)) + +/* iterator for handling rings in ring group */ +#define hns3_for_each_ring(pos, head) \ + for (pos = (head).ring; pos; pos = pos->next) + +void hns3_ethtool_set_ops(struct net_device *netdev); + +int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget); +#endif -- cgit v1.2.3-55-g7522 From 38caee9d3ee8b32b36912f54e19438eddd126772 Mon Sep 17 00:00:00 2001 From: Salil Date: Wed, 2 Aug 2017 16:59:46 +0100 Subject: net: hns3: Add support of the HNAE3 framework This patch adds the support of the HNAE3 (Hisilicon Network Acceleration Engine 3) framework support to the HNS3 driver. Framework facilitates clients like ENET(HNS3 Ethernet Driver), RoCE and user-space Ethernet drivers (like ODP etc.) to register with HNAE3 devices and their associated operations. Signed-off-by: Daode Huang Signed-off-by: lipeng Signed-off-by: Salil Mehta Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 300 +++++++++++++++++++ drivers/net/ethernet/hisilicon/hns3/hnae3.h | 444 ++++++++++++++++++++++++++++ 2 files changed, 744 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hnae3.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hnae3.h diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c new file mode 100644 index 000000000000..59efbd605416 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -0,0 +1,300 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include + +#include "hnae3.h" + +static LIST_HEAD(hnae3_ae_algo_list); +static LIST_HEAD(hnae3_client_list); +static LIST_HEAD(hnae3_ae_dev_list); + +/* we are keeping things simple and using single lock for all the + * list. This is a non-critical code so other updations, if happen + * in parallel, can wait. 
+ */ +static DEFINE_MUTEX(hnae3_common_lock); + +static bool hnae3_client_match(enum hnae3_client_type client_type, + enum hnae3_dev_type dev_type) +{ + if ((dev_type == HNAE3_DEV_KNIC) && (client_type == HNAE3_CLIENT_KNIC || + client_type == HNAE3_CLIENT_ROCE)) + return true; + + if (dev_type == HNAE3_DEV_UNIC && client_type == HNAE3_CLIENT_UNIC) + return true; + + return false; +} + +static int hnae3_match_n_instantiate(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev, + bool is_reg, bool *matched) +{ + int ret; + + *matched = false; + + /* check if this client matches the type of ae_dev */ + if (!(hnae3_client_match(client->type, ae_dev->dev_type) && + hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { + return 0; + } + /* there is a match of client and dev */ + *matched = true; + + /* now, (un-)instantiate client by calling lower layer */ + if (is_reg) { + ret = ae_dev->ops->init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "fail to instantiate client\n"); + return ret; + } + + ae_dev->ops->uninit_client_instance(client, ae_dev); + return 0; +} + +int hnae3_register_client(struct hnae3_client *client) +{ + struct hnae3_client *client_tmp; + struct hnae3_ae_dev *ae_dev; + bool matched; + int ret = 0; + + mutex_lock(&hnae3_common_lock); + /* one system should only have one client for every type */ + list_for_each_entry(client_tmp, &hnae3_client_list, node) { + if (client_tmp->type == client->type) + goto exit; + } + + list_add_tail(&client->node, &hnae3_client_list); + + /* initialize the client on every matched port */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + /* if the client could not be initialized on current port, for + * any error reasons, move on to next available port + */ + ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed for port\n"); + } + +exit: + mutex_unlock(&hnae3_common_lock); + + return ret; +} +EXPORT_SYMBOL(hnae3_register_client); + +void hnae3_unregister_client(struct hnae3_client *client) +{ + struct hnae3_ae_dev *ae_dev; + bool matched; + + mutex_lock(&hnae3_common_lock); + /* un-initialize the client on every matched port */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + hnae3_match_n_instantiate(client, ae_dev, false, &matched); + } + + list_del(&client->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_client); + +/* hnae3_register_ae_algo - register a AE algorithm to hnae3 framework + * @ae_algo: AE algorithm + * NOTE: the duplicated name will not be checked + */ +int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) +{ + const struct pci_device_id *id; + struct hnae3_ae_dev *ae_dev; + struct hnae3_client *client; + bool matched; + int ret = 0; + + mutex_lock(&hnae3_common_lock); + + list_add_tail(&ae_algo->node, &hnae3_ae_algo_list); + + /* Check if this algo/ops matches the list of ae_devs */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + /* ae_dev init should set flag */ + ae_dev->ops = ae_algo->ops; + ret = ae_algo->ops->init_ae_dev(ae_dev); + if (ret) { + dev_err(&ae_dev->pdev->dev, "init ae_dev error.\n"); + continue; + } + + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + + /* check the client list for the match with this ae_dev type and + * initialize the figure out client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) { + ret = 
hnae3_match_n_instantiate(client, ae_dev, true, + &matched); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed\n"); + if (matched) + break; + } + } + + mutex_unlock(&hnae3_common_lock); + + return ret; +} +EXPORT_SYMBOL(hnae3_register_ae_algo); + +/* hnae3_unregister_ae_algo - unregisters a AE algorithm + * @ae_algo: the AE algorithm to unregister + */ +void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) +{ + const struct pci_device_id *id; + struct hnae3_ae_dev *ae_dev; + struct hnae3_client *client; + bool matched; + + mutex_lock(&hnae3_common_lock); + /* Check if there are matched ae_dev */ + list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + /* check the client list for the match with this ae_dev type and + * un-initialize the figure out client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) { + hnae3_match_n_instantiate(client, ae_dev, false, + &matched); + if (matched) + break; + } + + ae_algo->ops->uninit_ae_dev(ae_dev); + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + } + + list_del(&ae_algo->node); + mutex_unlock(&hnae3_common_lock); +} +EXPORT_SYMBOL(hnae3_unregister_ae_algo); + +/* hnae3_register_ae_dev - registers a AE device to hnae3 framework + * @ae_dev: the AE device + * NOTE: the duplicated name will not be checked + */ +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + const struct pci_device_id *id; + struct hnae3_ae_algo *ae_algo; + struct hnae3_client *client; + bool matched; + int ret = 0; + + mutex_lock(&hnae3_common_lock); + list_add_tail(&ae_dev->node, &hnae3_ae_dev_list); + + /* Check if there are matched ae_algo */ + list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + ae_dev->ops = ae_algo->ops; + + if (!ae_dev->ops) { + dev_err(&ae_dev->pdev->dev, "ae_dev ops are null\n"); + goto out_err; + } + + /* ae_dev init should set flag */ + ret = ae_dev->ops->init_ae_dev(ae_dev); + if (ret) { + dev_err(&ae_dev->pdev->dev, "init ae_dev error\n"); + goto out_err; + } + + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1); + break; + } + + /* check the client list for the match with this ae_dev type and + * initialize the figure out client instance + */ + list_for_each_entry(client, &hnae3_client_list, node) { + ret = hnae3_match_n_instantiate(client, ae_dev, true, + &matched); + if (ret) + dev_err(&ae_dev->pdev->dev, + "match and instantiation failed\n"); + if (matched) + break; + } + +out_err: + mutex_unlock(&hnae3_common_lock); + + return ret; +} +EXPORT_SYMBOL(hnae3_register_ae_dev); + +/* hnae3_unregister_ae_dev - unregisters a AE device + * @ae_dev: the AE device to unregister + */ +void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + const struct pci_device_id *id; + struct hnae3_ae_algo *ae_algo; + struct hnae3_client *client; + bool matched; + + mutex_lock(&hnae3_common_lock); + /* Check if there are matched ae_algo */ + list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) { + id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev); + if (!id) + continue; + + list_for_each_entry(client, &hnae3_client_list, node) { + hnae3_match_n_instantiate(client, ae_dev, false, + &matched); + if (matched) + break; + } + + ae_algo->ops->uninit_ae_dev(ae_dev); + hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); + } + + list_del(&ae_dev->node); + mutex_unlock(&hnae3_common_lock); +} 
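/*
 * Illustrative sketch (hypothetical, not taken from the patch above): how a
 * client such as the HNS3 ENET (KNIC) driver might plug into the
 * register/unregister entry points defined in this file.  The hns3_demo_*
 * names and the empty callback bodies are placeholders for illustration;
 * only struct hnae3_client, struct hnae3_client_ops and the
 * hnae3_register_client()/hnae3_unregister_client() calls come from this
 * patch.
 */
static int hns3_demo_init_instance(struct hnae3_handle *handle)
{
	/* Called once per matching ae_dev; a real client binds its
	 * netdev and queue resources to the handle here.
	 */
	return 0;
}

static void hns3_demo_uninit_instance(struct hnae3_handle *handle, bool reset)
{
	/* Undo whatever init_instance() set up for this handle. */
}

static const struct hnae3_client_ops hns3_demo_client_ops = {
	.init_instance = hns3_demo_init_instance,
	.uninit_instance = hns3_demo_uninit_instance,
};

static struct hnae3_client hns3_demo_client = {
	.name = "hns3_demo",
	.type = HNAE3_CLIENT_KNIC,
	.ops = &hns3_demo_client_ops,
};

static int __init hns3_demo_mod_init(void)
{
	/* Registration walks every ae_dev already on hnae3_ae_dev_list and
	 * instantiates the client on each matching, initialized device.
	 */
	return hnae3_register_client(&hns3_demo_client);
}

static void __exit hns3_demo_mod_exit(void)
{
	/* Unregistration un-instantiates the client on every matched port. */
	hnae3_unregister_client(&hns3_demo_client);
}
/* A real client module would wire these up with module_init()/module_exit(). */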
+EXPORT_SYMBOL(hnae3_unregister_ae_dev); + +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework"); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h new file mode 100644 index 000000000000..b2f28ae81273 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -0,0 +1,444 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HNAE3_H +#define __HNAE3_H + +/* Names used in this framework: + * ae handle (handle): + * a set of queues provided by AE + * ring buffer queue (rbq): + * the channel between upper layer and the AE, can do tx and rx + * ring: + * a tx or rx channel within a rbq + * ring description (desc): + * an element in the ring with packet information + * buffer: + * a memory region referred by desc with the full packet payload + * + * "num" means a static number set as a parameter, "count" mean a dynamic + * number set while running + * "cb" means control block + */ + +#include +#include +#include +#include +#include +#include +#include + +/* Device IDs */ +#define HNAE3_DEV_ID_GE 0xA220 +#define HNAE3_DEV_ID_25GE 0xA221 +#define HNAE3_DEV_ID_25GE_RDMA 0xA222 +#define HNAE3_DEV_ID_25GE_RDMA_MACSEC 0xA223 +#define HNAE3_DEV_ID_50GE_RDMA 0xA224 +#define HNAE3_DEV_ID_50GE_RDMA_MACSEC 0xA225 +#define HNAE3_DEV_ID_100G_RDMA_MACSEC 0xA226 +#define HNAE3_DEV_ID_100G_VF 0xA22E +#define HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF 0xA22F + +#define HNAE3_CLASS_NAME_SIZE 16 + +#define HNAE3_DEV_INITED_B 0x0 +#define HNAE_DEV_SUPPORT_ROCE_B 0x1 + +#define ring_ptr_move_fw(ring, p) \ + ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) +#define ring_ptr_move_bw(ring, p) \ + ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num) + +enum hns_desc_type { + DESC_TYPE_SKB, + DESC_TYPE_PAGE, +}; + +struct hnae3_handle; + +struct hnae3_queue { + void __iomem *io_base; + struct hnae3_ae_algo *ae_algo; + struct hnae3_handle *handle; + int tqp_index; /* index in a handle */ + u32 buf_size; /* size for hnae_desc->addr, preset by AE */ + u16 desc_num; /* total number of desc */ +}; + +/*hnae3 loop mode*/ +enum hnae3_loop { + HNAE3_MAC_INTER_LOOP_MAC, + HNAE3_MAC_INTER_LOOP_SERDES, + HNAE3_MAC_INTER_LOOP_PHY, + HNAE3_MAC_LOOP_NONE, +}; + +enum hnae3_client_type { + HNAE3_CLIENT_KNIC, + HNAE3_CLIENT_UNIC, + HNAE3_CLIENT_ROCE, +}; + +enum hnae3_dev_type { + HNAE3_DEV_KNIC, + HNAE3_DEV_UNIC, +}; + +/* mac media type */ +enum hnae3_media_type { + HNAE3_MEDIA_TYPE_UNKNOWN, + HNAE3_MEDIA_TYPE_FIBER, + HNAE3_MEDIA_TYPE_COPPER, + HNAE3_MEDIA_TYPE_BACKPLANE, +}; + +struct hnae3_vector_info { + u8 __iomem *io_addr; + int vector; +}; + +#define HNAE3_RING_TYPE_B 0 +#define HNAE3_RING_TYPE_TX 0 +#define HNAE3_RING_TYPE_RX 1 + +struct hnae3_ring_chain_node { + struct hnae3_ring_chain_node *next; + u32 tqp_index; + u32 flag; +}; + +#define HNAE3_IS_TX_RING(node) \ + (((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX) + +struct hnae3_client_ops { + int (*init_instance)(struct hnae3_handle *handle); + void (*uninit_instance)(struct hnae3_handle *handle, bool reset); + void (*link_status_change)(struct hnae3_handle *handle, bool state); +}; + +#define HNAE3_CLIENT_NAME_LENGTH 16 +struct 
hnae3_client { + char name[HNAE3_CLIENT_NAME_LENGTH]; + u16 version; + unsigned long state; + enum hnae3_client_type type; + const struct hnae3_client_ops *ops; + struct list_head node; +}; + +struct hnae3_ae_dev { + struct pci_dev *pdev; + const struct hnae3_ae_ops *ops; + struct list_head node; + u32 flag; + enum hnae3_dev_type dev_type; + void *priv; +}; + +/* This struct defines the operation on the handle. + * + * init_ae_dev(): (mandatory) + * Get PF configure from pci_dev and initialize PF hardware + * uninit_ae_dev() + * Disable PF device and release PF resource + * register_client + * Register client to ae_dev + * unregister_client() + * Unregister client from ae_dev + * start() + * Enable the hardware + * stop() + * Disable the hardware + * get_status() + * Get the carrier state of the back channel of the handle, 1 for ok, 0 for + * non-ok + * get_ksettings_an_result() + * Get negotiation status,speed and duplex + * update_speed_duplex_h() + * Update hardware speed and duplex + * get_media_type() + * Get media type of MAC + * adjust_link() + * Adjust link status + * set_loopback() + * Set loopback + * set_promisc_mode + * Set promisc mode + * set_mtu() + * set mtu + * get_pauseparam() + * get tx and rx of pause frame use + * set_pauseparam() + * set tx and rx of pause frame use + * set_autoneg() + * set auto autonegotiation of pause frame use + * get_autoneg() + * get auto autonegotiation of pause frame use + * get_coalesce_usecs() + * get usecs to delay a TX interrupt after a packet is sent + * get_rx_max_coalesced_frames() + * get Maximum number of packets to be sent before a TX interrupt. + * set_coalesce_usecs() + * set usecs to delay a TX interrupt after a packet is sent + * set_coalesce_frames() + * set Maximum number of packets to be sent before a TX interrupt. 
+ * get_mac_addr() + * get mac address + * set_mac_addr() + * set mac address + * add_uc_addr + * Add unicast addr to mac table + * rm_uc_addr + * Remove unicast addr from mac table + * set_mc_addr() + * Set multicast address + * add_mc_addr + * Add multicast address to mac table + * rm_mc_addr + * Remove multicast address from mac table + * update_stats() + * Update Old network device statistics + * get_ethtool_stats() + * Get ethtool network device statistics + * get_strings() + * Get a set of strings that describe the requested objects + * get_sset_count() + * Get number of strings that @get_strings will write + * update_led_status() + * Update the led status + * set_led_id() + * Set led id + * get_regs() + * Get regs dump + * get_regs_len() + * Get the len of the regs dump + * get_rss_key_size() + * Get rss key size + * get_rss_indir_size() + * Get rss indirection table size + * get_rss() + * Get rss table + * set_rss() + * Set rss table + * get_tc_size() + * Get tc size of handle + * get_vector() + * Get vector number and vector information + * map_ring_to_vector() + * Map rings to vector + * unmap_ring_from_vector() + * Unmap rings from vector + * add_tunnel_udp() + * Add tunnel information to hardware + * del_tunnel_udp() + * Delete tunnel information from hardware + * reset_queue() + * Reset queue + * get_fw_version() + * Get firmware version + * get_mdix_mode() + * Get media typr of phy + * set_vlan_filter() + * Set vlan filter config of Ports + * set_vf_vlan_filter() + * Set vlan filter config of vf + */ +struct hnae3_ae_ops { + int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); + void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev); + + int (*init_client_instance)(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev); + void (*uninit_client_instance)(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev); + int (*start)(struct hnae3_handle *handle); + void (*stop)(struct hnae3_handle *handle); + int (*get_status)(struct hnae3_handle *handle); + void (*get_ksettings_an_result)(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, u8 *duplex); + + int (*update_speed_duplex_h)(struct hnae3_handle *handle); + int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed, + u8 duplex); + + void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type); + void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex); + int (*set_loopback)(struct hnae3_handle *handle, + enum hnae3_loop loop_mode, bool en); + + void (*set_promisc_mode)(struct hnae3_handle *handle, u32 en); + int (*set_mtu)(struct hnae3_handle *handle, int new_mtu); + + void (*get_pauseparam)(struct hnae3_handle *handle, + u32 *auto_neg, u32 *rx_en, u32 *tx_en); + int (*set_pauseparam)(struct hnae3_handle *handle, + u32 auto_neg, u32 rx_en, u32 tx_en); + + int (*set_autoneg)(struct hnae3_handle *handle, bool enable); + int (*get_autoneg)(struct hnae3_handle *handle); + + void (*get_coalesce_usecs)(struct hnae3_handle *handle, + u32 *tx_usecs, u32 *rx_usecs); + void (*get_rx_max_coalesced_frames)(struct hnae3_handle *handle, + u32 *tx_frames, u32 *rx_frames); + int (*set_coalesce_usecs)(struct hnae3_handle *handle, u32 timeout); + int (*set_coalesce_frames)(struct hnae3_handle *handle, + u32 coalesce_frames); + void (*get_coalesce_range)(struct hnae3_handle *handle, + u32 *tx_frames_low, u32 *rx_frames_low, + u32 *tx_frames_high, u32 *rx_frames_high, + u32 *tx_usecs_low, u32 *rx_usecs_low, + u32 *tx_usecs_high, u32 *rx_usecs_high); + + void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p); 
+ int (*set_mac_addr)(struct hnae3_handle *handle, void *p); + int (*add_uc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*rm_uc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*set_mc_addr)(struct hnae3_handle *handle, void *addr); + int (*add_mc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + int (*rm_mc_addr)(struct hnae3_handle *handle, + const unsigned char *addr); + + void (*set_tso_stats)(struct hnae3_handle *handle, int enable); + void (*update_stats)(struct hnae3_handle *handle, + struct net_device_stats *net_stats); + void (*get_stats)(struct hnae3_handle *handle, u64 *data); + + void (*get_strings)(struct hnae3_handle *handle, + u32 stringset, u8 *data); + int (*get_sset_count)(struct hnae3_handle *handle, int stringset); + + void (*get_regs)(struct hnae3_handle *handle, void *data); + int (*get_regs_len)(struct hnae3_handle *handle); + + u32 (*get_rss_key_size)(struct hnae3_handle *handle); + u32 (*get_rss_indir_size)(struct hnae3_handle *handle); + int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key, + u8 *hfunc); + int (*set_rss)(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc); + + int (*get_tc_size)(struct hnae3_handle *handle); + + int (*get_vector)(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info); + int (*map_ring_to_vector)(struct hnae3_handle *handle, + int vector_num, + struct hnae3_ring_chain_node *vr_chain); + int (*unmap_ring_from_vector)(struct hnae3_handle *handle, + int vector_num, + struct hnae3_ring_chain_node *vr_chain); + + int (*add_tunnel_udp)(struct hnae3_handle *handle, u16 port_num); + int (*del_tunnel_udp)(struct hnae3_handle *handle, u16 port_num); + + void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id); + u32 (*get_fw_version)(struct hnae3_handle *handle); + void (*get_mdix_mode)(struct hnae3_handle *handle, + u8 *tp_mdix_ctrl, u8 *tp_mdix); + + int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto, + u16 vlan_id, bool is_kill); + int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto); +}; + +struct hnae3_ae_algo { + const struct hnae3_ae_ops *ops; + struct list_head node; + char name[HNAE3_CLASS_NAME_SIZE]; + const struct pci_device_id *pdev_id_table; +}; + +#define HNAE3_INT_NAME_LEN (IFNAMSIZ + 16) +#define HNAE3_ITR_COUNTDOWN_START 100 + +struct hnae3_tc_info { + u16 tqp_offset; /* TQP offset from base TQP */ + u16 tqp_count; /* Total TQPs */ + u8 up; /* user priority */ + u8 tc; /* TC index */ + bool enable; /* If this TC is enable or not */ +}; + +#define HNAE3_MAX_TC 8 +struct hnae3_knic_private_info { + struct net_device *netdev; /* Set by KNIC client when init instance */ + u16 rss_size; /* Allocated RSS queues */ + u16 rx_buf_len; + u16 num_desc; + + u8 num_tc; /* Total number of enabled TCs */ + struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ + + u16 num_tqps; /* total number of TQPs in this handle */ + struct hnae3_queue **tqp; /* array base of all TQPs in this instance */ +}; + +struct hnae3_roce_private_info { + struct net_device *netdev; + void __iomem *roce_io_base; + int base_vector; + int num_vectors; +}; + +struct hnae3_unic_private_info { + struct net_device *netdev; + u16 rx_buf_len; + u16 num_desc; + u16 num_tqps; /* total number of tqps in this handle */ + struct hnae3_queue **tqp; /* array base of all TQPs of this instance */ +}; + +#define HNAE3_SUPPORT_MAC_LOOPBACK 1 +#define 
HNAE3_SUPPORT_PHY_LOOPBACK 2 +#define HNAE3_SUPPORT_SERDES_LOOPBACK 4 + +struct hnae3_handle { + struct hnae3_client *client; + struct pci_dev *pdev; + void *priv; + struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */ + u64 flags; /* Indicate the capabilities for this handle*/ + + union { + struct net_device *netdev; /* first member */ + struct hnae3_knic_private_info kinfo; + struct hnae3_unic_private_info uinfo; + struct hnae3_roce_private_info rinfo; + }; + + u32 numa_node_mask; /* for multi-chip support */ +}; + +#define hnae_set_field(origin, mask, shift, val) \ + do { \ + (origin) &= (~(mask)); \ + (origin) |= ((val) << (shift)) & (mask); \ + } while (0) +#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift)) + +#define hnae_set_bit(origin, shift, val) \ + hnae_set_field((origin), (0x1 << (shift)), (shift), (val)) +#define hnae_get_bit(origin, shift) \ + hnae_get_field((origin), (0x1 << (shift)), (shift)) + +int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev); +void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev); + +void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo); +int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo); + +void hnae3_unregister_client(struct hnae3_client *client); +int hnae3_register_client(struct hnae3_client *client); +#endif -- cgit v1.2.3-55-g7522 From 68c0a5c70614ce0adf8c6ff849534dd6d2c0ca43 Mon Sep 17 00:00:00 2001 From: Salil Date: Wed, 2 Aug 2017 16:59:47 +0100 Subject: net: hns3: Add HNS3 IMP(Integrated Mgmt Proc) Cmd Interface Support This patch adds the support of IMP (Integrated Management Processor) command interface to the HNS3 driver. Each PF/VF has support of CQP(Command Queue Pair) ring interface. Each CQP consis of send queue CSQ and receive queue CRQ. There are various commands a PF/VF may support, like for Flow Table manipulation, Device management, Packet buffer allocation, Forwarding, VLANs config, Tunneling/Overlays etc. This patch contains code to initialize the command queue, manage the command queue descriptors and Rx/Tx protocol with the command processor in the form of various commands/results and acknowledgements. Signed-off-by: Daode Huang Signed-off-by: lipeng Signed-off-by: Salil Mehta Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 356 ++++++++++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 740 +++++++++++++++++++++ 2 files changed, 1096 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c new file mode 100644 index 000000000000..bc869842728f --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include "hclge_cmd.h" +#include "hnae3.h" +#include "hclge_main.h" + +#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ) +#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? 
\ + DMA_TO_DEVICE : DMA_FROM_DEVICE) +#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev) + +static int hclge_ring_space(struct hclge_cmq_ring *ring) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + int used = (ntu - ntc + ring->desc_num) % ring->desc_num; + + return ring->desc_num - used - 1; +} + +static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring) +{ + int size = ring->desc_num * sizeof(struct hclge_desc); + + ring->desc = kzalloc(size, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc, + size, DMA_BIDIRECTIONAL); + if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) { + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; + return -ENOMEM; + } + + return 0; +} + +static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring) +{ + dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr, + ring->desc_num * sizeof(ring->desc[0]), + DMA_BIDIRECTIONAL); + + ring->desc_dma_addr = 0; + kfree(ring->desc); + ring->desc = NULL; +} + +static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type) +{ + struct hclge_hw *hw = &hdev->hw; + struct hclge_cmq_ring *ring = + (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq; + int ret; + + ring->flag = ring_type; + ring->dev = hdev; + + ret = hclge_alloc_cmd_desc(ring); + if (ret) { + dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n", + (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret); + return ret; + } + + ring->next_to_clean = 0; + ring->next_to_use = 0; + + return 0; +} + +void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, bool is_read) +{ + memset((void *)desc, 0, sizeof(struct hclge_desc)); + desc->opcode = cpu_to_le16(opcode); + desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN); + + if (is_read) + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR); + else + desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); +} + +static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring) +{ + dma_addr_t dma = ring->desc_dma_addr; + struct hclge_dev *hdev = ring->dev; + struct hclge_hw *hw = &hdev->hw; + + if (ring->flag == HCLGE_TYPE_CSQ) { + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, + (u32)dma); + hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, + (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | + HCLGE_NIC_CMQ_ENABLE); + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0); + } else { + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, + (u32)dma); + hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, + (u32)((dma >> 31) >> 1)); + hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, + (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) | + HCLGE_NIC_CMQ_ENABLE); + hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0); + hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0); + } +} + +static void hclge_cmd_init_regs(struct hclge_hw *hw) +{ + hclge_cmd_config_regs(&hw->cmq.csq); + hclge_cmd_config_regs(&hw->cmq.crq); +} + +static int hclge_cmd_csq_clean(struct hclge_hw *hw) +{ + struct hclge_cmq_ring *csq = &hw->cmq.csq; + u16 ntc = csq->next_to_clean; + struct hclge_desc *desc; + int clean = 0; + u32 head; + + desc = &csq->desc[ntc]; + head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); + + while (head != ntc) { + memset(desc, 0, sizeof(*desc)); + ntc++; + if (ntc == csq->desc_num) + ntc = 0; + desc = &csq->desc[ntc]; + clean++; + } + csq->next_to_clean = 
ntc; + + return clean; +} + +static int hclge_cmd_csq_done(struct hclge_hw *hw) +{ + u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG); + return head == hw->cmq.csq.next_to_use; +} + +static bool hclge_is_special_opcode(u16 opcode) +{ + u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032}; + int i; + + for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { + if (spec_opcode[i] == opcode) + return true; + } + + return false; +} + +/** + * hclge_cmd_send - send command to command queue + * @hw: pointer to the hw struct + * @desc: prefilled descriptor for describing the command + * @num : the number of descriptors to be sent + * + * This is the main send command for command queue, it + * sends the queue, cleans the queue, etc + **/ +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num) +{ + struct hclge_dev *hdev = (struct hclge_dev *)hw->back; + struct hclge_desc *desc_to_use; + bool complete = false; + u32 timeout = 0; + int handle = 0; + int retval = 0; + u16 opcode, desc_ret; + int ntc; + + spin_lock_bh(&hw->cmq.csq.lock); + + if (num > hclge_ring_space(&hw->cmq.csq)) { + spin_unlock_bh(&hw->cmq.csq.lock); + return -EBUSY; + } + + /** + * Record the location of desc in the ring for this time + * which will be use for hardware to write back + */ + ntc = hw->cmq.csq.next_to_use; + opcode = desc[0].opcode; + while (handle < num) { + desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use]; + *desc_to_use = desc[handle]; + (hw->cmq.csq.next_to_use)++; + if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num) + hw->cmq.csq.next_to_use = 0; + handle++; + } + + /* Write to hardware */ + hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use); + + /** + * If the command is sync, wait for the firmware to write back, + * if multi descriptors to be sent, use the first one to check + */ + if (HCLGE_SEND_SYNC(desc->flag)) { + do { + if (hclge_cmd_csq_done(hw)) + break; + udelay(1); + timeout++; + } while (timeout < hw->cmq.tx_timeout); + } + + if (hclge_cmd_csq_done(hw)) { + complete = true; + handle = 0; + while (handle < num) { + /* Get the result of hardware write back */ + desc_to_use = &hw->cmq.csq.desc[ntc]; + desc[handle] = *desc_to_use; + pr_debug("Get cmd desc:\n"); + + if (likely(!hclge_is_special_opcode(opcode))) + desc_ret = desc[handle].retval; + else + desc_ret = desc[0].retval; + + if ((enum hclge_cmd_return_status)desc_ret == + HCLGE_CMD_EXEC_SUCCESS) + retval = 0; + else + retval = -EIO; + hw->cmq.last_status = (enum hclge_cmd_status)desc_ret; + ntc++; + handle++; + if (ntc == hw->cmq.csq.desc_num) + ntc = 0; + } + } + + if (!complete) + retval = -EAGAIN; + + /* Clean the command send queue */ + handle = hclge_cmd_csq_clean(hw); + if (handle != num) { + dev_warn(&hdev->pdev->dev, + "cleaned %d, need to clean %d\n", handle, num); + } + + spin_unlock_bh(&hw->cmq.csq.lock); + + return retval; +} + +enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw, + u32 *version) +{ + struct hclge_query_version *resp; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1); + resp = (struct hclge_query_version *)desc.data; + + ret = hclge_cmd_send(hw, &desc, 1); + if (!ret) + *version = le32_to_cpu(resp->firmware); + + return ret; +} + +int hclge_cmd_init(struct hclge_dev *hdev) +{ + u32 version; + int ret; + + /* Setup the queue entries for use cmd queue */ + hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; + hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM; + + /* Setup the lock for command queue */ + 
spin_lock_init(&hdev->hw.cmq.csq.lock); + spin_lock_init(&hdev->hw.cmq.crq.lock); + + /* Setup Tx write back timeout */ + hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT; + + /* Setup queue rings */ + ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CSQ); + if (ret) { + dev_err(&hdev->pdev->dev, + "CSQ ring setup error %d\n", ret); + return ret; + } + + ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CRQ); + if (ret) { + dev_err(&hdev->pdev->dev, + "CRQ ring setup error %d\n", ret); + goto err_csq; + } + + hclge_cmd_init_regs(&hdev->hw); + + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); + if (ret) { + dev_err(&hdev->pdev->dev, + "firmware version query failed %d\n", ret); + return ret; + } + hdev->fw_version = version; + + dev_info(&hdev->pdev->dev, "The firware version is %08x\n", version); + + return 0; +err_csq: + hclge_free_cmd_desc(&hdev->hw.cmq.csq); + return ret; +} + +static void hclge_destroy_queue(struct hclge_cmq_ring *ring) +{ + spin_lock_bh(&ring->lock); + hclge_free_cmd_desc(ring); + spin_unlock_bh(&ring->lock); +} + +void hclge_destroy_cmd_queue(struct hclge_hw *hw) +{ + hclge_destroy_queue(&hw->cmq.csq); + hclge_destroy_queue(&hw->cmq.crq); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h new file mode 100644 index 000000000000..91ae0135ee50 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -0,0 +1,740 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_CMD_H +#define __HCLGE_CMD_H +#include +#include + +#define HCLGE_CMDQ_TX_TIMEOUT 1000 + +struct hclge_dev; +struct hclge_desc { + __le16 opcode; + +#define HCLGE_CMDQ_RX_INVLD_B 0 +#define HCLGE_CMDQ_RX_OUTVLD_B 1 + + __le16 flag; + __le16 retval; + __le16 rsv; + __le32 data[6]; +}; + +struct hclge_desc_cb { + dma_addr_t dma; + void *va; + u32 length; +}; + +struct hclge_cmq_ring { + dma_addr_t desc_dma_addr; + struct hclge_desc *desc; + struct hclge_desc_cb *desc_cb; + struct hclge_dev *dev; + u32 head; + u32 tail; + + u16 buf_size; + u16 desc_num; + int next_to_use; + int next_to_clean; + u8 flag; + spinlock_t lock; /* Command queue lock */ +}; + +enum hclge_cmd_return_status { + HCLGE_CMD_EXEC_SUCCESS = 0, + HCLGE_CMD_NO_AUTH = 1, + HCLGE_CMD_NOT_EXEC = 2, + HCLGE_CMD_QUEUE_FULL = 3, +}; + +enum hclge_cmd_status { + HCLGE_STATUS_SUCCESS = 0, + HCLGE_ERR_CSQ_FULL = -1, + HCLGE_ERR_CSQ_TIMEOUT = -2, + HCLGE_ERR_CSQ_ERROR = -3, +}; + +struct hclge_cmq { + struct hclge_cmq_ring csq; + struct hclge_cmq_ring crq; + u16 tx_timeout; /* Tx timeout */ + enum hclge_cmd_status last_status; +}; + +#define HCLGE_CMD_FLAG_IN_VALID_SHIFT 0 +#define HCLGE_CMD_FLAG_OUT_VALID_SHIFT 1 +#define HCLGE_CMD_FLAG_NEXT_SHIFT 2 +#define HCLGE_CMD_FLAG_WR_OR_RD_SHIFT 3 +#define HCLGE_CMD_FLAG_NO_INTR_SHIFT 4 +#define HCLGE_CMD_FLAG_ERR_INTR_SHIFT 5 + +#define HCLGE_CMD_FLAG_IN BIT(HCLGE_CMD_FLAG_IN_VALID_SHIFT) +#define HCLGE_CMD_FLAG_OUT BIT(HCLGE_CMD_FLAG_OUT_VALID_SHIFT) +#define HCLGE_CMD_FLAG_NEXT BIT(HCLGE_CMD_FLAG_NEXT_SHIFT) +#define HCLGE_CMD_FLAG_WR BIT(HCLGE_CMD_FLAG_WR_OR_RD_SHIFT) +#define HCLGE_CMD_FLAG_NO_INTR BIT(HCLGE_CMD_FLAG_NO_INTR_SHIFT) +#define HCLGE_CMD_FLAG_ERR_INTR BIT(HCLGE_CMD_FLAG_ERR_INTR_SHIFT) + +enum hclge_opcode_type { + /* 
Generic command */ + HCLGE_OPC_QUERY_FW_VER = 0x0001, + HCLGE_OPC_CFG_RST_TRIGGER = 0x0020, + HCLGE_OPC_GBL_RST_STATUS = 0x0021, + HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022, + HCLGE_OPC_QUERY_PF_RSRC = 0x0023, + HCLGE_OPC_QUERY_VF_RSRC = 0x0024, + HCLGE_OPC_GET_CFG_PARAM = 0x0025, + + HCLGE_OPC_STATS_64_BIT = 0x0030, + HCLGE_OPC_STATS_32_BIT = 0x0031, + HCLGE_OPC_STATS_MAC = 0x0032, + /* Device management command */ + + /* MAC commond */ + HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, + HCLGE_OPC_CONFIG_AN_MODE = 0x0304, + HCLGE_OPC_QUERY_AN_RESULT = 0x0306, + HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, + HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, + HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + /* MACSEC command */ + + /* PFC/Pause CMD*/ + HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701, + HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702, + HCLGE_OPC_CFG_MAC_PARA = 0x0703, + HCLGE_OPC_CFG_PFC_PARA = 0x0704, + HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705, + HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706, + HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707, + HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708, + HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709, + HCLGE_OPC_QOS_MAP = 0x070A, + + /* ETS/scheduler commands */ + HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804, + HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805, + HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806, + HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807, + HCLGE_OPC_TM_PORT_WEIGHT = 0x0808, + HCLGE_OPC_TM_PG_WEIGHT = 0x0809, + HCLGE_OPC_TM_QS_WEIGHT = 0x080A, + HCLGE_OPC_TM_PRI_WEIGHT = 0x080B, + HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C, + HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D, + HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E, + HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F, + HCLGE_OPC_TM_PORT_SHAPPING = 0x0810, + HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812, + HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813, + HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814, + HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, + + /* Packet buffer allocate command */ + HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, + HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902, + HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903, + HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904, + HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905, + HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906, + + /* PTP command */ + /* TQP management command */ + HCLGE_OPC_SET_TQP_MAP = 0x0A01, + + /* TQP command */ + HCLGE_OPC_CFG_TX_QUEUE = 0x0B01, + HCLGE_OPC_QUERY_TX_POINTER = 0x0B02, + HCLGE_OPC_QUERY_TX_STATUS = 0x0B03, + HCLGE_OPC_CFG_RX_QUEUE = 0x0B11, + HCLGE_OPC_QUERY_RX_POINTER = 0x0B12, + HCLGE_OPC_QUERY_RX_STATUS = 0x0B13, + HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16, + HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17, + HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20, + HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22, + + /* TSO cmd */ + HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, + + /* RSS cmd */ + HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01, + HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07, + HCLGE_OPC_RSS_TC_MODE = 0x0D08, + HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02, + + /* Promisuous mode command */ + HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01, + + /* Interrupts cmd */ + HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503, + HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504, + + /* MAC command */ + HCLGE_OPC_MAC_VLAN_ADD = 0x1000, + HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001, + HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002, + HCLGE_OPC_MAC_VLAN_INSERT = 0x1003, + HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010, + HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011, + + /* Multicast linear table cmd */ + HCLGE_OPC_MTA_MAC_MODE_CFG = 0x1020, + HCLGE_OPC_MTA_MAC_FUNC_CFG = 0x1021, + HCLGE_OPC_MTA_TBL_ITEM_CFG = 0x1022, + HCLGE_OPC_MTA_TBL_ITEM_QUERY = 0x1023, + + /* VLAN command */ + HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100, + HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101, + 
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102, + + /* MDIO command */ + HCLGE_OPC_MDIO_CONFIG = 0x1900, + + /* QCN command */ + HCLGE_OPC_QCN_MOD_CFG = 0x1A01, + HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02, + HCLGE_OPC_QCN_SHAPPING_IR_CFG = 0x1A03, + HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04, + HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05, + HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06, + HCLGE_OPC_QCN_AJUST_INIT = 0x1A07, + HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08, + + /* Mailbox cmd */ + HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000, +}; + +#define HCLGE_TQP_REG_OFFSET 0x80000 +#define HCLGE_TQP_REG_SIZE 0x200 + +#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10 +#define HCLGE_RCB_INIT_FLAG_EN_B 0 +#define HCLGE_RCB_INIT_FLAG_FINI_B 8 +struct hclge_config_rcb_init { + __le16 rcb_init_flag; + u8 rsv[22]; +}; + +struct hclge_tqp_map { + __le16 tqp_id; /* Absolute tqp id for in this pf */ + u8 tqp_vf; /* VF id */ +#define HCLGE_TQP_MAP_TYPE_PF 0 +#define HCLGE_TQP_MAP_TYPE_VF 1 +#define HCLGE_TQP_MAP_TYPE_B 0 +#define HCLGE_TQP_MAP_EN_B 1 + u8 tqp_flag; /* Indicate it's pf or vf tqp */ + __le16 tqp_vid; /* Virtual id in this pf/vf */ + u8 rsv[18]; +}; + +#define HCLGE_VECTOR_ELEMENTS_PER_CMD 11 + +enum hclge_int_type { + HCLGE_INT_TX, + HCLGE_INT_RX, + HCLGE_INT_EVENT, +}; + +struct hclge_ctrl_vector_chain { + u8 int_vector_id; + u8 int_cause_num; +#define HCLGE_INT_TYPE_S 0 +#define HCLGE_INT_TYPE_M 0x3 +#define HCLGE_TQP_ID_S 2 +#define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S) + __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; +}; + +#define HCLGE_TC_NUM 8 +#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */ +#define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */ +struct hclge_tx_buff_alloc { + __le16 tx_pkt_buff[HCLGE_TC_NUM]; + u8 tx_buff_rsv[8]; +}; + +struct hclge_rx_priv_buff { + __le16 buf_num[HCLGE_TC_NUM]; + u8 rsv[8]; +}; + +struct hclge_query_version { + __le32 firmware; + __le32 firmware_rsv[5]; +}; + +#define HCLGE_RX_PRIV_EN_B 15 +#define HCLGE_TC_NUM_ONE_DESC 4 +struct hclge_priv_wl { + __le16 high; + __le16 low; +}; + +struct hclge_rx_priv_wl_buf { + struct hclge_priv_wl tc_wl[HCLGE_TC_NUM_ONE_DESC]; +}; + +struct hclge_rx_com_thrd { + struct hclge_priv_wl com_thrd[HCLGE_TC_NUM_ONE_DESC]; +}; + +struct hclge_rx_com_wl { + struct hclge_priv_wl com_wl; +}; + +struct hclge_waterline { + u32 low; + u32 high; +}; + +struct hclge_tc_thrd { + u32 low; + u32 high; +}; + +struct hclge_priv_buf { + struct hclge_waterline wl; /* Waterline for low and high*/ + u32 buf_size; /* TC private buffer size */ + u32 enable; /* Enable TC private buffer or not */ +}; + +#define HCLGE_MAX_TC_NUM 8 +struct hclge_shared_buf { + struct hclge_waterline self; + struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM]; + u32 buf_size; +}; + +#define HCLGE_RX_COM_WL_EN_B 15 +struct hclge_rx_com_wl_buf { + __le16 high_wl; + __le16 low_wl; + u8 rsv[20]; +}; + +#define HCLGE_RX_PKT_EN_B 15 +struct hclge_rx_pkt_buf { + __le16 high_pkt; + __le16 low_pkt; + u8 rsv[20]; +}; + +#define HCLGE_PF_STATE_DONE_B 0 +#define HCLGE_PF_STATE_MAIN_B 1 +#define HCLGE_PF_STATE_BOND_B 2 +#define HCLGE_PF_STATE_MAC_N_B 6 +#define HCLGE_PF_MAC_NUM_MASK 0x3 +#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B) +#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B) +struct hclge_func_status { + __le32 vf_rst_state[4]; + u8 pf_state; + u8 mac_id; + u8 rsv1; + u8 pf_cnt_in_mac; + u8 pf_num; + u8 vf_num; + u8 rsv[2]; +}; + +struct hclge_pf_res { + __le16 tqp_num; + __le16 buf_size; + __le16 msixcap_localid_ba_nic; + __le16 msixcap_localid_ba_rocee; +#define 
HCLGE_PF_VEC_NUM_S 0 +#define HCLGE_PF_VEC_NUM_M (0xff << HCLGE_PF_VEC_NUM_S) + __le16 pf_intr_vector_number; + __le16 pf_own_fun_number; + __le32 rsv[3]; +}; + +#define HCLGE_CFG_OFFSET_S 0 +#define HCLGE_CFG_OFFSET_M 0xfffff /* Byte (8-10.3) */ +#define HCLGE_CFG_RD_LEN_S 24 +#define HCLGE_CFG_RD_LEN_M (0xf << HCLGE_CFG_RD_LEN_S) +#define HCLGE_CFG_RD_LEN_BYTES 16 +#define HCLGE_CFG_RD_LEN_UNIT 4 + +#define HCLGE_CFG_VMDQ_S 0 +#define HCLGE_CFG_VMDQ_M (0xff << HCLGE_CFG_VMDQ_S) +#define HCLGE_CFG_TC_NUM_S 8 +#define HCLGE_CFG_TC_NUM_M (0xff << HCLGE_CFG_TC_NUM_S) +#define HCLGE_CFG_TQP_DESC_N_S 16 +#define HCLGE_CFG_TQP_DESC_N_M (0xffff << HCLGE_CFG_TQP_DESC_N_S) +#define HCLGE_CFG_PHY_ADDR_S 0 +#define HCLGE_CFG_PHY_ADDR_M (0x1f << HCLGE_CFG_PHY_ADDR_S) +#define HCLGE_CFG_MEDIA_TP_S 8 +#define HCLGE_CFG_MEDIA_TP_M (0xff << HCLGE_CFG_MEDIA_TP_S) +#define HCLGE_CFG_RX_BUF_LEN_S 16 +#define HCLGE_CFG_RX_BUF_LEN_M (0xffff << HCLGE_CFG_RX_BUF_LEN_S) +#define HCLGE_CFG_MAC_ADDR_H_S 0 +#define HCLGE_CFG_MAC_ADDR_H_M (0xffff << HCLGE_CFG_MAC_ADDR_H_S) +#define HCLGE_CFG_DEFAULT_SPEED_S 16 +#define HCLGE_CFG_DEFAULT_SPEED_M (0xff << HCLGE_CFG_DEFAULT_SPEED_S) + +struct hclge_cfg_param { + __le32 offset; + __le32 rsv; + __le32 param[4]; +}; + +#define HCLGE_MAC_MODE 0x0 +#define HCLGE_DESC_NUM 0x40 + +#define HCLGE_ALLOC_VALID_B 0 +struct hclge_vf_num { + u8 alloc_valid; + u8 rsv[23]; +}; + +#define HCLGE_RSS_DEFAULT_OUTPORT_B 4 +#define HCLGE_RSS_HASH_KEY_OFFSET_B 4 +#define HCLGE_RSS_HASH_KEY_NUM 16 +struct hclge_rss_config { + u8 hash_config; + u8 rsv[7]; + u8 hash_key[HCLGE_RSS_HASH_KEY_NUM]; +}; + +struct hclge_rss_input_tuple { + u8 ipv4_tcp_en; + u8 ipv4_udp_en; + u8 ipv4_sctp_en; + u8 ipv4_fragment_en; + u8 ipv6_tcp_en; + u8 ipv6_udp_en; + u8 ipv6_sctp_en; + u8 ipv6_fragment_en; + u8 rsv[16]; +}; + +#define HCLGE_RSS_CFG_TBL_SIZE 16 + +struct hclge_rss_indirection_table { + u16 start_table_index; + u16 rss_set_bitmap; + u8 rsv[4]; + u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE]; +}; + +#define HCLGE_RSS_TC_OFFSET_S 0 +#define HCLGE_RSS_TC_OFFSET_M (0x3ff << HCLGE_RSS_TC_OFFSET_S) +#define HCLGE_RSS_TC_SIZE_S 12 +#define HCLGE_RSS_TC_SIZE_M (0x7 << HCLGE_RSS_TC_SIZE_S) +#define HCLGE_RSS_TC_VALID_B 15 +struct hclge_rss_tc_mode { + u16 rss_tc_mode[HCLGE_MAX_TC_NUM]; + u8 rsv[8]; +}; + +#define HCLGE_LINK_STS_B 0 +#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B) +struct hclge_link_status { + u8 status; + u8 rsv[23]; +}; + +struct hclge_promisc_param { + u8 vf_id; + u8 enable; +}; + +#define HCLGE_PROMISC_EN_B 1 +#define HCLGE_PROMISC_EN_ALL 0x7 +#define HCLGE_PROMISC_EN_UC 0x1 +#define HCLGE_PROMISC_EN_MC 0x2 +#define HCLGE_PROMISC_EN_BC 0x4 +struct hclge_promisc_cfg { + u8 flag; + u8 vf_id; + __le16 rsv0; + u8 rsv1[20]; +}; + +enum hclge_promisc_type { + HCLGE_UNICAST = 1, + HCLGE_MULTICAST = 2, + HCLGE_BROADCAST = 3, +}; + +#define HCLGE_MAC_TX_EN_B 6 +#define HCLGE_MAC_RX_EN_B 7 +#define HCLGE_MAC_PAD_TX_B 11 +#define HCLGE_MAC_PAD_RX_B 12 +#define HCLGE_MAC_1588_TX_B 13 +#define HCLGE_MAC_1588_RX_B 14 +#define HCLGE_MAC_APP_LP_B 15 +#define HCLGE_MAC_LINE_LP_B 16 +#define HCLGE_MAC_FCS_TX_B 17 +#define HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B 18 +#define HCLGE_MAC_RX_FCS_STRIP_B 19 +#define HCLGE_MAC_RX_FCS_B 20 +#define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21 +#define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22 + +struct hclge_config_mac_mode { + __le32 txrx_pad_fcs_loop_en; + u8 rsv[20]; +}; + +#define HCLGE_CFG_SPEED_S 0 +#define HCLGE_CFG_SPEED_M (0x3f << HCLGE_CFG_SPEED_S) + +#define HCLGE_CFG_DUPLEX_B 7 +#define 
HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B) + +struct hclge_config_mac_speed_dup { + u8 speed_dup; + +#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0 + u8 mac_change_fec_en; + u8 rsv[22]; +}; + +#define HCLGE_QUERY_SPEED_S 3 +#define HCLGE_QUERY_AN_B 0 +#define HCLGE_QUERY_DUPLEX_B 2 + +#define HCLGE_QUERY_SPEED_M (0x1f << HCLGE_QUERY_SPEED_S) +#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B) +#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B) + +struct hclge_query_an_speed_dup { + u8 an_syn_dup_speed; + u8 pause; + u8 rsv[23]; +}; + +#define HCLGE_RING_ID_MASK 0x3ff +#define HCLGE_TQP_ENABLE_B 0 + +#define HCLGE_MAC_CFG_AN_EN_B 0 +#define HCLGE_MAC_CFG_AN_INT_EN_B 1 +#define HCLGE_MAC_CFG_AN_INT_MSK_B 2 +#define HCLGE_MAC_CFG_AN_INT_CLR_B 3 +#define HCLGE_MAC_CFG_AN_RST_B 4 + +#define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B) + +struct hclge_config_auto_neg { + __le32 cfg_an_cmd_flag; + u8 rsv[20]; +}; + +#define HCLGE_MAC_MIN_MTU 64 +#define HCLGE_MAC_MAX_MTU 9728 +#define HCLGE_MAC_UPLINK_PORT 0x100 + +struct hclge_config_max_frm_size { + __le16 max_frm_size; + u8 rsv[22]; +}; + +enum hclge_mac_vlan_tbl_opcode { + HCLGE_MAC_VLAN_ADD, /* Add new or modify mac_vlan */ + HCLGE_MAC_VLAN_UPDATE, /* Modify other fields of this table */ + HCLGE_MAC_VLAN_REMOVE, /* Remove a entry through mac_vlan key */ + HCLGE_MAC_VLAN_LKUP, /* Lookup a entry through mac_vlan key */ +}; + +#define HCLGE_MAC_VLAN_BIT0_EN_B 0x0 +#define HCLGE_MAC_VLAN_BIT1_EN_B 0x1 +#define HCLGE_MAC_EPORT_SW_EN_B 0xc +#define HCLGE_MAC_EPORT_TYPE_B 0xb +#define HCLGE_MAC_EPORT_VFID_S 0x3 +#define HCLGE_MAC_EPORT_VFID_M (0xff << HCLGE_MAC_EPORT_VFID_S) +#define HCLGE_MAC_EPORT_PFID_S 0x0 +#define HCLGE_MAC_EPORT_PFID_M (0x7 << HCLGE_MAC_EPORT_PFID_S) +struct hclge_mac_vlan_tbl_entry { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + __le32 mac_addr_hi32; + __le16 mac_addr_lo16; + __le16 rsv1; + u8 entry_type; + u8 mc_mac_en; + __le16 egress_port; + __le16 egress_queue; + u8 rsv2[6]; +}; + +#define HCLGE_CFG_MTA_MAC_SEL_S 0x0 +#define HCLGE_CFG_MTA_MAC_SEL_M (0x3 << HCLGE_CFG_MTA_MAC_SEL_S) +#define HCLGE_CFG_MTA_MAC_EN_B 0x7 +struct hclge_mta_filter_mode { + u8 dmac_sel_en; /* Use lowest 2 bit as sel_mode, bit 7 as enable */ + u8 rsv[23]; +}; + +#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0 +struct hclge_cfg_func_mta_filter { + u8 accept; /* Only used lowest 1 bit */ + u8 function_id; + u8 rsv[22]; +}; + +#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0 +#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0 +#define HCLGE_CFG_MTA_ITEM_IDX_M (0xfff << HCLGE_CFG_MTA_ITEM_IDX_S) +struct hclge_cfg_func_mta_item { + u16 item_idx; /* Only used lowest 12 bit */ + u8 accept; /* Only used lowest 1 bit */ + u8 rsv[21]; +}; + +struct hclge_mac_vlan_add { + __le16 flags; + __le16 mac_addr_hi16; + __le32 mac_addr_lo32; + __le32 mac_addr_msk_hi32; + __le16 mac_addr_msk_lo16; + __le16 vlan_tag; + __le16 ingress_port; + __le16 egress_port; + u8 rsv[4]; +}; + +#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0 +struct hclge_mac_vlan_remove { + __le16 flags; + __le16 mac_addr_hi16; + __le32 mac_addr_lo32; + __le32 mac_addr_msk_hi32; + __le16 mac_addr_msk_lo16; + __le16 vlan_tag; + __le16 ingress_port; + __le16 egress_port; + u8 rsv[4]; +}; + +struct hclge_vlan_filter_ctrl { + u8 vlan_type; + u8 vlan_fe; + u8 rsv[22]; +}; + +struct hclge_vlan_filter_pf_cfg { + u8 vlan_offset; + u8 vlan_cfg; + u8 rsv[2]; + u8 vlan_offset_bitmap[20]; +}; + +struct hclge_vlan_filter_vf_cfg { + u16 vlan_id; + u8 resp_code; + u8 rsv; + u8 vlan_cfg; + u8 rsv1[3]; + u8 vf_bitmap[16]; +}; + +struct 
hclge_cfg_com_tqp_queue { + __le16 tqp_id; + __le16 stream_id; + u8 enable; + u8 rsv[19]; +}; + +struct hclge_cfg_tx_queue_pointer { + __le16 tqp_id; + __le16 tx_tail; + __le16 tx_head; + __le16 fbd_num; + __le16 ring_offset; + u8 rsv[14]; +}; + +#define HCLGE_TSO_MSS_MIN_S 0 +#define HCLGE_TSO_MSS_MIN_M (0x3FFF << HCLGE_TSO_MSS_MIN_S) + +#define HCLGE_TSO_MSS_MAX_S 16 +#define HCLGE_TSO_MSS_MAX_M (0x3FFF << HCLGE_TSO_MSS_MAX_S) + +struct hclge_cfg_tso_status { + __le16 tso_mss_min; + __le16 tso_mss_max; + u8 rsv[20]; +}; + +#define HCLGE_TSO_MSS_MIN 256 +#define HCLGE_TSO_MSS_MAX 9668 + +#define HCLGE_TQP_RESET_B 0 +struct hclge_reset_tqp_queue { + __le16 tqp_id; + u8 reset_req; + u8 ready_to_reset; + u8 rsv[20]; +}; + +#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ +#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ +#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ + +#define HCLGE_TYPE_CRQ 0 +#define HCLGE_TYPE_CSQ 1 +#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGE_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGE_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c +#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGE_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGE_NIC_CRQ_HEAD_REG 0x27028 +#define HCLGE_NIC_CMQ_EN_B 16 +#define HCLGE_NIC_CMQ_ENABLE BIT(HCLGE_NIC_CMQ_EN_B) +#define HCLGE_NIC_CMQ_DESC_NUM 1024 +#define HCLGE_NIC_CMQ_DESC_NUM_S 3 + +int hclge_cmd_init(struct hclge_dev *hdev); +static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) +{ + writel(value, base + reg); +} + +#define hclge_write_dev(a, reg, value) \ + hclge_write_reg((a)->io_base, (reg), (value)) +#define hclge_read_dev(a, reg) \ + hclge_read_reg((a)->io_base, (reg)) + +static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg) +{ + u8 __iomem *reg_addr = READ_ONCE(base); + + return readl(reg_addr + reg); +} + +#define HCLGE_SEND_SYNC(flag) \ + ((flag) & HCLGE_CMD_FLAG_NO_INTR) + +struct hclge_hw; +int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num); +void hclge_cmd_setup_basic_desc(struct hclge_desc *desc, + enum hclge_opcode_type opcode, bool is_read); + +int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, + struct hclge_promisc_param *param); + +enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw, + struct hclge_desc *desc); +enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw, + struct hclge_desc *desc); + +void hclge_destroy_cmd_queue(struct hclge_hw *hw); +#endif -- cgit v1.2.3-55-g7522 From 46a3df9f9718541cf1f805be03146427264ef330 Mon Sep 17 00:00:00 2001 From: Salil Date: Wed, 2 Aug 2017 16:59:48 +0100 Subject: net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support This patch adds the support of Hisilicon Network Subsystem Accceleration Engine and common operations to access it. This layer provides access to the hardware configuration, hardware statistics. This layer is also responsible for triggering the initialization of the PHY layer through the below MDIO layer. Signed-off-by: Daode Huang Signed-off-by: lipeng Signed-off-by: Salil Mehta Signed-off-by: Yisen Zhuang Signed-off-by: Wei Hu (Xavier) Signed-off-by: David S. 
Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 4267 ++++++++++++++++++++ .../ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 519 +++ 2 files changed, 4786 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c new file mode 100644 index 000000000000..3611991689bc --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -0,0 +1,4267 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_mdio.h" +#include "hclge_tm.h" +#include "hnae3.h" + +#define HCLGE_NAME "hclge" +#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) +#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) +#define HCLGE_64BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_64_bit_stats, f)) +#define HCLGE_32BIT_STATS_FIELD_OFF(f) (offsetof(struct hclge_32_bit_stats, f)) + +static int hclge_rss_init_hw(struct hclge_dev *hdev); +static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, + enum hclge_mta_dmac_sel_type mta_mac_sel, + bool enable); +static int hclge_init_vlan_config(struct hclge_dev *hdev); + +static struct hnae3_ae_algo ae_algo; + +static const struct pci_device_id ae_algo_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + /* Required last entry */ + {0, } +}; + +static const struct pci_device_id roce_pci_tbl[] = { + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, + {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, + /* Required last entry */ + {0, } +}; + +static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { + "Mac Loopback test", + "Serdes Loopback test", + "Phy Loopback test" +}; + +static const struct hclge_comm_stats_str g_all_64bit_stats_string[] = { + {"igu_rx_oversize_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_oversize_pkt)}, + {"igu_rx_undersize_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_undersize_pkt)}, + {"igu_rx_out_all_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_out_all_pkt)}, + {"igu_rx_uni_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_uni_pkt)}, + {"igu_rx_multi_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_multi_pkt)}, + {"igu_rx_broad_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(igu_rx_broad_pkt)}, + {"egu_tx_out_all_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_out_all_pkt)}, + {"egu_tx_uni_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_uni_pkt)}, + {"egu_tx_multi_pkt", + HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_multi_pkt)}, + {"egu_tx_broad_pkt", + 
HCLGE_64BIT_STATS_FIELD_OFF(egu_tx_broad_pkt)}, + {"ssu_ppp_mac_key_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_mac_key_num)}, + {"ssu_ppp_host_key_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_ppp_host_key_num)}, + {"ppp_ssu_mac_rlt_num", + HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_mac_rlt_num)}, + {"ppp_ssu_host_rlt_num", + HCLGE_64BIT_STATS_FIELD_OFF(ppp_ssu_host_rlt_num)}, + {"ssu_tx_in_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_in_num)}, + {"ssu_tx_out_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_tx_out_num)}, + {"ssu_rx_in_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_in_num)}, + {"ssu_rx_out_num", + HCLGE_64BIT_STATS_FIELD_OFF(ssu_rx_out_num)} +}; + +static const struct hclge_comm_stats_str g_all_32bit_stats_string[] = { + {"igu_rx_err_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_err_pkt)}, + {"igu_rx_no_eof_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_eof_pkt)}, + {"igu_rx_no_sof_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(igu_rx_no_sof_pkt)}, + {"egu_tx_1588_pkt", + HCLGE_32BIT_STATS_FIELD_OFF(egu_tx_1588_pkt)}, + {"ssu_full_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ssu_full_drop_num)}, + {"ssu_part_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ssu_part_drop_num)}, + {"ppp_key_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ppp_key_drop_num)}, + {"ppp_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ppp_rlt_drop_num)}, + {"ssu_key_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(ssu_key_drop_num)}, + {"pkt_curr_buf_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_cnt)}, + {"qcn_fb_rcv_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_rcv_cnt)}, + {"qcn_fb_drop_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_drop_cnt)}, + {"qcn_fb_invaild_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(qcn_fb_invaild_cnt)}, + {"rx_packet_tc0_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_in_cnt)}, + {"rx_packet_tc1_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_in_cnt)}, + {"rx_packet_tc2_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_in_cnt)}, + {"rx_packet_tc3_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_in_cnt)}, + {"rx_packet_tc4_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_in_cnt)}, + {"rx_packet_tc5_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_in_cnt)}, + {"rx_packet_tc6_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_in_cnt)}, + {"rx_packet_tc7_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_in_cnt)}, + {"rx_packet_tc0_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc0_out_cnt)}, + {"rx_packet_tc1_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc1_out_cnt)}, + {"rx_packet_tc2_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc2_out_cnt)}, + {"rx_packet_tc3_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc3_out_cnt)}, + {"rx_packet_tc4_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc4_out_cnt)}, + {"rx_packet_tc5_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc5_out_cnt)}, + {"rx_packet_tc6_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc6_out_cnt)}, + {"rx_packet_tc7_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_packet_tc7_out_cnt)}, + {"tx_packet_tc0_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_in_cnt)}, + {"tx_packet_tc1_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_in_cnt)}, + {"tx_packet_tc2_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_in_cnt)}, + {"tx_packet_tc3_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_in_cnt)}, + {"tx_packet_tc4_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_in_cnt)}, + {"tx_packet_tc5_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_in_cnt)}, + {"tx_packet_tc6_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_in_cnt)}, + 
{"tx_packet_tc7_in_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_in_cnt)}, + {"tx_packet_tc0_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc0_out_cnt)}, + {"tx_packet_tc1_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc1_out_cnt)}, + {"tx_packet_tc2_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc2_out_cnt)}, + {"tx_packet_tc3_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc3_out_cnt)}, + {"tx_packet_tc4_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc4_out_cnt)}, + {"tx_packet_tc5_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc5_out_cnt)}, + {"tx_packet_tc6_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc6_out_cnt)}, + {"tx_packet_tc7_out_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_packet_tc7_out_cnt)}, + {"pkt_curr_buf_tc0_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc0_cnt)}, + {"pkt_curr_buf_tc1_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc1_cnt)}, + {"pkt_curr_buf_tc2_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc2_cnt)}, + {"pkt_curr_buf_tc3_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc3_cnt)}, + {"pkt_curr_buf_tc4_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc4_cnt)}, + {"pkt_curr_buf_tc5_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc5_cnt)}, + {"pkt_curr_buf_tc6_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc6_cnt)}, + {"pkt_curr_buf_tc7_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(pkt_curr_buf_tc7_cnt)}, + {"mb_uncopy_num", + HCLGE_32BIT_STATS_FIELD_OFF(mb_uncopy_num)}, + {"lo_pri_unicast_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_unicast_rlt_drop_num)}, + {"hi_pri_multicast_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(hi_pri_multicast_rlt_drop_num)}, + {"lo_pri_multicast_rlt_drop_num", + HCLGE_32BIT_STATS_FIELD_OFF(lo_pri_multicast_rlt_drop_num)}, + {"rx_oq_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(rx_oq_drop_pkt_cnt)}, + {"tx_oq_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(tx_oq_drop_pkt_cnt)}, + {"nic_l2_err_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(nic_l2_err_drop_pkt_cnt)}, + {"roc_l2_err_drop_pkt_cnt", + HCLGE_32BIT_STATS_FIELD_OFF(roc_l2_err_drop_pkt_cnt)} +}; + +static const struct hclge_comm_stats_str g_mac_stats_string[] = { + {"mac_tx_mac_pause_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)}, + {"mac_rx_mac_pause_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)}, + {"mac_tx_pfc_pri0_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)}, + {"mac_tx_pfc_pri1_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)}, + {"mac_tx_pfc_pri2_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)}, + {"mac_tx_pfc_pri3_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)}, + {"mac_tx_pfc_pri4_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)}, + {"mac_tx_pfc_pri5_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)}, + {"mac_tx_pfc_pri6_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)}, + {"mac_tx_pfc_pri7_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)}, + {"mac_rx_pfc_pri0_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)}, + {"mac_rx_pfc_pri1_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)}, + {"mac_rx_pfc_pri2_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)}, + {"mac_rx_pfc_pri3_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)}, + {"mac_rx_pfc_pri4_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)}, + {"mac_rx_pfc_pri5_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)}, + {"mac_rx_pfc_pri6_pkt_num", + 
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)}, + {"mac_rx_pfc_pri7_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)}, + {"mac_tx_total_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)}, + {"mac_tx_total_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)}, + {"mac_tx_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)}, + {"mac_tx_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)}, + {"mac_tx_good_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)}, + {"mac_tx_bad_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)}, + {"mac_tx_uni_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)}, + {"mac_tx_multi_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)}, + {"mac_tx_broad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)}, + {"mac_tx_undersize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)}, + {"mac_tx_overrsize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_overrsize_pkt_num)}, + {"mac_tx_64_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)}, + {"mac_tx_65_127_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)}, + {"mac_tx_128_255_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)}, + {"mac_tx_256_511_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)}, + {"mac_tx_512_1023_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)}, + {"mac_tx_1024_1518_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)}, + {"mac_tx_1519_max_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_oct_pkt_num)}, + {"mac_rx_total_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)}, + {"mac_rx_total_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)}, + {"mac_rx_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)}, + {"mac_rx_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)}, + {"mac_rx_good_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)}, + {"mac_rx_bad_oct_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)}, + {"mac_rx_uni_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)}, + {"mac_rx_multi_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)}, + {"mac_rx_broad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)}, + {"mac_rx_undersize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)}, + {"mac_rx_overrsize_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_overrsize_pkt_num)}, + {"mac_rx_64_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)}, + {"mac_rx_65_127_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)}, + {"mac_rx_128_255_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)}, + {"mac_rx_256_511_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)}, + {"mac_rx_512_1023_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)}, + {"mac_rx_1024_1518_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)}, + {"mac_rx_1519_max_oct_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_oct_pkt_num)}, + + {"mac_trans_fragment_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_fragment_pkt_num)}, + {"mac_trans_undermin_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_undermin_pkt_num)}, + {"mac_trans_jabber_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_jabber_pkt_num)}, + {"mac_trans_err_all_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_err_all_pkt_num)}, + {"mac_trans_from_app_good_pkt_num", + 
HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_good_pkt_num)}, + {"mac_trans_from_app_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_trans_from_app_bad_pkt_num)}, + {"mac_rcv_fragment_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fragment_pkt_num)}, + {"mac_rcv_undermin_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_undermin_pkt_num)}, + {"mac_rcv_jabber_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_jabber_pkt_num)}, + {"mac_rcv_fcs_err_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_fcs_err_pkt_num)}, + {"mac_rcv_send_app_good_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_good_pkt_num)}, + {"mac_rcv_send_app_bad_pkt_num", + HCLGE_MAC_STATS_FIELD_OFF(mac_rcv_send_app_bad_pkt_num)} +}; + +static int hclge_64_bit_update_stats(struct hclge_dev *hdev) +{ +#define HCLGE_64_BIT_CMD_NUM 5 +#define HCLGE_64_BIT_RTN_DATANUM 4 + u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats); + struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM]; + u64 *desc_data; + int i, k, n; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_64_BIT, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_64_BIT_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 64 bit pkt stats fail, status = %d.\n", ret); + return ret; + } + + for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) { + if (unlikely(i == 0)) { + desc_data = (u64 *)(&desc[i].data[0]); + n = HCLGE_64_BIT_RTN_DATANUM - 1; + } else { + desc_data = (u64 *)(&desc[i]); + n = HCLGE_64_BIT_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *data++ += cpu_to_le64(*desc_data); + desc_data++; + } + } + + return 0; +} + +static void hclge_reset_partial_32bit_counter(struct hclge_32_bit_stats *stats) +{ + stats->pkt_curr_buf_cnt = 0; + stats->pkt_curr_buf_tc0_cnt = 0; + stats->pkt_curr_buf_tc1_cnt = 0; + stats->pkt_curr_buf_tc2_cnt = 0; + stats->pkt_curr_buf_tc3_cnt = 0; + stats->pkt_curr_buf_tc4_cnt = 0; + stats->pkt_curr_buf_tc5_cnt = 0; + stats->pkt_curr_buf_tc6_cnt = 0; + stats->pkt_curr_buf_tc7_cnt = 0; +} + +static int hclge_32_bit_update_stats(struct hclge_dev *hdev) +{ +#define HCLGE_32_BIT_CMD_NUM 8 +#define HCLGE_32_BIT_RTN_DATANUM 8 + + struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM]; + struct hclge_32_bit_stats *all_32_bit_stats; + u32 *desc_data; + int i, k, n; + u64 *data; + int ret; + + all_32_bit_stats = &hdev->hw_stats.all_32_bit_stats; + data = (u64 *)(&all_32_bit_stats->egu_tx_1588_pkt); + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_32_BIT, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_32_BIT_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get 32 bit pkt stats fail, status = %d.\n", ret); + + return ret; + } + + hclge_reset_partial_32bit_counter(all_32_bit_stats); + for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) { + if (unlikely(i == 0)) { + all_32_bit_stats->igu_rx_err_pkt += + cpu_to_le32(desc[i].data[0]); + all_32_bit_stats->igu_rx_no_eof_pkt += + cpu_to_le32(desc[i].data[1] & 0xffff); + all_32_bit_stats->igu_rx_no_sof_pkt += + cpu_to_le32((desc[i].data[1] >> 16) & 0xffff); + + desc_data = (u32 *)(&desc[i].data[2]); + n = HCLGE_32_BIT_RTN_DATANUM - 4; + } else { + desc_data = (u32 *)(&desc[i]); + n = HCLGE_32_BIT_RTN_DATANUM; + } + for (k = 0; k < n; k++) { + *data++ += cpu_to_le32(*desc_data); + desc_data++; + } + } + + return 0; +} + +static int hclge_mac_update_stats(struct hclge_dev *hdev) +{ +#define HCLGE_MAC_CMD_NUM 17 +#define HCLGE_RTN_DATA_NUM 4 + + u64 *data = (u64 *)(&hdev->hw_stats.mac_stats); + struct hclge_desc desc[HCLGE_MAC_CMD_NUM]; + u64 *desc_data; + int i, k, n; + int ret; + + 
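/* MAC counters come back spread across HCLGE_MAC_CMD_NUM command
 * descriptors: the loop below accumulates the returned 64-bit words into
 * hw_stats.mac_stats, taking two words from the first descriptor and four
 * from each of the remaining ones.
 */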
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true); + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get MAC pkt stats fail, status = %d.\n", ret); + + return ret; + } + + for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) { + if (unlikely(i == 0)) { + desc_data = (u64 *)(&desc[i].data[0]); + n = HCLGE_RTN_DATA_NUM - 2; + } else { + desc_data = (u64 *)(&desc[i]); + n = HCLGE_RTN_DATA_NUM; + } + for (k = 0; k < n; k++) { + *data++ += cpu_to_le64(*desc_data); + desc_data++; + } + } + + return 0; +} + +static int hclge_tqps_update_stats(struct hnae3_handle *handle) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hnae3_queue *queue; + struct hclge_desc desc[1]; + struct hclge_tqp *tqp; + int ret, i; + + for (i = 0; i < kinfo->num_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclge_tqp, q); + /* command : HCLGE_OPC_QUERY_IGU_STAT */ + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_QUERY_RX_STATUS, + true); + + desc[0].data[0] = (tqp->index & 0x1ff); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query tqp stat fail, status = %d,queue = %d\n", + ret, i); + return ret; + } + tqp->tqp_stats.rcb_rx_ring_pktnum_rcd += + cpu_to_le32(desc[0].data[4]); + } + + for (i = 0; i < kinfo->num_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclge_tqp, q); + /* command : HCLGE_OPC_QUERY_IGU_STAT */ + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_QUERY_TX_STATUS, + true); + + desc[0].data[0] = (tqp->index & 0x1ff); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Query tqp stat fail, status = %d,queue = %d\n", + ret, i); + return ret; + } + tqp->tqp_stats.rcb_tx_ring_pktnum_rcd += + cpu_to_le32(desc[0].data[4]); + } + + return 0; +} + +static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hclge_tqp *tqp; + u64 *buff = data; + int i; + + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); + *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_tx_ring_pktnum_rcd); + } + + for (i = 0; i < kinfo->num_tqps; i++) { + tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q); + *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_rx_ring_pktnum_rcd); + } + + return buff; +} + +static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + + return kinfo->num_tqps * (2); +} + +static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + u8 *buff = data; + int i = 0; + + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i], + struct hclge_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_tx_pktnum_rcd", + tqp->index); + buff = buff + ETH_GSTRING_LEN; + } + + for (i = 0; i < kinfo->num_tqps; i++) { + struct hclge_tqp *tqp = container_of(kinfo->tqp[i], + struct hclge_tqp, q); + snprintf(buff, ETH_GSTRING_LEN, "rcb_q%d_rx_pktnum_rcd", + tqp->index); + buff = buff + ETH_GSTRING_LEN; + } + + return buff; +} + +static u64 *hclge_comm_get_stats(void *comm_stats, + const struct hclge_comm_stats_str strs[], + int size, u64 *data) +{ + u64 *buf = data; + u32 i; + + for (i = 0; i < size; i++) + buf[i] = 
HCLGE_STATS_READ(comm_stats, strs[i].offset); + + return buf + size; +} + +static u8 *hclge_comm_get_strings(u32 stringset, + const struct hclge_comm_stats_str strs[], + int size, u8 *data) +{ + char *buff = (char *)data; + u32 i; + + if (stringset != ETH_SS_STATS) + return buff; + + for (i = 0; i < size; i++) { + snprintf(buff, ETH_GSTRING_LEN, + strs[i].desc); + buff = buff + ETH_GSTRING_LEN; + } + + return (u8 *)buff; +} + +static void hclge_update_netstat(struct hclge_hw_stats *hw_stats, + struct net_device_stats *net_stats) +{ + net_stats->tx_dropped = 0; + net_stats->rx_dropped = hw_stats->all_32_bit_stats.ssu_full_drop_num; + net_stats->rx_dropped += hw_stats->all_32_bit_stats.ppp_key_drop_num; + net_stats->rx_dropped += hw_stats->all_32_bit_stats.ssu_key_drop_num; + + net_stats->rx_errors = hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + net_stats->rx_errors += hw_stats->mac_stats.mac_rx_undersize_pkt_num; + net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_err_pkt; + net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_eof_pkt; + net_stats->rx_errors += hw_stats->all_32_bit_stats.igu_rx_no_sof_pkt; + net_stats->rx_errors += hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; + + net_stats->multicast = hw_stats->mac_stats.mac_tx_multi_pkt_num; + net_stats->multicast += hw_stats->mac_stats.mac_rx_multi_pkt_num; + + net_stats->rx_crc_errors = hw_stats->mac_stats.mac_rcv_fcs_err_pkt_num; + net_stats->rx_length_errors = + hw_stats->mac_stats.mac_rx_undersize_pkt_num; + net_stats->rx_length_errors += + hw_stats->mac_stats.mac_rx_overrsize_pkt_num; + net_stats->rx_over_errors = + hw_stats->mac_stats.mac_rx_overrsize_pkt_num; +} + +static void hclge_update_stats_for_all(struct hclge_dev *hdev) +{ + struct hnae3_handle *handle; + int status; + + handle = &hdev->vport[0].nic; + if (handle->client) { + status = hclge_tqps_update_stats(handle); + if (status) { + dev_err(&hdev->pdev->dev, + "Update TQPS stats fail, status = %d.\n", + status); + } + } + + status = hclge_mac_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update MAC stats fail, status = %d.\n", status); + + status = hclge_32_bit_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update 32 bit stats fail, status = %d.\n", + status); + + hclge_update_netstat(&hdev->hw_stats, &handle->kinfo.netdev->stats); +} + +static void hclge_update_stats(struct hnae3_handle *handle, + struct net_device_stats *net_stats) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_hw_stats *hw_stats = &hdev->hw_stats; + int status; + + status = hclge_mac_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update MAC stats fail, status = %d.\n", + status); + + status = hclge_32_bit_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update 32 bit stats fail, status = %d.\n", + status); + + status = hclge_64_bit_update_stats(hdev); + if (status) + dev_err(&hdev->pdev->dev, + "Update 64 bit stats fail, status = %d.\n", + status); + + status = hclge_tqps_update_stats(handle); + if (status) + dev_err(&hdev->pdev->dev, + "Update TQPS stats fail, status = %d.\n", + status); + + hclge_update_netstat(hw_stats, net_stats); +} + +static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) +{ +#define HCLGE_LOOPBACK_TEST_FLAGS 0x7 + + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int count = 0; + + /* Loopback test support rules: + * mac: only GE mode support + * serdes: all 
mac mode will support include GE/XGE/LGE/CGE + * phy: only support when phy device exist on board + */ + if (stringset == ETH_SS_TEST) { + /* clear loopback bit flags at first */ + handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS)); + if (hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M || + hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) { + count += 1; + handle->flags |= HNAE3_SUPPORT_MAC_LOOPBACK; + } else { + count = -EOPNOTSUPP; + } + } else if (stringset == ETH_SS_STATS) { + count = ARRAY_SIZE(g_mac_stats_string) + + ARRAY_SIZE(g_all_32bit_stats_string) + + ARRAY_SIZE(g_all_64bit_stats_string) + + hclge_tqps_get_sset_count(handle, stringset); + } + + return count; +} + +static void hclge_get_strings(struct hnae3_handle *handle, + u32 stringset, + u8 *data) +{ + u8 *p = (char *)data; + int size; + + if (stringset == ETH_SS_STATS) { + size = ARRAY_SIZE(g_mac_stats_string); + p = hclge_comm_get_strings(stringset, + g_mac_stats_string, + size, + p); + size = ARRAY_SIZE(g_all_32bit_stats_string); + p = hclge_comm_get_strings(stringset, + g_all_32bit_stats_string, + size, + p); + size = ARRAY_SIZE(g_all_64bit_stats_string); + p = hclge_comm_get_strings(stringset, + g_all_64bit_stats_string, + size, + p); + p = hclge_tqps_get_strings(handle, p); + } else if (stringset == ETH_SS_TEST) { + if (handle->flags & HNAE3_SUPPORT_MAC_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_MAC], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_SERDES_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_SERDES], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) { + memcpy(p, + hns3_nic_test_strs[HNAE3_MAC_INTER_LOOP_PHY], + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + } +} + +static void hclge_get_stats(struct hnae3_handle *handle, u64 *data) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u64 *p; + + p = hclge_comm_get_stats(&hdev->hw_stats.mac_stats, + g_mac_stats_string, + ARRAY_SIZE(g_mac_stats_string), + data); + p = hclge_comm_get_stats(&hdev->hw_stats.all_32_bit_stats, + g_all_32bit_stats_string, + ARRAY_SIZE(g_all_32bit_stats_string), + p); + p = hclge_comm_get_stats(&hdev->hw_stats.all_64_bit_stats, + g_all_64bit_stats_string, + ARRAY_SIZE(g_all_64bit_stats_string), + p); + p = hclge_tqps_get_stats(handle, p); +} + +static int hclge_parse_func_status(struct hclge_dev *hdev, + struct hclge_func_status *status) +{ + if (!(status->pf_state & HCLGE_PF_STATE_DONE)) + return -EINVAL; + + /* Set the pf to main pf */ + if (status->pf_state & HCLGE_PF_STATE_MAIN) + hdev->flag |= HCLGE_FLAG_MAIN; + else + hdev->flag &= ~HCLGE_FLAG_MAIN; + + hdev->num_req_vfs = status->vf_num / status->pf_num; + return 0; +} + +static int hclge_query_function_status(struct hclge_dev *hdev) +{ + struct hclge_func_status *req; + struct hclge_desc desc; + int timeout = 0; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true); + req = (struct hclge_func_status *)desc.data; + + do { + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query function status failed %d.\n", + ret); + + return ret; + } + + /* Check pf reset is done */ + if (req->pf_state) + break; + usleep_range(1000, 2000); + } while (timeout++ < 5); + + ret = hclge_parse_func_status(hdev, req); + + return ret; +} + +static int hclge_query_pf_resource(struct hclge_dev *hdev) +{ + struct 
hclge_pf_res *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "query pf resource failed %d.\n", ret); + return ret; + } + + req = (struct hclge_pf_res *)desc.data; + hdev->num_tqps = __le16_to_cpu(req->tqp_num); + hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; + + if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) { + hdev->num_roce_msix = + hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + + /* PF should have NIC vectors and Roce vectors, + * NIC vectors are queued before Roce vectors. + */ + hdev->num_msi = hdev->num_roce_msix + HCLGE_ROCE_VECTOR_OFFSET; + } else { + hdev->num_msi = + hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), + HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); + } + + return 0; +} + +static int hclge_parse_speed(int speed_cmd, int *speed) +{ + switch (speed_cmd) { + case 6: + *speed = HCLGE_MAC_SPEED_10M; + break; + case 7: + *speed = HCLGE_MAC_SPEED_100M; + break; + case 0: + *speed = HCLGE_MAC_SPEED_1G; + break; + case 1: + *speed = HCLGE_MAC_SPEED_10G; + break; + case 2: + *speed = HCLGE_MAC_SPEED_25G; + break; + case 3: + *speed = HCLGE_MAC_SPEED_40G; + break; + case 4: + *speed = HCLGE_MAC_SPEED_50G; + break; + case 5: + *speed = HCLGE_MAC_SPEED_100G; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc) +{ + struct hclge_cfg_param *req; + u64 mac_addr_tmp_high; + u64 mac_addr_tmp; + int i; + + req = (struct hclge_cfg_param *)desc[0].data; + + /* get the configuration */ + cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_VMDQ_M, + HCLGE_CFG_VMDQ_S); + cfg->tc_num = hnae_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S); + cfg->tqp_desc_num = hnae_get_field(__le32_to_cpu(req->param[0]), + HCLGE_CFG_TQP_DESC_N_M, + HCLGE_CFG_TQP_DESC_N_S); + + cfg->phy_addr = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_PHY_ADDR_M, + HCLGE_CFG_PHY_ADDR_S); + cfg->media_type = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_MEDIA_TP_M, + HCLGE_CFG_MEDIA_TP_S); + cfg->rx_buf_len = hnae_get_field(__le32_to_cpu(req->param[1]), + HCLGE_CFG_RX_BUF_LEN_M, + HCLGE_CFG_RX_BUF_LEN_S); + /* get mac_address */ + mac_addr_tmp = __le32_to_cpu(req->param[2]); + mac_addr_tmp_high = hnae_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_MAC_ADDR_H_M, + HCLGE_CFG_MAC_ADDR_H_S); + + mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1; + + cfg->default_speed = hnae_get_field(__le32_to_cpu(req->param[3]), + HCLGE_CFG_DEFAULT_SPEED_M, + HCLGE_CFG_DEFAULT_SPEED_S); + for (i = 0; i < ETH_ALEN; i++) + cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff; + + req = (struct hclge_cfg_param *)desc[1].data; + cfg->numa_node_map = __le32_to_cpu(req->param[0]); +} + +/* hclge_get_cfg: query the static parameter from flash + * @hdev: pointer to struct hclge_dev + * @hcfg: the config structure to be getted + */ +static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg) +{ + struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM]; + struct hclge_cfg_param *req; + int i, ret; + + for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) { + req = (struct hclge_cfg_param *)desc[i].data; + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM, + true); + hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M, + 
HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES); + /* Len should be united by 4 bytes when send to hardware */ + hnae_set_field(req->offset, HCLGE_CFG_RD_LEN_M, + HCLGE_CFG_RD_LEN_S, + HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT); + req->offset = cpu_to_le32(req->offset); + } + + ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM); + if (ret) { + dev_err(&hdev->pdev->dev, + "get config failed %d.\n", ret); + return ret; + } + + hclge_parse_cfg(hcfg, desc); + return 0; +} + +static int hclge_get_cap(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_query_function_status(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "query function status error %d.\n", ret); + return ret; + } + + /* get pf resource */ + ret = hclge_query_pf_resource(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "query pf resource error %d.\n", ret); + return ret; + } + + return 0; +} + +static int hclge_configure(struct hclge_dev *hdev) +{ + struct hclge_cfg cfg; + int ret, i; + + ret = hclge_get_cfg(hdev, &cfg); + if (ret) { + dev_err(&hdev->pdev->dev, "get mac mode error %d.\n", ret); + return ret; + } + + hdev->num_vmdq_vport = cfg.vmdq_vport_num; + hdev->base_tqp_pid = 0; + hdev->rss_size_max = 1; + hdev->rx_buf_len = cfg.rx_buf_len; + for (i = 0; i < ETH_ALEN; i++) + hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i]; + hdev->hw.mac.media_type = cfg.media_type; + hdev->num_desc = cfg.tqp_desc_num; + hdev->tm_info.num_pg = 1; + hdev->tm_info.num_tc = cfg.tc_num; + hdev->tm_info.hw_pfc_map = 0; + + ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed); + if (ret) { + dev_err(&hdev->pdev->dev, "Get wrong speed ret=%d.\n", ret); + return ret; + } + + if ((hdev->tm_info.num_tc > HNAE3_MAX_TC) || + (hdev->tm_info.num_tc < 1)) { + dev_warn(&hdev->pdev->dev, "TC num = %d.\n", + hdev->tm_info.num_tc); + hdev->tm_info.num_tc = 1; + } + + /* Currently not support uncontiuous tc */ + for (i = 0; i < cfg.tc_num; i++) + hnae_set_bit(hdev->hw_tc_map, i, 1); + + if (!hdev->num_vmdq_vport && !hdev->num_req_vfs) + hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE; + else + hdev->tx_sch_mode = HCLGE_FLAG_VNET_BASE_SCH_MODE; + + return ret; +} + +static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, + int tso_mss_max) +{ + struct hclge_cfg_tso_status *req; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false); + + req = (struct hclge_cfg_tso_status *)desc.data; + hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_min); + hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MIN_M, + HCLGE_TSO_MSS_MIN_S, tso_mss_max); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_alloc_tqps(struct hclge_dev *hdev) +{ + struct hclge_tqp *tqp; + int i; + + hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, + sizeof(struct hclge_tqp), GFP_KERNEL); + if (!hdev->htqp) + return -ENOMEM; + + tqp = hdev->htqp; + + for (i = 0; i < hdev->num_tqps; i++) { + tqp->dev = &hdev->pdev->dev; + tqp->index = i; + + tqp->q.ae_algo = &ae_algo; + tqp->q.buf_size = hdev->rx_buf_len; + tqp->q.desc_num = hdev->num_desc; + tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET + + i * HCLGE_TQP_REG_SIZE; + + tqp++; + } + + return 0; +} + +static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id, + u16 tqp_pid, u16 tqp_vid, bool is_pf) +{ + struct hclge_tqp_map *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false); + + req = (struct hclge_tqp_map *)desc.data; + 
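/* Fill the queue-mapping descriptor: tqp_id selects the physical queue,
 * tqp_vf names the owning function, and tqp_vid is the queue index as seen
 * by that function; the flag bits record whether the target is a VF and
 * enable the mapping.
 */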
req->tqp_id = cpu_to_le16(tqp_pid); + req->tqp_vf = cpu_to_le16(func_id); + req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B | + 1 << HCLGE_TQP_MAP_EN_B; + req->tqp_vid = cpu_to_le16(tqp_vid); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_assign_tqp(struct hclge_vport *vport, + struct hnae3_queue **tqp, u16 num_tqps) +{ + struct hclge_dev *hdev = vport->back; + int i, alloced, func_id, ret; + bool is_pf; + + func_id = vport->vport_id; + is_pf = (vport->vport_id == 0) ? true : false; + + for (i = 0, alloced = 0; i < hdev->num_tqps && + alloced < num_tqps; i++) { + if (!hdev->htqp[i].alloced) { + hdev->htqp[i].q.handle = &vport->nic; + hdev->htqp[i].q.tqp_index = alloced; + tqp[alloced] = &hdev->htqp[i].q; + hdev->htqp[i].alloced = true; + ret = hclge_map_tqps_to_func(hdev, func_id, + hdev->htqp[i].index, + alloced, is_pf); + if (ret) + return ret; + + alloced++; + } + } + vport->alloc_tqps = num_tqps; + + return 0; +} + +static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps) +{ + struct hnae3_handle *nic = &vport->nic; + struct hnae3_knic_private_info *kinfo = &nic->kinfo; + struct hclge_dev *hdev = vport->back; + int i, ret; + + kinfo->num_desc = hdev->num_desc; + kinfo->rx_buf_len = hdev->rx_buf_len; + kinfo->num_tc = min_t(u16, num_tqps, hdev->tm_info.num_tc); + kinfo->rss_size + = min_t(u16, hdev->rss_size_max, num_tqps / kinfo->num_tc); + kinfo->num_tqps = kinfo->rss_size * kinfo->num_tc; + + for (i = 0; i < HNAE3_MAX_TC; i++) { + if (hdev->hw_tc_map & BIT(i)) { + kinfo->tc_info[i].enable = true; + kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; + kinfo->tc_info[i].tqp_count = kinfo->rss_size; + kinfo->tc_info[i].tc = i; + } else { + /* Set to default queue if TC is disable */ + kinfo->tc_info[i].enable = false; + kinfo->tc_info[i].tqp_offset = 0; + kinfo->tc_info[i].tqp_count = 1; + kinfo->tc_info[i].tc = 0; + } + } + + kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, + sizeof(struct hnae3_queue *), GFP_KERNEL); + if (!kinfo->tqp) + return -ENOMEM; + + ret = hclge_assign_tqp(vport, kinfo->tqp, kinfo->num_tqps); + if (ret) { + dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret); + return -EINVAL; + } + + return 0; +} + +static void hclge_unic_setup(struct hclge_vport *vport, u16 num_tqps) +{ + /* this would be initialized later */ +} + +static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) +{ + struct hnae3_handle *nic = &vport->nic; + struct hclge_dev *hdev = vport->back; + int ret; + + nic->pdev = hdev->pdev; + nic->ae_algo = &ae_algo; + nic->numa_node_mask = hdev->numa_node_mask; + + if (hdev->ae_dev->dev_type == HNAE3_DEV_KNIC) { + ret = hclge_knic_setup(vport, num_tqps); + if (ret) { + dev_err(&hdev->pdev->dev, "knic setup failed %d\n", + ret); + return ret; + } + } else { + hclge_unic_setup(vport, num_tqps); + } + + return 0; +} + +static int hclge_alloc_vport(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_vport *vport; + u32 tqp_main_vport; + u32 tqp_per_vport; + int num_vport, i; + int ret; + + /* We need to alloc a vport for main NIC of PF */ + num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + + if (hdev->num_tqps < num_vport) + num_vport = hdev->num_tqps; + + /* Alloc the same number of TQPs for every vport */ + tqp_per_vport = hdev->num_tqps / num_vport; + tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport; + + vport = devm_kcalloc(&pdev->dev, 
num_vport, sizeof(struct hclge_vport), + GFP_KERNEL); + if (!vport) + return -ENOMEM; + + hdev->vport = vport; + hdev->num_alloc_vport = num_vport; + +#ifdef CONFIG_PCI_IOV + /* Enable SRIOV */ + if (hdev->num_req_vfs) { + dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n", + hdev->num_req_vfs); + ret = pci_enable_sriov(hdev->pdev, hdev->num_req_vfs); + if (ret) { + hdev->num_alloc_vfs = 0; + dev_err(&pdev->dev, "SRIOV enable failed %d\n", + ret); + return ret; + } + } + hdev->num_alloc_vfs = hdev->num_req_vfs; +#endif + + for (i = 0; i < num_vport; i++) { + vport->back = hdev; + vport->vport_id = i; + + if (i == 0) + ret = hclge_vport_setup(vport, tqp_main_vport); + else + ret = hclge_vport_setup(vport, tqp_per_vport); + if (ret) { + dev_err(&pdev->dev, + "vport setup failed for vport %d, %d\n", + i, ret); + return ret; + } + + vport++; + } + + return 0; +} + +static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev, u16 buf_size) +{ +/* TX buffer size is unit by 128 byte */ +#define HCLGE_BUF_SIZE_UNIT_SHIFT 7 +#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15) + struct hclge_tx_buff_alloc *req; + struct hclge_desc desc; + int ret; + u8 i; + + req = (struct hclge_tx_buff_alloc *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0); + for (i = 0; i < HCLGE_TC_NUM; i++) + req->tx_pkt_buff[i] = + cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) | + HCLGE_BUF_SIZE_UPDATE_EN_MSK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_tx_buffer_alloc(struct hclge_dev *hdev, u32 buf_size) +{ + int ret = hclge_cmd_alloc_tx_buff(hdev, buf_size); + + if (ret) { + dev_err(&hdev->pdev->dev, + "tx buffer alloc failed %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_tc_num(struct hclge_dev *hdev) +{ + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i)) + cnt++; + return cnt; +} + +static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev) +{ + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) + if (hdev->hw_tc_map & BIT(i) && + hdev->tm_info.hw_pfc_map & BIT(i)) + cnt++; + return cnt; +} + +/* Get the number of pfc enabled TCs, which have private buffer */ +static int hclge_get_pfc_priv_num(struct hclge_dev *hdev) +{ + struct hclge_priv_buf *priv; + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if ((hdev->tm_info.hw_pfc_map & BIT(i)) && + priv->enable) + cnt++; + } + + return cnt; +} + +/* Get the number of pfc disabled TCs, which have private buffer */ +static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev) +{ + struct hclge_priv_buf *priv; + int i, cnt = 0; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if (hdev->hw_tc_map & BIT(i) && + !(hdev->tm_info.hw_pfc_map & BIT(i)) && + priv->enable) + cnt++; + } + + return cnt; +} + +static u32 hclge_get_rx_priv_buff_alloced(struct hclge_dev *hdev) +{ + struct hclge_priv_buf *priv; + u32 rx_priv = 0; + int i; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if (priv->enable) + rx_priv += priv->buf_size; + } + return rx_priv; +} + +static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all) +{ + u32 shared_buf_min, shared_buf_tc, shared_std; + int tc_num, pfc_enable_num; + u32 shared_buf; + u32 rx_priv; + int i; + + tc_num = hclge_get_tc_num(hdev); + pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); + + 
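/* Size the shared buffer: it must cover at least two maximum-sized packets
 * plus HCLGE_DEFAULT_DV, and also a per-TC estimate that reserves a full
 * MPS for each PFC-enabled TC, half an MPS for every other enabled TC,
 * plus one extra MPS; the larger of the two is required on top of the
 * private buffers already allocated.
 */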
shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; + shared_buf_tc = pfc_enable_num * hdev->mps + + (tc_num - pfc_enable_num) * hdev->mps / 2 + + hdev->mps; + shared_std = max_t(u32, shared_buf_min, shared_buf_tc); + + rx_priv = hclge_get_rx_priv_buff_alloced(hdev); + if (rx_all <= rx_priv + shared_std) + return false; + + shared_buf = rx_all - rx_priv; + hdev->s_buf.buf_size = shared_buf; + hdev->s_buf.self.high = shared_buf; + hdev->s_buf.self.low = 2 * hdev->mps; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + if ((hdev->hw_tc_map & BIT(i)) && + (hdev->tm_info.hw_pfc_map & BIT(i))) { + hdev->s_buf.tc_thrd[i].low = hdev->mps; + hdev->s_buf.tc_thrd[i].high = 2 * hdev->mps; + } else { + hdev->s_buf.tc_thrd[i].low = 0; + hdev->s_buf.tc_thrd[i].high = hdev->mps; + } + } + + return true; +} + +/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs + * @hdev: pointer to struct hclge_dev + * @tx_size: the allocated tx buffer for all TCs + * @return: 0: calculate sucessful, negative: fail + */ +int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size) +{ + u32 rx_all = hdev->pkt_buf_size - tx_size; + int no_pfc_priv_num, pfc_priv_num; + struct hclge_priv_buf *priv; + int i; + + /* step 1, try to alloc private buffer for all enabled tc */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + if (hdev->hw_tc_map & BIT(i)) { + priv->enable = 1; + if (hdev->tm_info.hw_pfc_map & BIT(i)) { + priv->wl.low = hdev->mps; + priv->wl.high = priv->wl.low + hdev->mps; + priv->buf_size = priv->wl.high + + HCLGE_DEFAULT_DV; + } else { + priv->wl.low = 0; + priv->wl.high = 2 * hdev->mps; + priv->buf_size = priv->wl.high; + } + } + } + + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + /* step 2, try to decrease the buffer size of + * no pfc TC's private buffer + */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + priv = &hdev->priv_buf[i]; + + if (hdev->hw_tc_map & BIT(i)) + priv->enable = 1; + + if (hdev->tm_info.hw_pfc_map & BIT(i)) { + priv->wl.low = 128; + priv->wl.high = priv->wl.low + hdev->mps; + priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; + } else { + priv->wl.low = 0; + priv->wl.high = hdev->mps; + priv->buf_size = priv->wl.high; + } + } + + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + /* step 3, try to reduce the number of pfc disabled TCs, + * which have private buffer + */ + /* get the total no pfc enable TC number, which have private buffer */ + no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev); + + /* let the last to be cleared first */ + for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { + priv = &hdev->priv_buf[i]; + + if (hdev->hw_tc_map & BIT(i) && + !(hdev->tm_info.hw_pfc_map & BIT(i))) { + /* Clear the no pfc TC private buffer */ + priv->wl.low = 0; + priv->wl.high = 0; + priv->buf_size = 0; + priv->enable = 0; + no_pfc_priv_num--; + } + + if (hclge_is_rx_buf_ok(hdev, rx_all) || + no_pfc_priv_num == 0) + break; + } + + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + /* step 4, try to reduce the number of pfc enabled TCs + * which have private buffer. 
+ */ + pfc_priv_num = hclge_get_pfc_priv_num(hdev); + + /* let the last to be cleared first */ + for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) { + priv = &hdev->priv_buf[i]; + + if (hdev->hw_tc_map & BIT(i) && + hdev->tm_info.hw_pfc_map & BIT(i)) { + /* Reduce the number of pfc TC with private buffer */ + priv->wl.low = 0; + priv->enable = 0; + priv->wl.high = 0; + priv->buf_size = 0; + pfc_priv_num--; + } + + if (hclge_is_rx_buf_ok(hdev, rx_all) || + pfc_priv_num == 0) + break; + } + if (hclge_is_rx_buf_ok(hdev, rx_all)) + return 0; + + return -ENOMEM; +} + +static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev) +{ + struct hclge_rx_priv_buff *req; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false); + req = (struct hclge_rx_priv_buff *)desc.data; + + /* Alloc private buffer TCs */ + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + struct hclge_priv_buf *priv = &hdev->priv_buf[i]; + + req->buf_num[i] = + cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S); + req->buf_num[i] |= + cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B); + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "rx private buffer alloc cmd failed %d\n", ret); + return ret; + } + + return 0; +} + +#define HCLGE_PRIV_ENABLE(a) ((a) > 0 ? 1 : 0) + +static int hclge_rx_priv_wl_config(struct hclge_dev *hdev) +{ + struct hclge_rx_priv_wl_buf *req; + struct hclge_priv_buf *priv; + struct hclge_desc desc[2]; + int i, j; + int ret; + + for (i = 0; i < 2; i++) { + hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC, + false); + req = (struct hclge_rx_priv_wl_buf *)desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + priv = &hdev->priv_buf[i * HCLGE_TC_NUM_ONE_DESC + j]; + req->tc_wl[j].high = + cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S); + req->tc_wl[j].high |= + cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.high) << + HCLGE_RX_PRIV_EN_B); + req->tc_wl[j].low = + cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S); + req->tc_wl[j].low |= + cpu_to_le16(HCLGE_PRIV_ENABLE(priv->wl.low) << + HCLGE_RX_PRIV_EN_B); + } + } + + /* Send 2 descriptor at one time */ + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "rx private waterline config cmd failed %d\n", + ret); + return ret; + } + return 0; +} + +static int hclge_common_thrd_config(struct hclge_dev *hdev) +{ + struct hclge_shared_buf *s_buf = &hdev->s_buf; + struct hclge_rx_com_thrd *req; + struct hclge_desc desc[2]; + struct hclge_tc_thrd *tc; + int i, j; + int ret; + + for (i = 0; i < 2; i++) { + hclge_cmd_setup_basic_desc(&desc[i], + HCLGE_OPC_RX_COM_THRD_ALLOC, false); + req = (struct hclge_rx_com_thrd *)&desc[i].data; + + /* The first descriptor set the NEXT bit to 1 */ + if (i == 0) + desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) { + tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j]; + + req->com_thrd[j].high = + cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S); + req->com_thrd[j].high |= + cpu_to_le16(HCLGE_PRIV_ENABLE(tc->high) << + HCLGE_RX_PRIV_EN_B); + req->com_thrd[j].low = + cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S); + req->com_thrd[j].low |= + cpu_to_le16(HCLGE_PRIV_ENABLE(tc->low) << + HCLGE_RX_PRIV_EN_B); + } + } + + /* Send 2 descriptors 
at one time */ + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "common threshold config cmd failed %d\n", ret); + return ret; + } + return 0; +} + +static int hclge_common_wl_config(struct hclge_dev *hdev) +{ + struct hclge_shared_buf *buf = &hdev->s_buf; + struct hclge_rx_com_wl *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false); + + req = (struct hclge_rx_com_wl *)desc.data; + req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S); + req->com_wl.high |= + cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.high) << + HCLGE_RX_PRIV_EN_B); + + req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S); + req->com_wl.low |= + cpu_to_le16(HCLGE_PRIV_ENABLE(buf->self.low) << + HCLGE_RX_PRIV_EN_B); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "common waterline config cmd failed %d\n", ret); + return ret; + } + + return 0; +} + +int hclge_buffer_alloc(struct hclge_dev *hdev) +{ + u32 tx_buf_size = HCLGE_DEFAULT_TX_BUF; + int ret; + + hdev->priv_buf = devm_kmalloc_array(&hdev->pdev->dev, HCLGE_MAX_TC_NUM, + sizeof(struct hclge_priv_buf), + GFP_KERNEL | __GFP_ZERO); + if (!hdev->priv_buf) + return -ENOMEM; + + ret = hclge_tx_buffer_alloc(hdev, tx_buf_size); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not alloc tx buffers %d\n", ret); + return ret; + } + + ret = hclge_rx_buffer_calc(hdev, tx_buf_size); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not calc rx priv buffer size for all TCs %d\n", + ret); + return ret; + } + + ret = hclge_rx_priv_buf_alloc(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n", + ret); + return ret; + } + + ret = hclge_rx_priv_wl_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure rx private waterline %d\n", ret); + return ret; + } + + ret = hclge_common_thrd_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure common threshold %d\n", ret); + return ret; + } + + ret = hclge_common_wl_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not configure common waterline %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_init_roce_base_info(struct hclge_vport *vport) +{ + struct hnae3_handle *roce = &vport->roce; + struct hnae3_handle *nic = &vport->nic; + + roce->rinfo.num_vectors = vport->back->num_roce_msix; + + if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors || + vport->back->num_msi_left == 0) + return -EINVAL; + + roce->rinfo.base_vector = vport->back->roce_base_vector; + + roce->rinfo.netdev = nic->kinfo.netdev; + roce->rinfo.roce_io_base = vport->back->hw.io_base; + + roce->pdev = nic->pdev; + roce->ae_algo = nic->ae_algo; + roce->numa_node_mask = nic->numa_node_mask; + + return 0; +} + +static int hclge_init_msix(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret, i; + + hdev->msix_entries = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!hdev->msix_entries) + return -ENOMEM; + + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) + return -ENOMEM; + + for (i = 0; i < hdev->num_msi; i++) { + hdev->msix_entries[i].entry = i; + hdev->vector_status[i] = HCLGE_INVALID_VPORT; + } + + hdev->num_msi_left = hdev->num_msi; + hdev->base_msi_vector = hdev->pdev->irq; + hdev->roce_base_vector = hdev->base_msi_vector + + HCLGE_ROCE_VECTOR_OFFSET; + + ret = 
pci_enable_msix_range(hdev->pdev, hdev->msix_entries, + hdev->num_msi, hdev->num_msi); + if (ret < 0) { + dev_info(&hdev->pdev->dev, + "MSI-X vector alloc failed: %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_init_msi(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int vectors; + int i; + + hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi, + sizeof(u16), GFP_KERNEL); + if (!hdev->vector_status) + return -ENOMEM; + + for (i = 0; i < hdev->num_msi; i++) + hdev->vector_status[i] = HCLGE_INVALID_VPORT; + + vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi, PCI_IRQ_MSI); + if (vectors < 0) { + dev_err(&pdev->dev, "MSI vectors enable failed %d\n", vectors); + return -EINVAL; + } + hdev->num_msi = vectors; + hdev->num_msi_left = vectors; + hdev->base_msi_vector = pdev->irq; + hdev->roce_base_vector = hdev->base_msi_vector + + HCLGE_ROCE_VECTOR_OFFSET; + + return 0; +} + +static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed) +{ + struct hclge_mac *mac = &hdev->hw.mac; + + if ((speed == HCLGE_MAC_SPEED_10M) || (speed == HCLGE_MAC_SPEED_100M)) + mac->duplex = (u8)duplex; + else + mac->duplex = HCLGE_MAC_FULL; + + mac->speed = speed; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) +{ + struct hclge_config_mac_speed_dup *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_config_mac_speed_dup *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false); + + hnae_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, !!duplex); + + switch (speed) { + case HCLGE_MAC_SPEED_10M: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 6); + break; + case HCLGE_MAC_SPEED_100M: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 7); + break; + case HCLGE_MAC_SPEED_1G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 0); + break; + case HCLGE_MAC_SPEED_10G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 1); + break; + case HCLGE_MAC_SPEED_25G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 2); + break; + case HCLGE_MAC_SPEED_40G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 3); + break; + case HCLGE_MAC_SPEED_50G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 4); + break; + case HCLGE_MAC_SPEED_100G: + hnae_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, + HCLGE_CFG_SPEED_S, 5); + break; + default: + dev_err(&hdev->pdev->dev, "invald speed (%d)\n", speed); + return -EINVAL; + } + + hnae_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B, + 1); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config cmd failed %d.\n", ret); + return ret; + } + + hclge_check_speed_dup(hdev, duplex, speed); + + return 0; +} + +static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, + u8 duplex) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_cfg_mac_speed_dup(hdev, speed, duplex); +} + +static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, + u8 *duplex) +{ + struct hclge_query_an_speed_dup *req; + struct hclge_desc desc; + int speed_tmp; + int ret; + + req = (struct hclge_query_an_speed_dup *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + 
dev_err(&hdev->pdev->dev, + "mac speed/autoneg/duplex query cmd failed %d\n", + ret); + return ret; + } + + *duplex = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); + speed_tmp = hnae_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, + HCLGE_QUERY_SPEED_S); + + ret = hclge_parse_speed(speed_tmp, speed); + if (ret) { + dev_err(&hdev->pdev->dev, + "could not parse speed(=%d), %d\n", speed_tmp, ret); + return -EIO; + } + + return 0; +} + +static int hclge_query_autoneg_result(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + struct hclge_query_an_speed_dup *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_query_an_speed_dup *)desc.data; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "autoneg result query cmd failed %d.\n", ret); + return ret; + } + + mac->autoneg = hnae_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_AN_B); + + return 0; +} + +static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) +{ + struct hclge_config_auto_neg *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false); + + req = (struct hclge_config_auto_neg *)desc.data; + hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_set_autoneg_en(hdev, enable); +} + +static int hclge_get_autoneg(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_query_autoneg_result(hdev); + + return hdev->hw.mac.autoneg; +} + +static int hclge_mac_init(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + int ret; + + ret = hclge_cfg_mac_speed_dup(hdev, hdev->hw.mac.speed, HCLGE_MAC_FULL); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config mac speed dup fail ret=%d\n", ret); + return ret; + } + + mac->link = 0; + + ret = hclge_mac_mdio_config(hdev); + if (ret) { + dev_warn(&hdev->pdev->dev, + "mdio config fail ret=%d\n", ret); + return ret; + } + + /* Initialize the MTA table work mode */ + hdev->accept_mta_mc = true; + hdev->enable_mta = true; + hdev->mta_mac_sel_type = HCLGE_MAC_ADDR_47_36; + + ret = hclge_set_mta_filter_mode(hdev, + hdev->mta_mac_sel_type, + hdev->enable_mta); + if (ret) { + dev_err(&hdev->pdev->dev, "set mta filter mode failed %d\n", + ret); + return ret; + } + + return hclge_cfg_func_mta_filter(hdev, 0, hdev->accept_mta_mc); +} + +static void hclge_task_schedule(struct hclge_dev *hdev) +{ + if (!test_bit(HCLGE_STATE_DOWN, &hdev->state) && + !test_bit(HCLGE_STATE_REMOVING, &hdev->state) && + !test_and_set_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)) + (void)schedule_work(&hdev->service_task); +} + +static int hclge_get_mac_link_status(struct hclge_dev *hdev) +{ + struct hclge_link_status *req; + struct hclge_desc desc; + int link_status; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n", + ret); + return ret; + } + + req = (struct hclge_link_status *)desc.data; + link_status = 
req->status & HCLGE_LINK_STATUS; + + return !!link_status; +} + +static int hclge_get_mac_phy_link(struct hclge_dev *hdev) +{ + int mac_state; + int link_stat; + + mac_state = hclge_get_mac_link_status(hdev); + + if (hdev->hw.mac.phydev) { + if (!genphy_read_status(hdev->hw.mac.phydev)) + link_stat = mac_state & + hdev->hw.mac.phydev->link; + else + link_stat = 0; + + } else { + link_stat = mac_state; + } + + return !!link_stat; +} + +static void hclge_update_link_status(struct hclge_dev *hdev) +{ + struct hnae3_client *client = hdev->nic_client; + struct hnae3_handle *handle; + int state; + int i; + + if (!client) + return; + state = hclge_get_mac_phy_link(hdev); + if (state != hdev->hw.mac.link) { + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + handle = &hdev->vport[i].nic; + client->ops->link_status_change(handle, state); + } + hdev->hw.mac.link = state; + } +} + +static int hclge_update_speed_duplex(struct hclge_dev *hdev) +{ + struct hclge_mac mac = hdev->hw.mac; + u8 duplex; + int speed; + int ret; + + /* get the speed and duplex as autoneg'result from mac cmd when phy + * doesn't exit. + */ + if (mac.phydev) + return 0; + + /* update mac->antoneg. */ + ret = hclge_query_autoneg_result(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "autoneg result query failed %d\n", ret); + return ret; + } + + if (!mac.autoneg) + return 0; + + ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac autoneg/speed/duplex query failed %d\n", ret); + return ret; + } + + if ((mac.speed != speed) || (mac.duplex != duplex)) { + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); + if (ret) { + dev_err(&hdev->pdev->dev, + "mac speed/duplex config failed %d\n", ret); + return ret; + } + } + + return 0; +} + +static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_update_speed_duplex(hdev); +} + +static int hclge_get_status(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + hclge_update_link_status(hdev); + + return hdev->hw.mac.link; +} + +static void hclge_service_timer(unsigned long data) +{ + struct hclge_dev *hdev = (struct hclge_dev *)data; + (void)mod_timer(&hdev->service_timer, jiffies + HZ); + + hclge_task_schedule(hdev); +} + +static void hclge_service_complete(struct hclge_dev *hdev) +{ + WARN_ON(!test_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state)); + + /* Flush memory before next watchdog */ + smp_mb__before_atomic(); + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); +} + +static void hclge_service_task(struct work_struct *work) +{ + struct hclge_dev *hdev = + container_of(work, struct hclge_dev, service_task); + + hclge_update_speed_duplex(hdev); + hclge_update_link_status(hdev); + hclge_update_stats_for_all(hdev); + hclge_service_complete(hdev); +} + +static void hclge_disable_sriov(struct hclge_dev *hdev) +{ +#ifdef CONFIG_PCI_IOV + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(hdev->pdev)) { + dev_warn(&hdev->pdev->dev, + "disabling driver while VFs are assigned\n"); + return; + } + + pci_disable_sriov(hdev->pdev); +#endif +} + +struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) +{ + /* VF handle has no client */ + if (!handle->client) + return container_of(handle, struct hclge_vport, nic); + else if 
(handle->client->type == HNAE3_CLIENT_ROCE) + return container_of(handle, struct hclge_vport, roce); + else + return container_of(handle, struct hclge_vport, nic); +} + +static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num, + struct hnae3_vector_info *vector_info) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_vector_info *vector = vector_info; + struct hclge_dev *hdev = vport->back; + int alloc = 0; + int i, j; + + vector_num = min(hdev->num_msi_left, vector_num); + + for (j = 0; j < vector_num; j++) { + for (i = 1; i < hdev->num_msi; i++) { + if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) { + vector->vector = pci_irq_vector(hdev->pdev, i); + vector->io_addr = hdev->hw.io_base + + HCLGE_VECTOR_REG_BASE + + (i - 1) * HCLGE_VECTOR_REG_OFFSET + + vport->vport_id * + HCLGE_VECTOR_VF_OFFSET; + hdev->vector_status[i] = vport->vport_id; + + vector++; + alloc++; + + break; + } + } + } + hdev->num_msi_left -= alloc; + hdev->num_msi_used += alloc; + + return alloc; +} + +static int hclge_get_vector_index(struct hclge_dev *hdev, int vector) +{ + int i; + + for (i = 0; i < hdev->num_msi; i++) { + if (hdev->msix_entries) { + if (vector == hdev->msix_entries[i].vector) + return i; + } else { + if (vector == (hdev->base_msi_vector + i)) + return i; + } + } + return -EINVAL; +} + +static u32 hclge_get_rss_key_size(struct hnae3_handle *handle) +{ + return HCLGE_RSS_KEY_SIZE; +} + +static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle) +{ + return HCLGE_RSS_IND_TBL_SIZE; +} + +static int hclge_get_rss_algo(struct hclge_dev *hdev) +{ + struct hclge_rss_config *req; + struct hclge_desc desc; + int rss_hash_algo; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get link status error, status =%d\n", ret); + return ret; + } + + req = (struct hclge_rss_config *)desc.data; + rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK); + + if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ) + return ETH_RSS_HASH_TOP; + + return -EINVAL; +} + +static int hclge_set_rss_algo_key(struct hclge_dev *hdev, + const u8 hfunc, const u8 *key) +{ + struct hclge_rss_config *req; + struct hclge_desc desc; + int key_offset; + int key_size; + int ret; + + req = (struct hclge_rss_config *)desc.data; + + for (key_offset = 0; key_offset < 3; key_offset++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG, + false); + + req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK); + req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B); + + if (key_offset == 2) + key_size = + HCLGE_RSS_KEY_SIZE - HCLGE_RSS_HASH_KEY_NUM * 2; + else + key_size = HCLGE_RSS_HASH_KEY_NUM; + + memcpy(req->hash_key, + key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure RSS config fail, status = %d\n", + ret); + return ret; + } + } + return 0; +} + +static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir) +{ + struct hclge_rss_indirection_table *req; + struct hclge_desc desc; + int i, j; + int ret; + + req = (struct hclge_rss_indirection_table *)desc.data; + + for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) { + hclge_cmd_setup_basic_desc + (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false); + + req->start_table_index = i * HCLGE_RSS_CFG_TBL_SIZE; + req->rss_set_bitmap = HCLGE_RSS_SET_BITMAP_MSK; + + for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++) 
+ req->rss_result[j] = + indir[i * HCLGE_RSS_CFG_TBL_SIZE + j]; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure rss indir table fail,status = %d\n", + ret); + return ret; + } + } + return 0; +} + +static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid, + u16 *tc_size, u16 *tc_offset) +{ + struct hclge_rss_tc_mode *req; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false); + req = (struct hclge_rss_tc_mode *)desc.data; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B, + (tc_valid[i] & 0x1)); + hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_SIZE_M, + HCLGE_RSS_TC_SIZE_S, tc_size[i]); + hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_OFFSET_M, + HCLGE_RSS_TC_OFFSET_S, tc_offset[i]); + } + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure rss tc mode fail, status = %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_set_rss_input_tuple(struct hclge_dev *hdev) +{ +#define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf +#define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f + struct hclge_rss_input_tuple *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false); + + req = (struct hclge_rss_input_tuple *)desc.data; + req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; + req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv6_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv6_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + req->ipv6_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP; + req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Configure rss input fail, status = %d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir, + u8 *key, u8 *hfunc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i; + + /* Get hash algorithm */ + if (hfunc) + *hfunc = hclge_get_rss_algo(hdev); + + /* Get the RSS Key required by the user */ + if (key) + memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE); + + /* Get indirect table */ + if (indir) + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) + indir[i] = vport->rss_indirection_tbl[i]; + + return 0; +} + +static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + u8 hash_algo; + int ret, i; + + /* Set the RSS Hash Key if specififed by the user */ + if (key) { + /* Update the shadow RSS key with user specified qids */ + memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE); + + if (hfunc == ETH_RSS_HASH_TOP || + hfunc == ETH_RSS_HASH_NO_CHANGE) + hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + else + return -EINVAL; + ret = hclge_set_rss_algo_key(hdev, hash_algo, key); + if (ret) + return ret; + } + + /* Update the shadow RSS table with user specified qids */ + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) + vport->rss_indirection_tbl[i] = indir[i]; + + /* Update the hardware */ + ret = hclge_set_rss_indir_table(hdev, indir); + return ret; +} + +static int hclge_get_tc_size(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct 
hclge_dev *hdev = vport->back; + + return hdev->rss_size_max; +} + +static int hclge_rss_init_hw(struct hclge_dev *hdev) +{ + const u8 hfunc = HCLGE_RSS_HASH_ALGO_TOEPLITZ; + struct hclge_vport *vport = hdev->vport; + u16 tc_offset[HCLGE_MAX_TC_NUM]; + u8 rss_key[HCLGE_RSS_KEY_SIZE]; + u16 tc_valid[HCLGE_MAX_TC_NUM]; + u16 tc_size[HCLGE_MAX_TC_NUM]; + u32 *rss_indir = NULL; + const u8 *key; + int i, ret, j; + + rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL); + if (!rss_indir) + return -ENOMEM; + + /* Get default RSS key */ + netdev_rss_key_fill(rss_key, HCLGE_RSS_KEY_SIZE); + + /* Initialize RSS indirect table for each vport */ + for (j = 0; j < hdev->num_vmdq_vport + 1; j++) { + for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) { + vport[j].rss_indirection_tbl[i] = + i % hdev->rss_size_max; + rss_indir[i] = vport[j].rss_indirection_tbl[i]; + } + } + ret = hclge_set_rss_indir_table(hdev, rss_indir); + if (ret) + goto err; + + key = rss_key; + ret = hclge_set_rss_algo_key(hdev, hfunc, key); + if (ret) + goto err; + + ret = hclge_set_rss_input_tuple(hdev); + if (ret) + goto err; + + for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { + if (hdev->hw_tc_map & BIT(i)) + tc_valid[i] = 1; + else + tc_valid[i] = 0; + + switch (hdev->rss_size_max) { + case HCLGE_RSS_TC_SIZE_0: + tc_size[i] = 0; + break; + case HCLGE_RSS_TC_SIZE_1: + tc_size[i] = 1; + break; + case HCLGE_RSS_TC_SIZE_2: + tc_size[i] = 2; + break; + case HCLGE_RSS_TC_SIZE_3: + tc_size[i] = 3; + break; + case HCLGE_RSS_TC_SIZE_4: + tc_size[i] = 4; + break; + case HCLGE_RSS_TC_SIZE_5: + tc_size[i] = 5; + break; + case HCLGE_RSS_TC_SIZE_6: + tc_size[i] = 6; + break; + case HCLGE_RSS_TC_SIZE_7: + tc_size[i] = 7; + break; + default: + break; + } + tc_offset[i] = hdev->rss_size_max * i; + } + ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset); + +err: + kfree(rss_indir); + + return ret; +} + +int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_ctrl_vector_chain *req; + struct hnae3_ring_chain_node *node; + struct hclge_desc desc; + int ret; + int i; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false); + + req = (struct hclge_ctrl_vector_chain *)desc.data; + req->int_vector_id = vector_id; + + i = 0; + for (node = ring_chain; node; node = node->next) { + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + + if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { + req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", + ret); + return ret; + } + i = 0; + + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_ADD_RING_TO_VECTOR, + false); + req->int_vector_id = vector_id; + } + } + + if (i > 0) { + req->int_cause_num = i; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Map TQP fail, status is %d.\n", ret); + return ret; + } + } + + return 0; +} + +int hclge_map_handle_ring_to_vector(struct hnae3_handle *handle, + int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int vector_id; + + vector_id = 
hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&hdev->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + return hclge_map_vport_ring_to_vector(vport, vector_id, ring_chain); +} + +static int hclge_unmap_ring_from_vector( + struct hnae3_handle *handle, int vector, + struct hnae3_ring_chain_node *ring_chain) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_ctrl_vector_chain *req; + struct hnae3_ring_chain_node *node; + struct hclge_desc desc; + int i, vector_id; + int ret; + + vector_id = hclge_get_vector_index(hdev, vector); + if (vector_id < 0) { + dev_err(&handle->pdev->dev, + "Get vector index fail. ret =%d\n", vector_id); + return vector_id; + } + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false); + + req = (struct hclge_ctrl_vector_chain *)desc.data; + req->int_vector_id = vector_id; + + i = 0; + for (node = ring_chain; node; node = node->next) { + hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M, + HCLGE_INT_TYPE_S, + hnae_get_bit(node->flag, HNAE3_RING_TYPE_B)); + hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M, + HCLGE_TQP_ID_S, node->tqp_index); + + req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]); + + if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) { + req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Unmap TQP fail, status is %d.\n", + ret); + return ret; + } + i = 0; + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_ADD_RING_TO_VECTOR, + false); + req->int_vector_id = vector_id; + } + } + + if (i > 0) { + req->int_cause_num = i; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Unmap TQP fail, status is %d.\n", ret); + return ret; + } + } + + return 0; +} + +int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, + struct hclge_promisc_param *param) +{ + struct hclge_promisc_cfg *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false); + + req = (struct hclge_promisc_cfg *)desc.data; + req->vf_id = param->vf_id; + req->flag = (param->enable << HCLGE_PROMISC_EN_B); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set promisc mode fail, status is %d.\n", ret); + return ret; + } + return 0; +} + +void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, + bool en_mc, bool en_bc, int vport_id) +{ + if (!param) + return; + + memset(param, 0, sizeof(struct hclge_promisc_param)); + if (en_uc) + param->enable = HCLGE_PROMISC_EN_UC; + if (en_mc) + param->enable |= HCLGE_PROMISC_EN_MC; + if (en_bc) + param->enable |= HCLGE_PROMISC_EN_BC; + param->vf_id = vport_id; +} + +static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_promisc_param param; + + hclge_promisc_param_init(¶m, en, en, true, vport->vport_id); + hclge_cmd_set_promisc_mode(hdev, ¶m); +} + +static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) +{ + struct hclge_desc desc; + struct hclge_config_mac_mode *req = + (struct hclge_config_mac_mode *)desc.data; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_TX_EN_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_EN_B, 
enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_TX_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_RX_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_TX_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_RX_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_APP_LP_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_LINE_LP_B, 0); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_FCS_TX_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_FCS_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_RX_FCS_STRIP_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable); + hnae_set_bit(req->txrx_pad_fcs_loop_en, + HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "mac enable fail, ret =%d.\n", ret); +} + +static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id, + int stream_id, bool enable) +{ + struct hclge_desc desc; + struct hclge_cfg_com_tqp_queue *req = + (struct hclge_cfg_com_tqp_queue *)desc.data; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false); + req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK); + req->stream_id = cpu_to_le16(stream_id); + req->enable |= enable << HCLGE_TQP_ENABLE_B; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "Tqp enable fail, status =%d.\n", ret); + return ret; +} + +static void hclge_reset_tqp_stats(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hnae3_queue *queue; + struct hclge_tqp *tqp; + int i; + + for (i = 0; i < vport->alloc_tqps; i++) { + queue = handle->kinfo.tqp[i]; + tqp = container_of(queue, struct hclge_tqp, q); + memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); + } +} + +static int hclge_ae_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i, queue_id, ret; + + for (i = 0; i < vport->alloc_tqps; i++) { + /* todo clear interrupt */ + /* ring enable */ + queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); + if (queue_id < 0) { + dev_warn(&hdev->pdev->dev, + "Get invalid queue id, ignore it\n"); + continue; + } + + hclge_tqp_enable(hdev, queue_id, 0, true); + } + /* mac enable */ + hclge_cfg_mac_mode(hdev, true); + clear_bit(HCLGE_STATE_DOWN, &hdev->state); + (void)mod_timer(&hdev->service_timer, jiffies + HZ); + + ret = hclge_mac_start_phy(hdev); + if (ret) + return ret; + + /* reset tqp stats */ + hclge_reset_tqp_stats(handle); + + return 0; +} + +static void hclge_ae_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int i, queue_id; + + for (i = 0; i < vport->alloc_tqps; i++) { + /* Ring disable */ + queue_id = hclge_get_queue_id(handle->kinfo.tqp[i]); + if (queue_id < 0) { + dev_warn(&hdev->pdev->dev, + "Get invalid queue id, ignore it\n"); + continue; + } + + hclge_tqp_enable(hdev, queue_id, 0, false); + } + /* Mac disable */ + hclge_cfg_mac_mode(hdev, false); + + hclge_mac_stop_phy(hdev); + + /* reset tqp stats */ + hclge_reset_tqp_stats(handle); +} + +static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, + u16 cmdq_resp, u8 resp_code, + enum hclge_mac_vlan_tbl_opcode op) +{ + struct hclge_dev *hdev = 
vport->back; + int return_status = -EIO; + + if (cmdq_resp) { + dev_err(&hdev->pdev->dev, + "cmdq execute failed for get_mac_vlan_cmd_status,status=%d.\n", + cmdq_resp); + return -EIO; + } + + if (op == HCLGE_MAC_VLAN_ADD) { + if ((!resp_code) || (resp_code == 1)) { + return_status = 0; + } else if (resp_code == 2) { + return_status = -EIO; + dev_err(&hdev->pdev->dev, + "add mac addr failed for uc_overflow.\n"); + } else if (resp_code == 3) { + return_status = -EIO; + dev_err(&hdev->pdev->dev, + "add mac addr failed for mc_overflow.\n"); + } else { + dev_err(&hdev->pdev->dev, + "add mac addr failed for undefined, code=%d.\n", + resp_code); + } + } else if (op == HCLGE_MAC_VLAN_REMOVE) { + if (!resp_code) { + return_status = 0; + } else if (resp_code == 1) { + return_status = -EIO; + dev_dbg(&hdev->pdev->dev, + "remove mac addr failed for miss.\n"); + } else { + dev_err(&hdev->pdev->dev, + "remove mac addr failed for undefined, code=%d.\n", + resp_code); + } + } else if (op == HCLGE_MAC_VLAN_LKUP) { + if (!resp_code) { + return_status = 0; + } else if (resp_code == 1) { + return_status = -EIO; + dev_dbg(&hdev->pdev->dev, + "lookup mac addr failed for miss.\n"); + } else { + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for undefined, code=%d.\n", + resp_code); + } + } else { + return_status = -EIO; + dev_err(&hdev->pdev->dev, + "unknown opcode for get_mac_vlan_cmd_status,opcode=%d.\n", + op); + } + + return return_status; +} + +static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr) +{ + int word_num; + int bit_num; + + if (vfid > 255 || vfid < 0) + return -EIO; + + if (vfid >= 0 && vfid <= 191) { + word_num = vfid / 32; + bit_num = vfid % 32; + if (clr) + desc[1].data[word_num] &= ~(1 << bit_num); + else + desc[1].data[word_num] |= (1 << bit_num); + } else { + word_num = (vfid - 192) / 32; + bit_num = vfid % 32; + if (clr) + desc[2].data[word_num] &= ~(1 << bit_num); + else + desc[2].data[word_num] |= (1 << bit_num); + } + + return 0; +} + +static bool hclge_is_all_function_id_zero(struct hclge_desc *desc) +{ +#define HCLGE_DESC_NUMBER 3 +#define HCLGE_FUNC_NUMBER_PER_DESC 6 + int i, j; + + for (i = 0; i < HCLGE_DESC_NUMBER; i++) + for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++) + if (desc[i].data[j]) + return false; + + return true; +} + +static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req, + const u8 *addr) +{ + const unsigned char *mac_addr = addr; + u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) | + (mac_addr[0]) | (mac_addr[1] << 8); + u32 low_val = mac_addr[4] | (mac_addr[5] << 8); + + new_req->mac_addr_hi32 = cpu_to_le32(high_val); + new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff); +} + +u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport, + const u8 *addr) +{ + u16 high_val = addr[1] | (addr[0] << 8); + struct hclge_dev *hdev = vport->back; + u32 rsh = 4 - hdev->mta_mac_sel_type; + u16 ret_val = (high_val >> rsh) & 0xfff; + + return ret_val; +} + +static int hclge_set_mta_filter_mode(struct hclge_dev *hdev, + enum hclge_mta_dmac_sel_type mta_mac_sel, + bool enable) +{ + struct hclge_mta_filter_mode *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_mta_filter_mode *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false); + + hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B, + enable); + hnae_set_field(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_SEL_M, + HCLGE_CFG_MTA_MAC_SEL_S, mta_mac_sel); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + 
dev_err(&hdev->pdev->dev, + "Config mat filter mode failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, + u8 func_id, + bool enable) +{ + struct hclge_cfg_func_mta_filter *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_cfg_func_mta_filter *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false); + + hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B, + enable); + req->function_id = func_id; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config func_id enable failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_set_mta_table_item(struct hclge_vport *vport, + u16 idx, + bool enable) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_cfg_func_mta_item *req; + struct hclge_desc desc; + int ret; + + req = (struct hclge_cfg_func_mta_item *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false); + hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable); + + hnae_set_field(req->item_idx, HCLGE_CFG_MTA_ITEM_IDX_M, + HCLGE_CFG_MTA_ITEM_IDX_S, idx); + req->item_idx = cpu_to_le16(req->item_idx); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Config mta table item failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry *req) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + u8 resp_code; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false); + + memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry)); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "del mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (desc.data[0] >> 8) & 0xff; + + return hclge_get_mac_vlan_cmd_status(vport, desc.retval, resp_code, + HCLGE_MAC_VLAN_REMOVE); +} + +static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry *req, + struct hclge_desc *desc, + bool is_mc) +{ + struct hclge_dev *hdev = vport->back; + u8 resp_code; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true); + if (is_mc) { + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + memcpy(desc[0].data, + req, + sizeof(struct hclge_mac_vlan_tbl_entry)); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_MAC_VLAN_ADD, + true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], + HCLGE_OPC_MAC_VLAN_ADD, + true); + ret = hclge_cmd_send(&hdev->hw, desc, 3); + } else { + memcpy(desc[0].data, + req, + sizeof(struct hclge_mac_vlan_tbl_entry)); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + } + if (ret) { + dev_err(&hdev->pdev->dev, + "lookup mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + resp_code = (desc[0].data[0] >> 8) & 0xff; + + return hclge_get_mac_vlan_cmd_status(vport, desc[0].retval, resp_code, + HCLGE_MAC_VLAN_LKUP); +} + +static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport, + struct hclge_mac_vlan_tbl_entry *req, + struct hclge_desc *mc_desc) +{ + struct hclge_dev *hdev = vport->back; + int cfg_status; + u8 resp_code; + int ret; + + if (!mc_desc) { + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, + HCLGE_OPC_MAC_VLAN_ADD, + false); + memcpy(desc.data, req, 
sizeof(struct hclge_mac_vlan_tbl_entry)); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + resp_code = (desc.data[0] >> 8) & 0xff; + cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } else { + mc_desc[0].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + mc_desc[1].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR); + mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT); + memcpy(mc_desc[0].data, req, + sizeof(struct hclge_mac_vlan_tbl_entry)); + ret = hclge_cmd_send(&hdev->hw, mc_desc, 3); + resp_code = (mc_desc[0].data[0] >> 8) & 0xff; + cfg_status = hclge_get_mac_vlan_cmd_status(vport, + mc_desc[0].retval, + resp_code, + HCLGE_MAC_VLAN_ADD); + } + + if (ret) { + dev_err(&hdev->pdev->dev, + "add mac addr failed for cmd_send, ret =%d.\n", + ret); + return ret; + } + + return cfg_status; +} + +static int hclge_add_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_add_uc_addr_common(vport, addr); +} + +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + enum hclge_cmd_status status; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + dev_err(&hdev->pdev->dev, + "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n", + addr, + is_zero_ether_addr(addr), + is_broadcast_ether_addr(addr), + is_multicast_ether_addr(addr)); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0); + hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.egress_port, + HCLGE_MAC_EPORT_SW_EN_B, 0); + hnae_set_bit(req.egress_port, + HCLGE_MAC_EPORT_TYPE_B, 0); + hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_VFID_M, + HCLGE_MAC_EPORT_VFID_S, vport->vport_id); + hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_PFID_M, + HCLGE_MAC_EPORT_PFID_S, 0); + req.egress_port = cpu_to_le16(req.egress_port); + + hclge_prepare_mac_addr(&req, addr); + + status = hclge_add_mac_vlan_tbl(vport, &req, NULL); + + return status; +} + +static int hclge_rm_uc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_rm_uc_addr_common(vport, addr); +} + +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + enum hclge_cmd_status status; + + /* mac addr check */ + if (is_zero_ether_addr(addr) || + is_broadcast_ether_addr(addr) || + is_multicast_ether_addr(addr)) { + dev_dbg(&hdev->pdev->dev, + "Remove mac err! 
invalid mac:%pM.\n", + addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr); + status = hclge_remove_mac_vlan_tbl(vport, &req); + + return status; +} + +static int hclge_add_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_add_mc_addr_common(vport, addr); +} + +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + struct hclge_desc desc[3]; + u16 tbl_idx; + int status; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + dev_err(&hdev->pdev->dev, + "Add mc mac err! invalid mac:%pM.\n", + addr); + return -EINVAL; + } + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr); + status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); + if (!status) { + /* This mac addr exist, update VFID for it */ + hclge_update_desc_vfid(desc, vport->vport_id, false); + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } else { + /* This mac addr do not exist, add new entry for it */ + memset(desc[0].data, 0, sizeof(desc[0].data)); + memset(desc[1].data, 0, sizeof(desc[0].data)); + memset(desc[2].data, 0, sizeof(desc[0].data)); + hclge_update_desc_vfid(desc, vport->vport_id, false); + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + } + + /* Set MTA table for this MAC address */ + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); + status = hclge_set_mta_table_item(vport, tbl_idx, true); + + return status; +} + +static int hclge_rm_mc_addr(struct hnae3_handle *handle, + const unsigned char *addr) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_rm_mc_addr_common(vport, addr); +} + +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr) +{ + struct hclge_dev *hdev = vport->back; + struct hclge_mac_vlan_tbl_entry req; + enum hclge_cmd_status status; + struct hclge_desc desc[3]; + u16 tbl_idx; + + /* mac addr check */ + if (!is_multicast_ether_addr(addr)) { + dev_dbg(&hdev->pdev->dev, + "Remove mc mac err! 
invalid mac:%pM.\n", + addr); + return -EINVAL; + } + + memset(&req, 0, sizeof(req)); + hnae_set_bit(req.flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1); + hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0); + hclge_prepare_mac_addr(&req, addr); + status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true); + if (!status) { + /* This mac addr exist, remove this handle's VFID for it */ + hclge_update_desc_vfid(desc, vport->vport_id, true); + + if (hclge_is_all_function_id_zero(desc)) + /* All the vfid is zero, so need to delete this entry */ + status = hclge_remove_mac_vlan_tbl(vport, &req); + else + /* Not all the vfid is zero, update the vfid */ + status = hclge_add_mac_vlan_tbl(vport, &req, desc); + + } else { + /* This mac addr do not exist, can't delete it */ + dev_err(&hdev->pdev->dev, + "Rm mutilcast mac addr failed, ret = %d.\n", + status); + return -EIO; + } + + /* Set MTB table for this MAC address */ + tbl_idx = hclge_get_mac_addr_to_mta_index(vport, addr); + status = hclge_set_mta_table_item(vport, tbl_idx, false); + + return status; +} + +static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + ether_addr_copy(p, hdev->hw.mac.mac_addr); +} + +static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p) +{ + const unsigned char *new_addr = (const unsigned char *)p; + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + /* mac addr check */ + if (is_zero_ether_addr(new_addr) || + is_broadcast_ether_addr(new_addr) || + is_multicast_ether_addr(new_addr)) { + dev_err(&hdev->pdev->dev, + "Change uc mac err! 
invalid mac:%p.\n", + new_addr); + return -EINVAL; + } + + hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr); + + if (!hclge_add_uc_addr(handle, new_addr)) { + ether_addr_copy(hdev->hw.mac.mac_addr, new_addr); + return 0; + } + + return -EIO; +} + +static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type, + bool filter_en) +{ + struct hclge_vlan_filter_ctrl *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false); + + req = (struct hclge_vlan_filter_ctrl *)desc.data; + req->vlan_type = vlan_type; + req->vlan_fe = filter_en; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n", + ret); + return ret; + } + + return 0; +} + +int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid, + bool is_kill, u16 vlan, u8 qos, __be16 proto) +{ +#define HCLGE_MAX_VF_BYTES 16 + struct hclge_vlan_filter_vf_cfg *req0; + struct hclge_vlan_filter_vf_cfg *req1; + struct hclge_desc desc[2]; + u8 vf_byte_val; + u8 vf_byte_off; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + hclge_cmd_setup_basic_desc(&desc[1], + HCLGE_OPC_VLAN_FILTER_VF_CFG, false); + + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + vf_byte_off = vfid / 8; + vf_byte_val = 1 << (vfid % 8); + + req0 = (struct hclge_vlan_filter_vf_cfg *)desc[0].data; + req1 = (struct hclge_vlan_filter_vf_cfg *)desc[1].data; + + req0->vlan_id = vlan; + req0->vlan_cfg = is_kill; + + if (vf_byte_off < HCLGE_MAX_VF_BYTES) + req0->vf_bitmap[vf_byte_off] = vf_byte_val; + else + req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val; + + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send vf vlan command fail, ret =%d.\n", + ret); + return ret; + } + + if (!is_kill) { + if (!req0->resp_code || req0->resp_code == 1) + return 0; + + dev_err(&hdev->pdev->dev, + "Add vf vlan filter fail, ret =%d.\n", + req0->resp_code); + } else { + if (!req0->resp_code) + return 0; + + dev_err(&hdev->pdev->dev, + "Kill vf vlan filter fail, ret =%d.\n", + req0->resp_code); + } + + return -EIO; +} + +static int hclge_set_port_vlan_filter(struct hnae3_handle *handle, + __be16 proto, u16 vlan_id, + bool is_kill) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_vlan_filter_pf_cfg *req; + struct hclge_desc desc; + u8 vlan_offset_byte_val; + u8 vlan_offset_byte; + u8 vlan_offset_160; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false); + + vlan_offset_160 = vlan_id / 160; + vlan_offset_byte = (vlan_id % 160) / 8; + vlan_offset_byte_val = 1 << (vlan_id % 8); + + req = (struct hclge_vlan_filter_pf_cfg *)desc.data; + req->vlan_offset = vlan_offset_160; + req->vlan_cfg = is_kill; + req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val; + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "port vlan command, send fail, ret =%d.\n", + ret); + return ret; + } + + ret = hclge_set_vf_vlan_common(hdev, 0, is_kill, vlan_id, 0, proto); + if (ret) { + dev_err(&hdev->pdev->dev, + "Set pf vlan filter config fail, ret =%d.\n", + ret); + return -EIO; + } + + return 0; +} + +static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid, + u16 vlan, u8 qos, __be16 proto) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if ((vfid >= hdev->num_alloc_vfs) || (vlan > 
4095) || (qos > 7)) + return -EINVAL; + if (proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + return hclge_set_vf_vlan_common(hdev, vfid, false, vlan, qos, proto); +} + +static int hclge_init_vlan_config(struct hclge_dev *hdev) +{ +#define HCLGE_VLAN_TYPE_VF_TABLE 0 +#define HCLGE_VLAN_TYPE_PORT_TABLE 1 + int ret; + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE, + true); + if (ret) + return ret; + + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE, + true); + + return ret; +} + +static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_config_max_frm_size *req; + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if ((new_mtu < HCLGE_MAC_MIN_MTU) || (new_mtu > HCLGE_MAC_MAX_MTU)) + return -EINVAL; + + hdev->mps = new_mtu; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); + + req = (struct hclge_config_max_frm_size *)desc.data; + req->max_frm_size = cpu_to_le16(new_mtu); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); + return ret; + } + + return 0; +} + +static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id, + bool enable) +{ + struct hclge_reset_tqp_queue *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false); + + req = (struct hclge_reset_tqp_queue *)desc.data; + req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); + hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Send tqp reset cmd error, status =%d\n", ret); + return ret; + } + + return 0; +} + +static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) +{ + struct hclge_reset_tqp_queue *req; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true); + + req = (struct hclge_reset_tqp_queue *)desc.data; + req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "Get reset status error, status =%d\n", ret); + return ret; + } + + return hnae_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); +} + +static void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + int reset_try_times = 0; + int reset_status; + int ret; + + ret = hclge_tqp_enable(hdev, queue_id, 0, false); + if (ret) { + dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret); + return; + } + + ret = hclge_send_reset_tqp_cmd(hdev, queue_id, true); + if (ret) { + dev_warn(&hdev->pdev->dev, + "Send reset tqp cmd fail, ret = %d\n", ret); + return; + } + + reset_try_times = 0; + while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) { + /* Wait for tqp hw reset */ + msleep(20); + reset_status = hclge_get_reset_status(hdev, queue_id); + if (reset_status) + break; + } + + if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) { + dev_warn(&hdev->pdev->dev, "Reset TQP fail\n"); + return; + } + + ret = hclge_send_reset_tqp_cmd(hdev, queue_id, false); + if (ret) { + dev_warn(&hdev->pdev->dev, + "Deassert the soft reset fail, ret = %d\n", ret); + return; + } +} + +static u32 hclge_get_fw_version(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev 
*hdev = vport->back; + + return hdev->fw_version; +} + +static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg, + u32 *rx_en, u32 *tx_en) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + *auto_neg = hclge_get_autoneg(handle); + + if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) { + *rx_en = 0; + *tx_en = 0; + return; + } + + if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) { + *rx_en = 1; + *tx_en = 0; + } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) { + *tx_en = 1; + *rx_en = 0; + } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) { + *rx_en = 1; + *tx_en = 1; + } else { + *rx_en = 0; + *tx_en = 0; + } +} + +static void hclge_get_ksettings_an_result(struct hnae3_handle *handle, + u8 *auto_neg, u32 *speed, u8 *duplex) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (speed) + *speed = hdev->hw.mac.speed; + if (duplex) + *duplex = hdev->hw.mac.duplex; + if (auto_neg) + *auto_neg = hdev->hw.mac.autoneg; +} + +static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (media_type) + *media_type = hdev->hw.mac.media_type; +} + +static void hclge_get_mdix_mode(struct hnae3_handle *handle, + u8 *tp_mdix_ctrl, u8 *tp_mdix) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct phy_device *phydev = hdev->hw.mac.phydev; + int mdix_ctrl, mdix, retval, is_resolved; + + if (!phydev) { + *tp_mdix_ctrl = ETH_TP_MDI_INVALID; + *tp_mdix = ETH_TP_MDI_INVALID; + return; + } + + phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX); + + retval = phy_read(phydev, HCLGE_PHY_CSC_REG); + mdix_ctrl = hnae_get_field(retval, HCLGE_PHY_MDIX_CTRL_M, + HCLGE_PHY_MDIX_CTRL_S); + + retval = phy_read(phydev, HCLGE_PHY_CSS_REG); + mdix = hnae_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B); + is_resolved = hnae_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B); + + phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER); + + switch (mdix_ctrl) { + case 0x0: + *tp_mdix_ctrl = ETH_TP_MDI; + break; + case 0x1: + *tp_mdix_ctrl = ETH_TP_MDI_X; + break; + case 0x3: + *tp_mdix_ctrl = ETH_TP_MDI_AUTO; + break; + default: + *tp_mdix_ctrl = ETH_TP_MDI_INVALID; + break; + } + + if (!is_resolved) + *tp_mdix = ETH_TP_MDI_INVALID; + else if (mdix) + *tp_mdix = ETH_TP_MDI_X; + else + *tp_mdix = ETH_TP_MDI; +} + +static int hclge_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i, ret; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + vport = &hdev->vport[i]; + + switch (client->type) { + case HNAE3_CLIENT_KNIC: + + hdev->nic_client = client; + vport->nic.client = client; + ret = client->ops->init_instance(&vport->nic); + if (ret) + goto err; + + if (hdev->roce_client && + hnae_get_bit(hdev->ae_dev->flag, + HNAE_DEV_SUPPORT_ROCE_B)) { + struct hnae3_client *rc = hdev->roce_client; + + ret = hclge_init_roce_base_info(vport); + if (ret) + goto err; + + ret = rc->ops->init_instance(&vport->roce); + if (ret) + goto err; + } + + break; + case HNAE3_CLIENT_UNIC: + hdev->nic_client = client; + vport->nic.client = client; + + ret = client->ops->init_instance(&vport->nic); + if (ret) + goto err; + + break; + case HNAE3_CLIENT_ROCE: + if (hnae_get_bit(hdev->ae_dev->flag, + HNAE_DEV_SUPPORT_ROCE_B)) { + hdev->roce_client = client; + 
vport->roce.client = client; + } + + if (hdev->roce_client) { + ret = hclge_init_roce_base_info(vport); + if (ret) + goto err; + + ret = client->ops->init_instance(&vport->roce); + if (ret) + goto err; + } + } + } + + return 0; +err: + return ret; +} + +static void hclge_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_vport *vport; + int i; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + vport = &hdev->vport[i]; + if (hdev->roce_client) + hdev->roce_client->ops->uninit_instance(&vport->roce, + 0); + if (client->type == HNAE3_CLIENT_ROCE) + return; + if (client->ops->uninit_instance) + client->ops->uninit_instance(&vport->nic, 0); + } +} + +static int hclge_pci_init(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_hw *hw; + int ret; + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "failed to enable PCI device\n"); + goto err_no_drvdata; + } + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(&pdev->dev, + "can't set consistent PCI DMA"); + goto err_disable_device; + } + dev_warn(&pdev->dev, "set DMA mask to 32 bits\n"); + } + + ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "PCI request regions failed %d\n", ret); + goto err_disable_device; + } + + pci_set_master(pdev); + hw = &hdev->hw; + hw->back = hdev; + hw->io_base = pcim_iomap(pdev, 2, 0); + if (!hw->io_base) { + dev_err(&pdev->dev, "Can't map configuration register space\n"); + ret = -ENOMEM; + goto err_clr_master; + } + + return 0; +err_clr_master: + pci_clear_master(pdev); + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); +err_no_drvdata: + pci_set_drvdata(pdev, NULL); + + return ret; +} + +static void hclge_pci_uninit(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + if (hdev->flag & HCLGE_FLAG_USE_MSIX) { + pci_disable_msix(pdev); + devm_kfree(&pdev->dev, hdev->msix_entries); + hdev->msix_entries = NULL; + } else { + pci_disable_msi(pdev); + } + + pci_clear_master(pdev); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); +} + +static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct pci_dev *pdev = ae_dev->pdev; + const struct pci_device_id *id; + struct hclge_dev *hdev; + int ret; + + hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); + if (!hdev) { + ret = -ENOMEM; + goto err_hclge_dev; + } + + hdev->flag |= HCLGE_FLAG_USE_MSIX; + hdev->pdev = pdev; + hdev->ae_dev = ae_dev; + ae_dev->priv = hdev; + + id = pci_match_id(roce_pci_tbl, ae_dev->pdev); + if (id) + hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1); + + ret = hclge_pci_init(hdev); + if (ret) { + dev_err(&pdev->dev, "PCI init failed\n"); + goto err_pci_init; + } + + /* Command queue initialize */ + ret = hclge_cmd_init(hdev); + if (ret) + goto err_cmd_init; + + ret = hclge_get_cap(hdev); + if (ret) { + dev_err(&pdev->dev, "get hw capabilty error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_configure(hdev); + if (ret) { + dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); + return ret; + } + + if (hdev->flag & HCLGE_FLAG_USE_MSIX) + ret = hclge_init_msix(hdev); + else + ret = hclge_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, "Init msix/msi error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_alloc_tqps(hdev); + if (ret) { + dev_err(&pdev->dev, 
"Allocate TQPs error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_alloc_vport(hdev); + if (ret) { + dev_err(&pdev->dev, "Allocate vport error, ret = %d.\n", ret); + return ret; + } + + ret = hclge_mac_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret); + return ret; + } + ret = hclge_buffer_alloc(hdev); + if (ret) { + dev_err(&pdev->dev, "Buffer allocate fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX); + if (ret) { + dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_rss_init_hw(hdev); + if (ret) { + dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_init_vlan_config(hdev); + if (ret) { + dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); + return ret; + } + + ret = hclge_tm_schd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret); + return ret; + } + + setup_timer(&hdev->service_timer, hclge_service_timer, + (unsigned long)hdev); + INIT_WORK(&hdev->service_task, hclge_service_task); + + set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state); + set_bit(HCLGE_STATE_DOWN, &hdev->state); + + pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); + return 0; + +err_cmd_init: + pci_release_regions(pdev); +err_pci_init: + pci_set_drvdata(pdev, NULL); +err_hclge_dev: + return ret; +} + +static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + struct hclge_mac *mac = &hdev->hw.mac; + + set_bit(HCLGE_STATE_DOWN, &hdev->state); + +#ifdef CONFIG_PCI_IOV + hclge_disable_sriov(hdev); +#endif + + if (hdev->service_timer.data) + del_timer_sync(&hdev->service_timer); + if (hdev->service_task.func) + cancel_work_sync(&hdev->service_task); + + if (mac->phydev) + mdiobus_unregister(mac->mdio_bus); + + hclge_destroy_cmd_queue(&hdev->hw); + hclge_pci_uninit(hdev); + ae_dev->priv = NULL; +} + +static const struct hnae3_ae_ops hclge_ops = { + .init_ae_dev = hclge_init_ae_dev, + .uninit_ae_dev = hclge_uninit_ae_dev, + .init_client_instance = hclge_init_client_instance, + .uninit_client_instance = hclge_uninit_client_instance, + .map_ring_to_vector = hclge_map_handle_ring_to_vector, + .unmap_ring_from_vector = hclge_unmap_ring_from_vector, + .get_vector = hclge_get_vector, + .set_promisc_mode = hclge_set_promisc_mode, + .start = hclge_ae_start, + .stop = hclge_ae_stop, + .get_status = hclge_get_status, + .get_ksettings_an_result = hclge_get_ksettings_an_result, + .update_speed_duplex_h = hclge_update_speed_duplex_h, + .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h, + .get_media_type = hclge_get_media_type, + .get_rss_key_size = hclge_get_rss_key_size, + .get_rss_indir_size = hclge_get_rss_indir_size, + .get_rss = hclge_get_rss, + .set_rss = hclge_set_rss, + .get_tc_size = hclge_get_tc_size, + .get_mac_addr = hclge_get_mac_addr, + .set_mac_addr = hclge_set_mac_addr, + .add_uc_addr = hclge_add_uc_addr, + .rm_uc_addr = hclge_rm_uc_addr, + .add_mc_addr = hclge_add_mc_addr, + .rm_mc_addr = hclge_rm_mc_addr, + .set_autoneg = hclge_set_autoneg, + .get_autoneg = hclge_get_autoneg, + .get_pauseparam = hclge_get_pauseparam, + .set_mtu = hclge_set_mtu, + .reset_queue = hclge_reset_tqp, + .get_stats = hclge_get_stats, + .update_stats = hclge_update_stats, + .get_strings = hclge_get_strings, + .get_sset_count = hclge_get_sset_count, + .get_fw_version = hclge_get_fw_version, + .get_mdix_mode = hclge_get_mdix_mode, + .set_vlan_filter = 
hclge_set_port_vlan_filter, + .set_vf_vlan_filter = hclge_set_vf_vlan_filter, +}; + +static struct hnae3_ae_algo ae_algo = { + .ops = &hclge_ops, + .name = HCLGE_NAME, + .pdev_id_table = ae_algo_pci_tbl, +}; + +static int hclge_init(void) +{ + pr_info("%s is initializing\n", HCLGE_NAME); + + return hnae3_register_ae_algo(&ae_algo); +} + +static void hclge_exit(void) +{ + hnae3_unregister_ae_algo(&ae_algo); +} +module_init(hclge_init); +module_exit(hclge_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); +MODULE_DESCRIPTION("HCLGE Driver"); +MODULE_VERSION(HCLGE_MOD_VERSION); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h new file mode 100644 index 000000000000..edb10ad075eb --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -0,0 +1,519 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_MAIN_H +#define __HCLGE_MAIN_H +#include +#include +#include +#include "hclge_cmd.h" +#include "hnae3.h" + +#define HCLGE_MOD_VERSION "v1.0" +#define HCLGE_DRIVER_NAME "hclge" + +#define HCLGE_INVALID_VPORT 0xffff + +#define HCLGE_ROCE_VECTOR_OFFSET 96 + +#define HCLGE_PF_CFG_BLOCK_SIZE 32 +#define HCLGE_PF_CFG_DESC_NUM \ + (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES) + +#define HCLGE_VECTOR_REG_BASE 0x20000 + +#define HCLGE_VECTOR_REG_OFFSET 0x4 +#define HCLGE_VECTOR_VF_OFFSET 0x100000 + +#define HCLGE_RSS_IND_TBL_SIZE 512 +#define HCLGE_RSS_SET_BITMAP_MSK 0xffff +#define HCLGE_RSS_KEY_SIZE 40 +#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0 +#define HCLGE_RSS_HASH_ALGO_SIMPLE 1 +#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2 +#define HCLGE_RSS_HASH_ALGO_MASK 0xf +#define HCLGE_RSS_CFG_TBL_NUM \ + (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE) + +#define HCLGE_RSS_TC_SIZE_0 1 +#define HCLGE_RSS_TC_SIZE_1 2 +#define HCLGE_RSS_TC_SIZE_2 4 +#define HCLGE_RSS_TC_SIZE_3 8 +#define HCLGE_RSS_TC_SIZE_4 16 +#define HCLGE_RSS_TC_SIZE_5 32 +#define HCLGE_RSS_TC_SIZE_6 64 +#define HCLGE_RSS_TC_SIZE_7 128 + +#define HCLGE_TQP_RESET_TRY_TIMES 10 + +#define HCLGE_PHY_PAGE_MDIX 0 +#define HCLGE_PHY_PAGE_COPPER 0 + +/* Page Selection Reg. 
*/ +#define HCLGE_PHY_PAGE_REG 22 + +/* Copper Specific Control Register */ +#define HCLGE_PHY_CSC_REG 16 + +/* Copper Specific Status Register */ +#define HCLGE_PHY_CSS_REG 17 + +#define HCLGE_PHY_MDIX_CTRL_S (5) +#define HCLGE_PHY_MDIX_CTRL_M (3 << HCLGE_PHY_MDIX_CTRL_S) + +#define HCLGE_PHY_MDIX_STATUS_B (6) +#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11) + +enum HCLGE_DEV_STATE { + HCLGE_STATE_REINITING, + HCLGE_STATE_DOWN, + HCLGE_STATE_DISABLED, + HCLGE_STATE_REMOVING, + HCLGE_STATE_SERVICE_INITED, + HCLGE_STATE_SERVICE_SCHED, + HCLGE_STATE_MBX_HANDLING, + HCLGE_STATE_MBX_IRQ, + HCLGE_STATE_MAX +}; + +#define HCLGE_MPF_ENBALE 1 +struct hclge_caps { + u16 num_tqp; + u16 num_buffer_cell; + u32 flag; + u16 vmdq; +}; + +enum HCLGE_MAC_SPEED { + HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ + HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */ + HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */ + HCLGE_MAC_SPEED_10G = 10000, /* 10000 Mbps = 10 Gbps */ + HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */ + HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */ + HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */ + HCLGE_MAC_SPEED_100G = 100000 /* 100000 Mbps = 100 Gbps */ +}; + +enum HCLGE_MAC_DUPLEX { + HCLGE_MAC_HALF, + HCLGE_MAC_FULL +}; + +enum hclge_mta_dmac_sel_type { + HCLGE_MAC_ADDR_47_36, + HCLGE_MAC_ADDR_46_35, + HCLGE_MAC_ADDR_45_34, + HCLGE_MAC_ADDR_44_33, +}; + +struct hclge_mac { + u8 phy_addr; + u8 flag; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u8 autoneg; + u8 duplex; + u32 speed; + int link; /* store the link status of mac & phy (if phy exit)*/ + struct phy_device *phydev; + struct mii_bus *mdio_bus; + phy_interface_t phy_if; +}; + +struct hclge_hw { + void __iomem *io_base; + struct hclge_mac mac; + int num_vec; + struct hclge_cmq cmq; + struct hclge_caps caps; + void *back; +}; + +/* TQP stats */ +struct hlcge_tqp_stats { + /* query_tqp_tx_queue_statistics ,opcode id: 0x0B03 */ + u64 rcb_tx_ring_pktnum_rcd; /* 32bit */ + /* query_tqp_rx_queue_statistics ,opcode id: 0x0B13 */ + u64 rcb_rx_ring_pktnum_rcd; /* 32bit */ +}; + +struct hclge_tqp { + struct device *dev; /* Device for DMA mapping */ + struct hnae3_queue q; + struct hlcge_tqp_stats tqp_stats; + u16 index; /* Global index in a NIC controller */ + + bool alloced; +}; + +enum hclge_fc_mode { + HCLGE_FC_NONE, + HCLGE_FC_RX_PAUSE, + HCLGE_FC_TX_PAUSE, + HCLGE_FC_FULL, + HCLGE_FC_PFC, + HCLGE_FC_DEFAULT +}; + +#define HCLGE_PG_NUM 4 +#define HCLGE_SCH_MODE_SP 0 +#define HCLGE_SCH_MODE_DWRR 1 +struct hclge_pg_info { + u8 pg_id; + u8 pg_sch_mode; /* 0: sp; 1: dwrr */ + u8 tc_bit_map; + u32 bw_limit; + u8 tc_dwrr[HNAE3_MAX_TC]; +}; + +struct hclge_tc_info { + u8 tc_id; + u8 tc_sch_mode; /* 0: sp; 1: dwrr */ + u8 up; + u8 pgid; + u32 bw_limit; +}; + +struct hclge_cfg { + u8 vmdq_vport_num; + u8 tc_num; + u16 tqp_desc_num; + u16 rx_buf_len; + u8 phy_addr; + u8 media_type; + u8 mac_addr[ETH_ALEN]; + u8 default_speed; + u32 numa_node_map; +}; + +struct hclge_tm_info { + u8 num_tc; + u8 num_pg; /* It must be 1 if vNET-Base schd */ + u8 pg_dwrr[HCLGE_PG_NUM]; + struct hclge_pg_info pg_info[HCLGE_PG_NUM]; + struct hclge_tc_info tc_info[HNAE3_MAX_TC]; + enum hclge_fc_mode fc_mode; + u8 hw_pfc_map; /* Allow for packet drop or not on this TC */ +}; + +struct hclge_comm_stats_str { + char desc[ETH_GSTRING_LEN]; + unsigned long offset; +}; + +/* all 64bit stats, opcode id: 0x0030 */ +struct hclge_64_bit_stats { + /* query_igu_stat */ + u64 igu_rx_oversize_pkt; + u64 igu_rx_undersize_pkt; + u64 igu_rx_out_all_pkt; + u64 igu_rx_uni_pkt; + 
u64 igu_rx_multi_pkt; + u64 igu_rx_broad_pkt; + u64 rsv0; + + /* query_egu_stat */ + u64 egu_tx_out_all_pkt; + u64 egu_tx_uni_pkt; + u64 egu_tx_multi_pkt; + u64 egu_tx_broad_pkt; + + /* ssu_ppp packet stats */ + u64 ssu_ppp_mac_key_num; + u64 ssu_ppp_host_key_num; + u64 ppp_ssu_mac_rlt_num; + u64 ppp_ssu_host_rlt_num; + + /* ssu_tx_in_out_dfx_stats */ + u64 ssu_tx_in_num; + u64 ssu_tx_out_num; + /* ssu_rx_in_out_dfx_stats */ + u64 ssu_rx_in_num; + u64 ssu_rx_out_num; +}; + +/* all 32bit stats, opcode id: 0x0031 */ +struct hclge_32_bit_stats { + u64 igu_rx_err_pkt; + u64 igu_rx_no_eof_pkt; + u64 igu_rx_no_sof_pkt; + u64 egu_tx_1588_pkt; + u64 egu_tx_err_pkt; + u64 ssu_full_drop_num; + u64 ssu_part_drop_num; + u64 ppp_key_drop_num; + u64 ppp_rlt_drop_num; + u64 ssu_key_drop_num; + u64 pkt_curr_buf_cnt; + u64 qcn_fb_rcv_cnt; + u64 qcn_fb_drop_cnt; + u64 qcn_fb_invaild_cnt; + u64 rsv0; + u64 rx_packet_tc0_in_cnt; + u64 rx_packet_tc1_in_cnt; + u64 rx_packet_tc2_in_cnt; + u64 rx_packet_tc3_in_cnt; + u64 rx_packet_tc4_in_cnt; + u64 rx_packet_tc5_in_cnt; + u64 rx_packet_tc6_in_cnt; + u64 rx_packet_tc7_in_cnt; + u64 rx_packet_tc0_out_cnt; + u64 rx_packet_tc1_out_cnt; + u64 rx_packet_tc2_out_cnt; + u64 rx_packet_tc3_out_cnt; + u64 rx_packet_tc4_out_cnt; + u64 rx_packet_tc5_out_cnt; + u64 rx_packet_tc6_out_cnt; + u64 rx_packet_tc7_out_cnt; + + /* Tx packet level statistics */ + u64 tx_packet_tc0_in_cnt; + u64 tx_packet_tc1_in_cnt; + u64 tx_packet_tc2_in_cnt; + u64 tx_packet_tc3_in_cnt; + u64 tx_packet_tc4_in_cnt; + u64 tx_packet_tc5_in_cnt; + u64 tx_packet_tc6_in_cnt; + u64 tx_packet_tc7_in_cnt; + u64 tx_packet_tc0_out_cnt; + u64 tx_packet_tc1_out_cnt; + u64 tx_packet_tc2_out_cnt; + u64 tx_packet_tc3_out_cnt; + u64 tx_packet_tc4_out_cnt; + u64 tx_packet_tc5_out_cnt; + u64 tx_packet_tc6_out_cnt; + u64 tx_packet_tc7_out_cnt; + + /* packet buffer statistics */ + u64 pkt_curr_buf_tc0_cnt; + u64 pkt_curr_buf_tc1_cnt; + u64 pkt_curr_buf_tc2_cnt; + u64 pkt_curr_buf_tc3_cnt; + u64 pkt_curr_buf_tc4_cnt; + u64 pkt_curr_buf_tc5_cnt; + u64 pkt_curr_buf_tc6_cnt; + u64 pkt_curr_buf_tc7_cnt; + + u64 mb_uncopy_num; + u64 lo_pri_unicast_rlt_drop_num; + u64 hi_pri_multicast_rlt_drop_num; + u64 lo_pri_multicast_rlt_drop_num; + u64 rx_oq_drop_pkt_cnt; + u64 tx_oq_drop_pkt_cnt; + u64 nic_l2_err_drop_pkt_cnt; + u64 roc_l2_err_drop_pkt_cnt; +}; + +/* mac stats ,opcode id: 0x0032 */ +struct hclge_mac_stats { + u64 mac_tx_mac_pause_num; + u64 mac_rx_mac_pause_num; + u64 mac_tx_pfc_pri0_pkt_num; + u64 mac_tx_pfc_pri1_pkt_num; + u64 mac_tx_pfc_pri2_pkt_num; + u64 mac_tx_pfc_pri3_pkt_num; + u64 mac_tx_pfc_pri4_pkt_num; + u64 mac_tx_pfc_pri5_pkt_num; + u64 mac_tx_pfc_pri6_pkt_num; + u64 mac_tx_pfc_pri7_pkt_num; + u64 mac_rx_pfc_pri0_pkt_num; + u64 mac_rx_pfc_pri1_pkt_num; + u64 mac_rx_pfc_pri2_pkt_num; + u64 mac_rx_pfc_pri3_pkt_num; + u64 mac_rx_pfc_pri4_pkt_num; + u64 mac_rx_pfc_pri5_pkt_num; + u64 mac_rx_pfc_pri6_pkt_num; + u64 mac_rx_pfc_pri7_pkt_num; + u64 mac_tx_total_pkt_num; + u64 mac_tx_total_oct_num; + u64 mac_tx_good_pkt_num; + u64 mac_tx_bad_pkt_num; + u64 mac_tx_good_oct_num; + u64 mac_tx_bad_oct_num; + u64 mac_tx_uni_pkt_num; + u64 mac_tx_multi_pkt_num; + u64 mac_tx_broad_pkt_num; + u64 mac_tx_undersize_pkt_num; + u64 mac_tx_overrsize_pkt_num; + u64 mac_tx_64_oct_pkt_num; + u64 mac_tx_65_127_oct_pkt_num; + u64 mac_tx_128_255_oct_pkt_num; + u64 mac_tx_256_511_oct_pkt_num; + u64 mac_tx_512_1023_oct_pkt_num; + u64 mac_tx_1024_1518_oct_pkt_num; + u64 mac_tx_1519_max_oct_pkt_num; + u64 mac_rx_total_pkt_num; + u64 
mac_rx_total_oct_num; + u64 mac_rx_good_pkt_num; + u64 mac_rx_bad_pkt_num; + u64 mac_rx_good_oct_num; + u64 mac_rx_bad_oct_num; + u64 mac_rx_uni_pkt_num; + u64 mac_rx_multi_pkt_num; + u64 mac_rx_broad_pkt_num; + u64 mac_rx_undersize_pkt_num; + u64 mac_rx_overrsize_pkt_num; + u64 mac_rx_64_oct_pkt_num; + u64 mac_rx_65_127_oct_pkt_num; + u64 mac_rx_128_255_oct_pkt_num; + u64 mac_rx_256_511_oct_pkt_num; + u64 mac_rx_512_1023_oct_pkt_num; + u64 mac_rx_1024_1518_oct_pkt_num; + u64 mac_rx_1519_max_oct_pkt_num; + + u64 mac_trans_fragment_pkt_num; + u64 mac_trans_undermin_pkt_num; + u64 mac_trans_jabber_pkt_num; + u64 mac_trans_err_all_pkt_num; + u64 mac_trans_from_app_good_pkt_num; + u64 mac_trans_from_app_bad_pkt_num; + u64 mac_rcv_fragment_pkt_num; + u64 mac_rcv_undermin_pkt_num; + u64 mac_rcv_jabber_pkt_num; + u64 mac_rcv_fcs_err_pkt_num; + u64 mac_rcv_send_app_good_pkt_num; + u64 mac_rcv_send_app_bad_pkt_num; +}; + +struct hclge_hw_stats { + struct hclge_mac_stats mac_stats; + struct hclge_64_bit_stats all_64_bit_stats; + struct hclge_32_bit_stats all_32_bit_stats; +}; + +struct hclge_dev { + struct pci_dev *pdev; + struct hnae3_ae_dev *ae_dev; + struct hclge_hw hw; + struct hclge_hw_stats hw_stats; + unsigned long state; + + u32 fw_version; + u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */ + u16 num_tqps; /* Num task queue pairs of this PF */ + u16 num_req_vfs; /* Num VFs requested for this PF */ + + u16 num_roce_msix; /* Num of roce vectors for this PF */ + int roce_base_vector; + + /* Base task tqp physical id of this PF */ + u16 base_tqp_pid; + u16 alloc_rss_size; /* Allocated RSS task queue */ + u16 rss_size_max; /* HW defined max RSS task queue */ + + /* Num of guaranteed filters for this PF */ + u16 fdir_pf_filter_count; + u16 num_alloc_vport; /* Num vports this driver supports */ + u32 numa_node_mask; + u16 rx_buf_len; + u16 num_desc; + u8 hw_tc_map; + u8 tc_num_last_time; + enum hclge_fc_mode fc_mode_last_time; + +#define HCLGE_FLAG_TC_BASE_SCH_MODE 1 +#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2 + u8 tx_sch_mode; + + u8 default_up; + struct hclge_tm_info tm_info; + + u16 num_msi; + u16 num_msi_left; + u16 num_msi_used; + u32 base_msi_vector; + struct msix_entry *msix_entries; + u16 *vector_status; + + u16 pending_udp_bitmap; + + u16 rx_itr_default; + u16 tx_itr_default; + + u16 adminq_work_limit; /* Num of admin receive queue desc to process */ + unsigned long service_timer_period; + unsigned long service_timer_previous; + struct timer_list service_timer; + struct work_struct service_task; + + bool cur_promisc; + int num_alloc_vfs; /* Actual number of VFs allocated */ + + struct hclge_tqp *htqp; + struct hclge_vport *vport; + + struct dentry *hclge_dbgfs; + + struct hnae3_client *nic_client; + struct hnae3_client *roce_client; + +#define HCLGE_FLAG_USE_MSI 0x00000001 +#define HCLGE_FLAG_USE_MSIX 0x00000002 +#define HCLGE_FLAG_MAIN 0x00000004 +#define HCLGE_FLAG_DCB_CAPABLE 0x00000008 +#define HCLGE_FLAG_DCB_ENABLE 0x00000010 + u32 flag; + + u32 pkt_buf_size; /* Total pf buf size for tx/rx */ + u32 mps; /* Max packet size */ + struct hclge_priv_buf *priv_buf; + struct hclge_shared_buf s_buf; + + enum hclge_mta_dmac_sel_type mta_mac_sel_type; + bool enable_mta; /* Mutilcast filter enable */ + bool accept_mta_mc; /* Whether accept mta filter multicast */ +}; + +struct hclge_vport { + u16 alloc_tqps; /* Allocated Tx/Rx queues */ + + u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */ + /* User configured lookup table entries */ + u8 
rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE]; + + u16 qs_offset; + u16 bw_limit; /* VSI BW Limit (0 = disabled) */ + u8 dwrr; + + int vport_id; + struct hclge_dev *back; /* Back reference to associated dev */ + struct hnae3_handle nic; + struct hnae3_handle roce; +}; + +void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, + bool en_mc, bool en_bc, int vport_id); + +int hclge_add_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_rm_uc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_add_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); +int hclge_rm_mc_addr_common(struct hclge_vport *vport, + const unsigned char *addr); + +int hclge_cfg_func_mta_filter(struct hclge_dev *hdev, + u8 func_id, + bool enable); +struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle); +int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector, + struct hnae3_ring_chain_node *ring_chain); +static inline int hclge_get_queue_id(struct hnae3_queue *queue) +{ + struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q); + + return tqp->index; +} + +int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); +int hclge_set_vf_vlan_common(struct hclge_dev *vport, int vfid, + bool is_kill, u16 vlan, u8 qos, __be16 proto); +#endif -- cgit v1.2.3-55-g7522 From 848440544b41fbe21f36072ee7dc7c3c59ce62e2 Mon Sep 17 00:00:00 2001 From: Salil Date: Wed, 2 Aug 2017 16:59:49 +0100 Subject: net: hns3: Add support of TX Scheduler & Shaper to HNS3 driver THis patch adds the support of the Scheduling and Shaping functionalities during the transmit leg. This also adds the support of Pause at MAC level. (Pause at per-priority level shall be added later along with the DCB feature). Hardware as such consists of two types of cofiguration of 6 level schedulers. Algorithms varies according to the level and type of scheduler being used. Current patch is used to initialize the mapping, algorithms(like SP, DWRR etc) and shaper(CIR, PIR etc) being used. Signed-off-by: Daode Huang Signed-off-by: lipeng Signed-off-by: Salil Mehta Signed-off-by: Yisen Zhuang Signed-off-by: Wei Hu (Xavier) Signed-off-by: David S. Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 1015 ++++++++++++++++++++ .../net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 106 ++ 2 files changed, 1121 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c new file mode 100644 index 000000000000..1c577d268f00 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -0,0 +1,1015 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_tm.h" + +enum hclge_shaper_level { + HCLGE_SHAPER_LVL_PRI = 0, + HCLGE_SHAPER_LVL_PG = 1, + HCLGE_SHAPER_LVL_PORT = 2, + HCLGE_SHAPER_LVL_QSET = 3, + HCLGE_SHAPER_LVL_CNT = 4, + HCLGE_SHAPER_LVL_VF = 0, + HCLGE_SHAPER_LVL_PF = 1, +}; + +#define HCLGE_SHAPER_BS_U_DEF 1 +#define HCLGE_SHAPER_BS_S_DEF 4 + +#define HCLGE_ETHER_MAX_RATE 100000 + +/* hclge_shaper_para_calc: calculate ir parameter for the shaper + * @ir: Rate to be config, its unit is Mbps + * @shaper_level: the shaper level. eg: port, pg, priority, queueset + * @ir_b: IR_B parameter of IR shaper + * @ir_u: IR_U parameter of IR shaper + * @ir_s: IR_S parameter of IR shaper + * + * the formula: + * + * IR_b * (2 ^ IR_u) * 8 + * IR(Mbps) = ------------------------- * CLOCK(1000Mbps) + * Tick * (2 ^ IR_s) + * + * @return: 0: calculate sucessful, negative: fail + */ +static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, + u8 *ir_b, u8 *ir_u, u8 *ir_s) +{ + const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { + 6 * 256, /* Prioriy level */ + 6 * 32, /* Prioriy group level */ + 6 * 8, /* Port level */ + 6 * 256 /* Qset level */ + }; + u8 ir_u_calc = 0, ir_s_calc = 0; + u32 ir_calc; + u32 tick; + + /* Calc tick */ + if (shaper_level >= HCLGE_SHAPER_LVL_CNT) + return -EINVAL; + + tick = tick_array[shaper_level]; + + /** + * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0 + * the formula is changed to: + * 126 * 1 * 8 + * ir_calc = ---------------- * 1000 + * tick * 1 + */ + ir_calc = (1008000 + (tick >> 1) - 1) / tick; + + if (ir_calc == ir) { + *ir_b = 126; + *ir_u = 0; + *ir_s = 0; + + return 0; + } else if (ir_calc > ir) { + /* Increasing the denominator to select ir_s value */ + while (ir_calc > ir) { + ir_s_calc++; + ir_calc = 1008000 / (tick * (1 << ir_s_calc)); + } + + if (ir_calc == ir) + *ir_b = 126; + else + *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000; + } else { + /* Increasing the numerator to select ir_u value */ + u32 numerator; + + while (ir_calc < ir) { + ir_u_calc++; + numerator = 1008000 * (1 << ir_u_calc); + ir_calc = (numerator + (tick >> 1)) / tick; + } + + if (ir_calc == ir) { + *ir_b = 126; + } else { + u32 denominator = (8000 * (1 << --ir_u_calc)); + *ir_b = (ir * tick + (denominator >> 1)) / denominator; + } + } + + *ir_u = ir_u_calc; + *ir_s = ir_s_calc; + + return 0; +} + +static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false); + + desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) | + (rx ? 
HCLGE_RX_MAC_PAUSE_EN_MSK : 0)); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id) +{ + u8 tc; + + for (tc = 0; tc < hdev->tm_info.num_tc; tc++) + if (hdev->tm_info.tc_info[tc].up == pri_id) + break; + + if (tc >= hdev->tm_info.num_tc) + return -EINVAL; + + /** + * the register for priority has four bytes, the first bytes includes + * priority0 and priority1, the higher 4bit stands for priority1 + * while the lower 4bit stands for priority0, as below: + * first byte: | pri_1 | pri_0 | + * second byte: | pri_3 | pri_2 | + * third byte: | pri_5 | pri_4 | + * fourth byte: | pri_7 | pri_6 | + */ + pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4); + + return 0; +} + +static int hclge_up_to_tc_map(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + u8 *pri = (u8 *)desc.data; + u8 pri_id; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false); + + for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) { + ret = hclge_fill_pri_array(hdev, pri, pri_id); + if (ret) + return ret; + } + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev, + u8 pg_id, u8 pri_bit_map) +{ + struct hclge_pg_to_pri_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false); + + map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + + map->pg_id = pg_id; + map->pri_bit_map = pri_bit_map; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, + u16 qs_id, u8 pri) +{ + struct hclge_qs_to_pri_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false); + + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + + map->qs_id = cpu_to_le16(qs_id); + map->priority = pri; + map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev, + u8 q_id, u16 qs_id) +{ + struct hclge_nq_to_qs_link_cmd *map; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false); + + map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + + map->nq_id = cpu_to_le16(q_id); + map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id, + u8 dwrr) +{ + struct hclge_pg_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false); + + weight = (struct hclge_pg_weight_cmd *)desc.data; + + weight->pg_id = pg_id; + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id, + u8 dwrr) +{ + struct hclge_priority_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false); + + weight = (struct hclge_priority_weight_cmd *)desc.data; + + weight->pri_id = pri_id; + weight->dwrr = dwrr; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id, + u8 dwrr) +{ + struct hclge_qs_weight_cmd *weight; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false); + + weight = (struct hclge_qs_weight_cmd *)desc.data; + + weight->qs_id = cpu_to_le16(qs_id); + weight->dwrr = dwrr; + + return 
hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, u8 pg_id, + u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s) +{ + struct hclge_pg_shapping_cmd *shap_cfg_cmd; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING : + HCLGE_OPC_TM_PG_C_SHAPPING; + hclge_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + + shap_cfg_cmd->pg_id = pg_id; + + hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b); + hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u); + hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s); + hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b); + hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev, + enum hclge_shap_bucket bucket, u8 pri_id, + u8 ir_b, u8 ir_u, u8 ir_s, + u8 bs_b, u8 bs_s) +{ + struct hclge_pri_shapping_cmd *shap_cfg_cmd; + enum hclge_opcode_type opcode; + struct hclge_desc desc; + + opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING : + HCLGE_OPC_TM_PRI_C_SHAPPING; + + hclge_cmd_setup_basic_desc(&desc, opcode, false); + + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + + shap_cfg_cmd->pri_id = pri_id; + + hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b); + hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u); + hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s); + hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b); + hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false); + + if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(pg_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false); + + if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(pri_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id) +{ + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false); + + if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR) + desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK); + else + desc.data[1] = 0; + + desc.data[0] = cpu_to_le32(qs_id); + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc) +{ + struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; + struct hclge_desc desc; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING, + false); + + bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; + + bp_to_qs_map_cmd->tc_id = tc; + + /* Qset and tc is one by one mapping */ + bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc); + + return hclge_cmd_send(&hdev->hw, &desc, 
1); +} + +static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + u8 i; + + kinfo = &vport->nic.kinfo; + vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; + kinfo->num_tc = + min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc); + kinfo->rss_size + = min_t(u16, hdev->rss_size_max, + kinfo->num_tqps / kinfo->num_tc); + vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id; + vport->dwrr = 100; /* 100 percent as init */ + + for (i = 0; i < kinfo->num_tc; i++) { + if (hdev->hw_tc_map & BIT(i)) { + kinfo->tc_info[i].enable = true; + kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size; + kinfo->tc_info[i].tqp_count = kinfo->rss_size; + kinfo->tc_info[i].tc = i; + kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up; + } else { + /* Set to default queue if TC is disable */ + kinfo->tc_info[i].enable = false; + kinfo->tc_info[i].tqp_offset = 0; + kinfo->tc_info[i].tqp_count = 1; + kinfo->tc_info[i].tc = 0; + kinfo->tc_info[i].up = 0; + } + } +} + +static void hclge_tm_vport_info_update(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + u32 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + hclge_tm_vport_tc_info_update(vport); + + vport++; + } +} + +static void hclge_tm_tc_info_init(struct hclge_dev *hdev) +{ + u8 i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + hdev->tm_info.tc_info[i].tc_id = i; + hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; + hdev->tm_info.tc_info[i].up = i; + hdev->tm_info.tc_info[i].pgid = 0; + hdev->tm_info.tc_info[i].bw_limit = + hdev->tm_info.pg_info[0].bw_limit; + } + + hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE; +} + +static void hclge_tm_pg_info_init(struct hclge_dev *hdev) +{ + u8 i; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + int k; + + hdev->tm_info.pg_dwrr[i] = i ? 
0 : 100; + + hdev->tm_info.pg_info[i].pg_id = i; + hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR; + + hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE; + + if (i != 0) + continue; + + hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map; + for (k = 0; k < hdev->tm_info.num_tc; k++) + hdev->tm_info.pg_info[i].tc_dwrr[k] = 100; + } +} + +static int hclge_tm_schd_info_init(struct hclge_dev *hdev) +{ + if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && + (hdev->tm_info.num_pg != 1)) + return -EINVAL; + + hclge_tm_pg_info_init(hdev); + + hclge_tm_tc_info_init(hdev); + + hclge_tm_vport_info_update(hdev); + + hdev->tm_info.fc_mode = HCLGE_FC_NONE; + hdev->fc_mode_last_time = hdev->tm_info.fc_mode; + + return 0; +} + +static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev) +{ + int ret; + u32 i; + + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Cfg mapping */ + ret = hclge_tm_pg_to_pri_map_cfg( + hdev, i, hdev->tm_info.pg_info[i].tc_bit_map); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev) +{ + u8 ir_u, ir_b, ir_s; + int ret; + u32 i; + + /* Cfg pg schd */ + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + /* Pg to pri */ + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Calc shaper para */ + ret = hclge_shaper_para_calc( + hdev->tm_info.pg_info[i].bw_limit, + HCLGE_SHAPER_LVL_PG, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + ret = hclge_tm_pg_shapping_cfg(hdev, + HCLGE_TM_SHAP_C_BUCKET, i, + 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + ret = hclge_tm_pg_shapping_cfg(hdev, + HCLGE_TM_SHAP_P_BUCKET, i, + ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + u32 i; + + /* cfg pg schd */ + if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) + return 0; + + /* pg to prio */ + for (i = 0; i < hdev->tm_info.num_pg; i++) { + /* Cfg dwrr */ + ret = hclge_tm_pg_weight_cfg(hdev, i, + hdev->tm_info.pg_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev, + struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hnae3_queue **tqp = kinfo->tqp; + struct hnae3_tc_info *v_tc_info; + u32 i, j; + int ret; + + for (i = 0; i < kinfo->num_tc; i++) { + v_tc_info = &kinfo->tc_info[i]; + for (j = 0; j < v_tc_info->tqp_count; j++) { + struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j]; + + ret = hclge_tm_q_to_qs_map_cfg(hdev, + hclge_get_queue_id(q), + vport->qs_offset + i); + if (ret) + return ret; + } + } + + return 0; +} + +static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + /* Cfg qs -> pri mapping, one by one mapping */ + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i); + if (ret) + return ret; + } + } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) { + int k; + /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */ + for (k = 0; k < hdev->num_alloc_vport; k++) + for (i = 0; i < HNAE3_MAX_TC; i++) { + ret = hclge_tm_qs_to_pri_map_cfg( + hdev, vport[k].qs_offset + i, k); + if (ret) + return ret; + } + } else { + return -EINVAL; + } + + /* Cfg q -> 
qs mapping */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_vport_q_to_qs_map(hdev, vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) +{ + u8 ir_u, ir_b, ir_s; + int ret; + u32 i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_shaper_para_calc( + hdev->tm_info.tc_info[i].bw_limit, + HCLGE_SHAPER_LVL_PRI, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg( + hdev, HCLGE_TM_SHAP_C_BUCKET, i, + 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg( + hdev, HCLGE_TM_SHAP_P_BUCKET, i, + ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport) +{ + struct hclge_dev *hdev = vport->back; + u8 ir_u, ir_b, ir_s; + int ret; + + ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, + vport->vport_id, + 0, 0, 0, HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, + vport->vport_id, + ir_b, ir_u, ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + if (ret) + return ret; + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + struct hnae3_tc_info *v_tc_info; + u8 ir_u, ir_b, ir_s; + u32 i; + int ret; + + for (i = 0; i < kinfo->num_tc; i++) { + v_tc_info = &kinfo->tc_info[i]; + ret = hclge_shaper_para_calc( + hdev->tm_info.tc_info[i].bw_limit, + HCLGE_SHAPER_LVL_QSET, + &ir_b, &ir_u, &ir_s); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + /* Need config vport shaper */ + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport); + if (ret) + return ret; + + ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + ret = hclge_tm_pri_tc_base_shaper_cfg(hdev); + if (ret) + return ret; + } else { + ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) +{ + struct hclge_pg_info *pg_info; + u8 dwrr; + int ret; + u32 i; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + pg_info = + &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; + dwrr = pg_info->tc_dwrr[i]; + + ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr); + if (ret) + return ret; + + ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + u8 i; + + /* Vf dwrr */ + ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr); + if (ret) + return ret; + + /* Qset dwrr */ + for (i = 0; i < kinfo->num_tc; i++) { + ret = hclge_tm_qs_weight_cfg( + hdev, 
vport->qs_offset + i, + hdev->tm_info.pg_info[0].tc_dwrr[i]); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u32 i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport); + if (ret) + return ret; + + vport++; + } + + return 0; +} + +static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev); + if (ret) + return ret; + } else { + ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_map_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_to_pri_map(hdev); + if (ret) + return ret; + + return hclge_tm_pri_q_qs_cfg(hdev); +} + +static int hclge_tm_shaper_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_shaper_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_pri_shaper_cfg(hdev); +} + +int hclge_tm_dwrr_cfg(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_pg_dwrr_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_pri_dwrr_cfg(hdev); +} + +static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev) +{ + int ret; + u8 i; + + /* Only being config on TC-Based scheduler mode */ + if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) + return 0; + + for (i = 0; i < hdev->tm_info.num_pg; i++) { + ret = hclge_tm_pg_schd_mode_cfg(hdev, i); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport) +{ + struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo; + struct hclge_dev *hdev = vport->back; + int ret; + u8 i; + + ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id); + if (ret) + return ret; + + for (i = 0; i < kinfo->num_tc; i++) { + ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i); + if (ret) + return ret; + } + + return 0; +} + +static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int ret; + u8 i; + + if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_tm_pri_schd_mode_cfg(hdev, i); + if (ret) + return ret; + + ret = hclge_tm_qs_schd_mode_cfg(hdev, i); + if (ret) + return ret; + } + } else { + for (i = 0; i < hdev->num_alloc_vport; i++) { + ret = hclge_tm_schd_mode_vnet_base_cfg(vport); + if (ret) + return ret; + + vport++; + } + } + + return 0; +} + +static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev) +{ + int ret; + + ret = hclge_tm_lvl2_schd_mode_cfg(hdev); + if (ret) + return ret; + + return hclge_tm_lvl34_schd_mode_cfg(hdev); +} + +static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev) +{ + int ret; + + /* Cfg tm mapping */ + ret = hclge_tm_map_cfg(hdev); + if (ret) + return ret; + + /* Cfg tm shaper */ + ret = hclge_tm_shaper_cfg(hdev); + if (ret) + return ret; + + /* Cfg dwrr */ + ret = hclge_tm_dwrr_cfg(hdev); + if (ret) + return ret; + + /* Cfg schd mode for each level schd */ + return hclge_tm_schd_mode_hw(hdev); +} + +int hclge_pause_setup_hw(struct hclge_dev *hdev) +{ + bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC; + int ret; + u8 i; + + ret = hclge_mac_pause_en_cfg(hdev, en, en); + if (ret) + return ret; + + for (i = 0; i < hdev->tm_info.num_tc; i++) { + ret = hclge_tm_qs_bp_cfg(hdev, i); + if (ret) + return ret; + } + + return hclge_up_to_tc_map(hdev); +} + +int 
hclge_tm_init_hw(struct hclge_dev *hdev) +{ + int ret; + + if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) && + (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE)) + return -ENOTSUPP; + + ret = hclge_tm_schd_setup_hw(hdev); + if (ret) + return ret; + + ret = hclge_pause_setup_hw(hdev); + if (ret) + return ret; + + return 0; +} + +int hclge_tm_schd_init(struct hclge_dev *hdev) +{ + int ret = hclge_tm_schd_info_init(hdev); + + if (ret) + return ret; + + return hclge_tm_init_hw(hdev); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h new file mode 100644 index 000000000000..7e67337dfaf2 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __HCLGE_TM_H +#define __HCLGE_TM_H + +#include + +/* MAC Pause */ +#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0) +#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1) + +#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0) + +/* SP or DWRR */ +#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0) +#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE) + +struct hclge_pg_to_pri_link_cmd { + u8 pg_id; + u8 rsvd1[3]; + u8 pri_bit_map; +}; + +struct hclge_qs_to_pri_link_cmd { + __le16 qs_id; + __le16 rsvd; + u8 priority; +#define HCLGE_TM_QS_PRI_LINK_VLD_MSK BIT(0) + u8 link_vld; +}; + +struct hclge_nq_to_qs_link_cmd { + __le16 nq_id; + __le16 rsvd; +#define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10) + __le16 qset_id; +}; + +struct hclge_pg_weight_cmd { + u8 pg_id; + u8 dwrr; +}; + +struct hclge_priority_weight_cmd { + u8 pri_id; + u8 dwrr; +}; + +struct hclge_qs_weight_cmd { + __le16 qs_id; + u8 dwrr; +}; + +#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0) +#define HCLGE_TM_SHAP_IR_B_LSH 0 +#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8) +#define HCLGE_TM_SHAP_IR_U_LSH 8 +#define HCLGE_TM_SHAP_IR_S_MSK GENMASK(15, 12) +#define HCLGE_TM_SHAP_IR_S_LSH 12 +#define HCLGE_TM_SHAP_BS_B_MSK GENMASK(20, 16) +#define HCLGE_TM_SHAP_BS_B_LSH 16 +#define HCLGE_TM_SHAP_BS_S_MSK GENMASK(25, 21) +#define HCLGE_TM_SHAP_BS_S_LSH 21 + +enum hclge_shap_bucket { + HCLGE_TM_SHAP_C_BUCKET = 0, + HCLGE_TM_SHAP_P_BUCKET, +}; + +struct hclge_pri_shapping_cmd { + u8 pri_id; + u8 rsvd[3]; + __le32 pri_shapping_para; +}; + +struct hclge_pg_shapping_cmd { + u8 pg_id; + u8 rsvd[3]; + __le32 pg_shapping_para; +}; + +struct hclge_bp_to_qs_map_cmd { + u8 tc_id; + u8 rsvd[2]; + u8 qs_group_id; + __le32 qs_bit_map; + u32 rsvd1; +}; + +#define hclge_tm_set_feild(dest, string, val) \ + hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH), val) +#define hclge_tm_get_feild(src, string) \ + hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \ + (HCLGE_TM_SHAP_##string##_LSH)) + +int hclge_tm_schd_init(struct hclge_dev *hdev); +int hclge_pause_setup_hw(struct hclge_dev *hdev); +#endif -- cgit v1.2.3-55-g7522 From 256727da73951b0cbb97105db921bd9e8221aec9 Mon Sep 17 00:00:00 2001 From: Salil Date: Wed, 2 Aug 2017 16:59:50 +0100 Subject: net: hns3: Add MDIO support to HNS3 Ethernet driver for hip08 SoC This patch adds the support of MDIO bus interface for HNS3 driver. Code provides various interfaces to start and stop the PHY layer and to read and write the MDIO bus or PHY. 
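As a usage illustration only (not part of the patch): once hclge_mac_mdio_config() below has registered the MII bus, PHY registers become reachable through the generic kernel MDIO helpers. The sketch assumes the fields added by this series (hdev->hw.mac.mdio_bus, hdev->hw.mac.phy_addr); the helper name hclge_example_read_phy_id is invented for the example, while mdiobus_read() and MII_PHYSID1/MII_PHYSID2 are the standard kernel APIs.

	#include <linux/mii.h>
	#include <linux/phy.h>

	/* Sketch: read the two PHY identifier words through the HCLGE MDIO bus.
	 * Each mdiobus_read() call ends up in hclge_mdio_read() via bus->read.
	 */
	static int hclge_example_read_phy_id(struct hclge_dev *hdev, u32 *phy_id)
	{
		struct mii_bus *bus = hdev->hw.mac.mdio_bus;
		int addr = hdev->hw.mac.phy_addr;
		int id1, id2;

		id1 = mdiobus_read(bus, addr, MII_PHYSID1);
		if (id1 < 0)
			return id1;

		id2 = mdiobus_read(bus, addr, MII_PHYSID2);
		if (id2 < 0)
			return id2;

		*phy_id = ((u32)id1 << 16) | (u32)id2; /* OUI + model/revision */
		return 0;
	}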
Signed-off-by: Daode Huang Signed-off-by: lipeng Signed-off-by: Salil Mehta Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 213 +++++++++++++++++++++ .../ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h | 17 ++ 2 files changed, 230 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c new file mode 100644 index 000000000000..a2add8bb1945 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include + +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_mdio.h" + +enum hclge_mdio_c22_op_seq { + HCLGE_MDIO_C22_WRITE = 1, + HCLGE_MDIO_C22_READ = 2 +}; + +#define HCLGE_MDIO_CTRL_START_B 0 +#define HCLGE_MDIO_CTRL_ST_S 1 +#define HCLGE_MDIO_CTRL_ST_M (0x3 << HCLGE_MDIO_CTRL_ST_S) +#define HCLGE_MDIO_CTRL_OP_S 3 +#define HCLGE_MDIO_CTRL_OP_M (0x3 << HCLGE_MDIO_CTRL_OP_S) + +#define HCLGE_MDIO_PHYID_S 0 +#define HCLGE_MDIO_PHYID_M (0x1f << HCLGE_MDIO_PHYID_S) + +#define HCLGE_MDIO_PHYREG_S 0 +#define HCLGE_MDIO_PHYREG_M (0x1f << HCLGE_MDIO_PHYREG_S) + +#define HCLGE_MDIO_STA_B 0 + +struct hclge_mdio_cfg_cmd { + u8 ctrl_bit; + u8 phyid; + u8 phyad; + u8 rsvd; + __le16 reserve; + __le16 data_wr; + __le16 data_rd; + __le16 sta; +}; + +static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum, + u16 data) +{ + struct hclge_mdio_cfg_cmd *mdio_cmd; + struct hclge_dev *hdev = bus->priv; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false); + + mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; + + hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); + + hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE); + + mdio_cmd->data_wr = cpu_to_le16(data); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio write fail when sending cmd, status is %d.\n", + ret); + return ret; + } + + return 0; +} + +static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) +{ + struct hclge_mdio_cfg_cmd *mdio_cmd; + struct hclge_dev *hdev = bus->priv; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true); + + mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data; + + hnae_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M, + HCLGE_MDIO_PHYID_S, phyid); + hnae_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M, + HCLGE_MDIO_PHYREG_S, regnum); + + hnae_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M, + HCLGE_MDIO_CTRL_ST_S, 1); + hnae_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M, + HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ); 
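	/* The descriptor now encodes a full clause-22 read request: the
	 * PHYID field carries the PHY address, the PHYREG field the
	 * register number, ST=1 plus OP=HCLGE_MDIO_C22_READ select a C22
	 * read, and the START bit is set.  hclge_cmd_send() below submits
	 * it on the command queue; the read value comes back in data_rd,
	 * with the STA bit serving as the error flag checked afterwards.
	 */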
+ + /* Read out phy data */ + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "mdio read fail when get data, status is %d.\n", + ret); + return ret; + } + + if (hnae_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) { + dev_err(&hdev->pdev->dev, "mdio read data error\n"); + return -EIO; + } + + return le16_to_cpu(mdio_cmd->data_rd); +} + +int hclge_mac_mdio_config(struct hclge_dev *hdev) +{ + struct hclge_mac *mac = &hdev->hw.mac; + struct phy_device *phydev; + struct mii_bus *mdio_bus; + int ret; + + if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) + return 0; + + mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev); + if (!mdio_bus) + return -ENOMEM; + + mdio_bus->name = "hisilicon MII bus"; + mdio_bus->read = hclge_mdio_read; + mdio_bus->write = hclge_mdio_write; + snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii", + dev_name(&hdev->pdev->dev)); + + mdio_bus->parent = &hdev->pdev->dev; + mdio_bus->priv = hdev; + mdio_bus->phy_mask = ~(1 << mac->phy_addr); + ret = mdiobus_register(mdio_bus); + if (ret) { + dev_err(mdio_bus->parent, + "Failed to register MDIO bus ret = %#x\n", ret); + return ret; + } + + phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr); + if (!phydev || IS_ERR(phydev)) { + dev_err(mdio_bus->parent, "Failed to get phy device\n"); + mdiobus_unregister(mdio_bus); + return -EIO; + } + + mac->phydev = phydev; + mac->mdio_bus = mdio_bus; + + return 0; +} + +static void hclge_mac_adjust_link(struct net_device *netdev) +{ + struct hnae3_handle *h = *((void **)netdev_priv(netdev)); + struct hclge_vport *vport = hclge_get_vport(h); + struct hclge_dev *hdev = vport->back; + int duplex, speed; + int ret; + + speed = netdev->phydev->speed; + duplex = netdev->phydev->duplex; + + ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); + if (ret) + netdev_err(netdev, "failed to adjust link.\n"); +} + +int hclge_mac_start_phy(struct hclge_dev *hdev) +{ + struct net_device *netdev = hdev->vport[0].nic.netdev; + struct phy_device *phydev = hdev->hw.mac.phydev; + int ret; + + if (!phydev) + return 0; + + ret = phy_connect_direct(netdev, phydev, + hclge_mac_adjust_link, + PHY_INTERFACE_MODE_SGMII); + if (ret) { + netdev_err(netdev, "phy_connect_direct err.\n"); + return ret; + } + + phy_start(phydev); + + return 0; +} + +void hclge_mac_stop_phy(struct hclge_dev *hdev) +{ + struct net_device *netdev = hdev->vport[0].nic.netdev; + struct phy_device *phydev = netdev->phydev; + + if (!phydev) + return; + + phy_stop(phydev); + phy_disconnect(phydev); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h new file mode 100644 index 000000000000..c5e91cfb8f2c --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2016-2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef __HCLGE_MDIO_H +#define __HCLGE_MDIO_H + +int hclge_mac_mdio_config(struct hclge_dev *hdev); +int hclge_mac_start_phy(struct hclge_dev *hdev); +void hclge_mac_stop_phy(struct hclge_dev *hdev); + +#endif -- cgit v1.2.3-55-g7522 From 496d03e960ae2067b81c26a9ea46a028e9d4055d Mon Sep 17 00:00:00 2001 From: Salil Date: Wed, 2 Aug 2017 16:59:51 +0100 Subject: net: hns3: Add Ethtool support to HNS3 driver This patch adds the support of the Ethtool interface to the HNS3 Ethernet driver. Various commands to read the statistics, configure the offloading, loopback selftest etc. are supported. Signed-off-by: Daode Huang Signed-off-by: lipeng Signed-off-by: Salil Mehta Signed-off-by: Yisen Zhuang Signed-off-by: David S. Miller --- .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 482 +++++++++++++++++++++ 1 file changed, 482 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c new file mode 100644 index 000000000000..0ad65e47c77e --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2016~2017 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include + +#include "hns3_enet.h" + +struct hns3_stats { + char stats_string[ETH_GSTRING_LEN]; + int stats_size; + int stats_offset; +}; + +/* tqp related stats */ +#define HNS3_TQP_STAT(_string, _member) { \ + .stats_string = _string, \ + .stats_size = FIELD_SIZEOF(struct ring_stats, _member), \ + .stats_offset = offsetof(struct hns3_enet_ring, stats), \ +} \ + +static const struct hns3_stats hns3_txq_stats[] = { + /* Tx per-queue statistics */ + HNS3_TQP_STAT("tx_io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("tx_sw_err_cnt", sw_err_cnt), + HNS3_TQP_STAT("tx_seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("tx_pkts", tx_pkts), + HNS3_TQP_STAT("tx_bytes", tx_bytes), + HNS3_TQP_STAT("tx_err_cnt", tx_err_cnt), + HNS3_TQP_STAT("tx_restart_queue", restart_queue), + HNS3_TQP_STAT("tx_busy", tx_busy), +}; + +#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) + +static const struct hns3_stats hns3_rxq_stats[] = { + /* Rx per-queue statistics */ + HNS3_TQP_STAT("rx_io_err_cnt", io_err_cnt), + HNS3_TQP_STAT("rx_sw_err_cnt", sw_err_cnt), + HNS3_TQP_STAT("rx_seg_pkt_cnt", seg_pkt_cnt), + HNS3_TQP_STAT("rx_pkts", rx_pkts), + HNS3_TQP_STAT("rx_bytes", rx_bytes), + HNS3_TQP_STAT("rx_err_cnt", rx_err_cnt), + HNS3_TQP_STAT("rx_reuse_pg_cnt", reuse_pg_cnt), + HNS3_TQP_STAT("rx_err_pkt_len", err_pkt_len), + HNS3_TQP_STAT("rx_non_vld_descs", non_vld_descs), + HNS3_TQP_STAT("rx_err_bd_num", err_bd_num), + HNS3_TQP_STAT("rx_l2_err", l2_err), + HNS3_TQP_STAT("rx_l3l4_csum_err", l3l4_csum_err), +}; + +#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) + +#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT) + +struct hns3_link_mode_mapping { + u32 hns3_link_mode; + u32 ethtool_link_mode; +}; + +static const struct hns3_link_mode_mapping hns3_lm_map[] = { + {HNS3_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT}, + {HNS3_LM_AUTONEG_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT}, + {HNS3_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT}, + {HNS3_LM_PAUSE_BIT, ETHTOOL_LINK_MODE_Pause_BIT}, + 
{HNS3_LM_BACKPLANE_BIT, ETHTOOL_LINK_MODE_Backplane_BIT}, + {HNS3_LM_10BASET_HALF_BIT, ETHTOOL_LINK_MODE_10baseT_Half_BIT}, + {HNS3_LM_10BASET_FULL_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT}, + {HNS3_LM_100BASET_HALF_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT}, + {HNS3_LM_100BASET_FULL_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT}, + {HNS3_LM_1000BASET_FULL_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT}, +}; + +static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd, + bool is_advertised) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(hns3_lm_map); i++) { + if (!(caps & hns3_lm_map[i].hns3_link_mode)) + continue; + + if (is_advertised) { + ethtool_link_ksettings_zero_link_mode(cmd, + advertising); + __set_bit(hns3_lm_map[i].ethtool_link_mode, + cmd->link_modes.advertising); + } else { + ethtool_link_ksettings_zero_link_mode(cmd, + supported); + __set_bit(hns3_lm_map[i].ethtool_link_mode, + cmd->link_modes.supported); + } + } +} + +static int hns3_get_sset_count(struct net_device *netdev, int stringset) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + + if (!ops->get_sset_count) + return -EOPNOTSUPP; + + switch (stringset) { + case ETH_SS_STATS: + return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) + + ops->get_sset_count(h, stringset)); + + case ETH_SS_TEST: + return ops->get_sset_count(h, stringset); + } + + return 0; +} + +static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats, + u32 stat_count, u32 num_tqps) +{ +#define MAX_PREFIX_SIZE (8 + 4) + u32 size_left; + u32 i, j; + u32 n1; + + for (i = 0; i < num_tqps; i++) { + for (j = 0; j < stat_count; j++) { + data[ETH_GSTRING_LEN - 1] = '\0'; + + /* first, prepend the prefix string */ + n1 = snprintf(data, MAX_PREFIX_SIZE, "rcb_q%d_", i); + n1 = min_t(uint, n1, MAX_PREFIX_SIZE - 1); + size_left = (ETH_GSTRING_LEN - 1) - n1; + + /* now, concatenate the stats string to it */ + strncat(data, stats[j].stats_string, size_left); + data += ETH_GSTRING_LEN; + } + } + + return data; +} + +static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data) +{ + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + + /* get strings for Tx */ + data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT, + kinfo->num_tqps); + + /* get strings for Rx */ + data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT, + kinfo->num_tqps); + + return data; +} + +static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + char *buff = (char *)data; + + if (!ops->get_strings) + return; + + switch (stringset) { + case ETH_SS_STATS: + buff = hns3_get_strings_tqps(h, buff); + h->ae_algo->ops->get_strings(h, stringset, (u8 *)buff); + break; + case ETH_SS_TEST: + ops->get_strings(h, stringset, data); + break; + } +} + +static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data) +{ + struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_enet_ring *ring; + u8 *stat; + u32 i; + + /* get stats for Tx */ + for (i = 0; i < kinfo->num_tqps; i++) { + ring = nic_priv->ring_data[i].ring; + for (i = 0; i < HNS3_TXQ_STATS_COUNT; i++) { + stat = (u8 *)ring + hns3_txq_stats[i].stats_offset; + *data++ = *(u64 *)stat; + } + } + + /* get stats for Rx */ + 
for (i = 0; i < kinfo->num_tqps; i++) { + ring = nic_priv->ring_data[i + kinfo->num_tqps].ring; + for (i = 0; i < HNS3_RXQ_STATS_COUNT; i++) { + stat = (u8 *)ring + hns3_rxq_stats[i].stats_offset; + *data++ = *(u64 *)stat; + } + } + + return data; +} + +/* hns3_get_stats - get detail statistics. + * @netdev: net device + * @stats: statistics info. + * @data: statistics data. + */ +void hns3_get_stats(struct net_device *netdev, struct ethtool_stats *stats, + u64 *data) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u64 *p = data; + + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { + netdev_err(netdev, "could not get any statistics\n"); + return; + } + + h->ae_algo->ops->update_stats(h, &netdev->stats); + + /* get per-queue stats */ + p = hns3_get_stats_tqps(h, p); + + /* get MAC & other misc hardware stats */ + h->ae_algo->ops->get_stats(h, p); +} + +static void hns3_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + strncpy(drvinfo->version, hns3_driver_version, + sizeof(drvinfo->version)); + drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; + + strncpy(drvinfo->driver, h->pdev->driver->name, + sizeof(drvinfo->driver)); + drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; + + strncpy(drvinfo->bus_info, pci_name(h->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0'; + + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", + priv->ae_handle->ae_algo->ops->get_fw_version(h)); +} + +static u32 hns3_get_link(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h; + + h = priv->ae_handle; + + if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status) + return h->ae_algo->ops->get_status(h); + else + return 0; +} + +static void hns3_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + int queue_num = priv->ae_handle->kinfo.num_tqps; + + param->tx_max_pending = HNS3_RING_MAX_PENDING; + param->rx_max_pending = HNS3_RING_MAX_PENDING; + + param->tx_pending = priv->ring_data[0].ring->desc_num; + param->rx_pending = priv->ring_data[queue_num].ring->desc_num; +} + +static void hns3_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *param) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam) + h->ae_algo->ops->get_pauseparam(h, ¶m->autoneg, + ¶m->rx_pause, ¶m->tx_pause); +} + +static int hns3_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u32 supported_caps; + u32 advertised_caps; + u8 media_type; + u8 link_stat; + u8 auto_neg; + u8 duplex; + u32 speed; + + if (!h->ae_algo || !h->ae_algo->ops) + return -EOPNOTSUPP; + + /* 1.auto_neg&speed&duplex from cmd */ + if (h->ae_algo->ops->get_ksettings_an_result) { + h->ae_algo->ops->get_ksettings_an_result(h, &auto_neg, + &speed, &duplex); + cmd->base.autoneg = auto_neg; + cmd->base.speed = speed; + cmd->base.duplex = duplex; + + link_stat = hns3_get_link(netdev); + if (!link_stat) { + cmd->base.speed = (u32)SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + } + + /* 2.media_type get from 
bios parameter block */ + if (h->ae_algo->ops->get_media_type) + h->ae_algo->ops->get_media_type(h, &media_type); + + switch (media_type) { + case HNAE3_MEDIA_TYPE_FIBER: + cmd->base.port = PORT_FIBRE; + supported_caps = HNS3_LM_FIBRE_BIT | HNS3_LM_AUTONEG_BIT | + HNS3_LM_PAUSE_BIT | HNS3_LM_1000BASET_FULL_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_COPPER: + cmd->base.port = PORT_TP; + supported_caps = HNS3_LM_TP_BIT | HNS3_LM_AUTONEG_BIT | + HNS3_LM_PAUSE_BIT | HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | HNS3_LM_10BASET_HALF_BIT; + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_BACKPLANE: + cmd->base.port = PORT_NONE; + supported_caps = HNS3_LM_BACKPLANE_BIT | HNS3_LM_PAUSE_BIT | + HNS3_LM_AUTONEG_BIT | HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | HNS3_LM_10BASET_HALF_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_UNKNOWN: + default: + cmd->base.port = PORT_OTHER; + supported_caps = 0; + advertised_caps = 0; + break; + } + + /* now, map driver link modes to ethtool link modes */ + hns3_driv_to_eth_caps(supported_caps, cmd, false); + hns3_driv_to_eth_caps(advertised_caps, cmd, true); + + /* 3.mdix_ctrl&mdix get from phy reg */ + if (h->ae_algo->ops->get_mdix_mode) + h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, + &cmd->base.eth_tp_mdix); + /* 4.mdio_support */ + cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; + + return 0; +} + +static u32 hns3_get_rss_key_size(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || + !h->ae_algo->ops->get_rss_key_size) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss_key_size(h); +} + +static u32 hns3_get_rss_indir_size(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || + !h->ae_algo->ops->get_rss_indir_size) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss_indir_size(h); +} + +static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss) + return -EOPNOTSUPP; + + return h->ae_algo->ops->get_rss(h, indir, key, hfunc); +} + +static int hns3_set_rss(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss) + return -EOPNOTSUPP; + + /* currently we only support Toeplitz hash */ + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP)) { + netdev_err(netdev, + "hash func not supported (only Toeplitz hash)\n"); + return -EOPNOTSUPP; + } + if (!indir) { + netdev_err(netdev, + "set rss failed for indir is empty\n"); + return -EOPNOTSUPP; + } + + return h->ae_algo->ops->set_rss(h, indir, key, hfunc); +} + +static int hns3_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + + if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_tc_size) + return 
-EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = h->ae_algo->ops->get_tc_size(h); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct ethtool_ops hns3_ethtool_ops = { + .get_drvinfo = hns3_get_drvinfo, + .get_link = hns3_get_link, + .get_ringparam = hns3_get_ringparam, + .get_pauseparam = hns3_get_pauseparam, + .get_strings = hns3_get_strings, + .get_ethtool_stats = hns3_get_stats, + .get_sset_count = hns3_get_sset_count, + .get_rxnfc = hns3_get_rxnfc, + .get_rxfh_key_size = hns3_get_rss_key_size, + .get_rxfh_indir_size = hns3_get_rss_indir_size, + .get_rxfh = hns3_get_rss, + .set_rxfh = hns3_set_rss, + .get_link_ksettings = hns3_get_link_ksettings, +}; + +void hns3_ethtool_set_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &hns3_ethtool_ops; +} -- cgit v1.2.3-55-g7522 From 15e8e5ffd624702ba2fa0d27566069922561ae95 Mon Sep 17 00:00:00 2001 From: Salil Date: Wed, 2 Aug 2017 16:59:52 +0100 Subject: net: hns3: Add HNS3 driver to kernel build framework & MAINTAINERS This patch updates the MAINTAINERS file with HNS3 Ethernet driver maintainers names and other details. This also introduces the new Makefiles required to build the HNS3 Ethernet driver and updates the existing Kconfig file in the hisilicon folder. Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- MAINTAINERS | 8 +++++++ drivers/net/ethernet/hisilicon/Kconfig | 27 ++++++++++++++++++++++ drivers/net/ethernet/hisilicon/Makefile | 1 + drivers/net/ethernet/hisilicon/hns3/Makefile | 7 ++++++ .../net/ethernet/hisilicon/hns3/hns3pf/Makefile | 11 +++++++++ 5 files changed, 54 insertions(+) create mode 100644 drivers/net/ethernet/hisilicon/hns3/Makefile create mode 100644 drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile diff --git a/MAINTAINERS b/MAINTAINERS index c67618c97c5d..a515da73c7e4 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6148,6 +6148,14 @@ S: Maintained F: drivers/net/ethernet/hisilicon/ F: Documentation/devicetree/bindings/net/hisilicon*.txt +HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3) +M: Yisen Zhuang +M: Salil Mehta +L: netdev@vger.kernel.org +W: http://www.hisilicon.com +S: Maintained +F: drivers/net/ethernet/hisilicon/hns3/ + HISILICON ROCE DRIVER M: Lijun Ou M: Wei Hu(Xavier) diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index d11287e11371..91c7bdb9b43c 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -76,4 +76,31 @@ config HNS_ENET This selects the general ethernet driver for HNS. This module make use of any HNS AE driver, such as HNS_DSAF +config HNS3 + tristate "Hisilicon Network Subsystem Support HNS3 (Framework)" + depends on PCI + ---help--- + This selects the framework support for Hisilicon Network Subsystem 3. + This layer facilitates clients like ENET, RoCE and user-space ethernet + drivers(like ODP)to register with HNAE devices and their associated + operations. + +config HNS3_HCLGE + tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support" + depends on PCI_MSI + depends on HNS3 + ---help--- + This selects the HNS3_HCLGE network acceleration engine & its hardware + compatibility layer. The engine would be used in Hisilicon hip08 family of + SoCs and further upcoming SoCs. 
+ +config HNS3_ENET + tristate "Hisilicon HNS3 Ethernet Device Support" + depends on 64BIT && PCI + depends on HNS3 && HNS3_HCLGE + ---help--- + This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 + family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 + devices and their associated operations. + endif # NET_VENDOR_HISILICON diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile index 8661695024dc..3828c435c18f 100644 --- a/drivers/net/ethernet/hisilicon/Makefile +++ b/drivers/net/ethernet/hisilicon/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o obj-$(CONFIG_HIP04_ETH) += hip04_eth.o obj-$(CONFIG_HNS_MDIO) += hns_mdio.o obj-$(CONFIG_HNS) += hns/ +obj-$(CONFIG_HNS3) += hns3/ obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile new file mode 100644 index 000000000000..a9349e1f3e51 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the HISILICON network device drivers. +# + +obj-$(CONFIG_HNS3) += hns3pf/ + +obj-$(CONFIG_HNS3) += hnae3.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile new file mode 100644 index 000000000000..162e8a42acd0 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -0,0 +1,11 @@ +# +# Makefile for the HISILICON network device drivers. +# + +ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 + +obj-$(CONFIG_HNS3_HCLGE) += hclge.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o + +obj-$(CONFIG_HNS3_ENET) += hns3.o +hns3-objs = hns3_enet.o hns3_ethtool.o -- cgit v1.2.3-55-g7522 From e11e8729fafd3dc49397c28f320ac3b2d483abbc Mon Sep 17 00:00:00 2001 From: Romain Perier Date: Thu, 3 Aug 2017 09:49:03 +0200 Subject: net: arc_emac: Add support for ndo_do_ioctl net_device_ops operation This operation is required for handling ioctl commands like SIOCGMIIREG, when debugging MDIO registers from userspace. This commit adds support for this operation. Signed-off-by: Romain Perier Signed-off-by: David S. 
Miller --- drivers/net/ethernet/arc/emac_main.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 68de2f2652f2..3241af1ce718 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -720,6 +720,18 @@ static int arc_emac_set_address(struct net_device *ndev, void *p) return 0; } +static int arc_emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + if (!netif_running(dev)) + return -EINVAL; + + if (!dev->phydev) + return -ENODEV; + + return phy_mii_ioctl(dev->phydev, rq, cmd); +} + + static const struct net_device_ops arc_emac_netdev_ops = { .ndo_open = arc_emac_open, .ndo_stop = arc_emac_stop, @@ -727,6 +739,7 @@ static const struct net_device_ops arc_emac_netdev_ops = { .ndo_set_mac_address = arc_emac_set_address, .ndo_get_stats = arc_emac_stats, .ndo_set_rx_mode = arc_emac_set_rx_mode, + .ndo_do_ioctl = arc_emac_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = arc_emac_poll_controller, #endif -- cgit v1.2.3-55-g7522 From eb1e93a10ef5bedfc15d1404dc6a2c46d843cde9 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 3 Aug 2017 10:41:55 +0200 Subject: net: mvpp2: fix MVPP21_ISR_RXQ_GROUP_REG definition The MVPP21_ISR_RXQ_GROUP_REG register is not indexed by rxq, but by port, so we fix the parameter name accordingly. There are no functional changes. Signed-off-by: Thomas Petazzoni Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 48d21c1e09f2..4b36a153328a 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -185,7 +185,7 @@ /* Interrupt Cause and Mask registers */ #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) #define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 -#define MVPP21_ISR_RXQ_GROUP_REG(rxq) (0x5400 + 4 * (rxq)) +#define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) #define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf -- cgit v1.2.3-55-g7522 From b5635ad2e4906e8f8bd3ab145059ce413c03a7ad Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 3 Aug 2017 10:41:56 +0200 Subject: net: mvpp2: remove RX queue group reset code The RX queue group allocation is anyway re-done later in mvpp2_port_init(), so resetting it in mvpp2_init() is not very useful, and will be annoying as we are going to rework the RX queue group allocation logic. Signed-off-by: Thomas Petazzoni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 4b36a153328a..537d2b4adef3 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -6845,23 +6845,6 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) /* Rx Fifo Init */ mvpp2_rx_fifo_init(priv); - /* Reset Rx queue group interrupt configuration */ - for (i = 0; i < MVPP2_MAX_PORTS; i++) { - if (priv->hw_version == MVPP21) { - mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(i), - rxq_number); - continue; - } else { - u32 val; - - val = (i << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); - - val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); - } - } - if (priv->hw_version == MVPP21) writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT, priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG); -- cgit v1.2.3-55-g7522 From 09f8397553a24c66e0141ec57f2c1801acd4e4a0 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 3 Aug 2017 10:41:57 +0200 Subject: net: mvpp2: introduce per-port nrxqs/ntxqs variables Currently, the global variables rxq_number and txq_number hold the number of per-port TXQs and RXQs. Until now, such numbers were constant regardless of the driver configuration. As we are going to introduce different modes for TX and RX queues, these numbers will depend on the configuration (PPv2.1 vs. PPv2.2, exact queue distribution logic). Therefore, as a preparation, we move the number of RXQs and TXQs in the 'struct mvpp2_port' structure, next to the RXQs and TXQs descriptor arrays. For now, they remain initialized to the same default values as rxq_number/txq_number used to be initialized, but this will change in future commits. The only non-mechanical change in this patch is that the check to verify hardware constraints on the number of RXQs and TXQs is moved from mvpp2_probe() to mvpp2_port_probe(), since it's now in mvpp2_port_probe() that we initialize the per-port count of RXQ and TXQ. Signed-off-by: Thomas Petazzoni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 83 ++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 42 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 537d2b4adef3..84908aa88374 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -768,7 +768,9 @@ struct mvpp2_port { void __iomem *base; struct mvpp2_rx_queue **rxqs; + unsigned int nrxqs; struct mvpp2_tx_queue **txqs; + unsigned int ntxqs; struct net_device *dev; int pkt_size; @@ -1062,13 +1064,6 @@ struct mvpp2_bm_pool { u32 port_map; }; -/* Static declaractions */ - -/* Number of RXQs used by single port */ -static int rxq_number = MVPP2_DEFAULT_RXQ; -/* Number of TXQs used by single port */ -static int txq_number = MVPP2_MAX_TXQ; - #define MVPP2_DRIVER_NAME "mvpp2" #define MVPP2_DRIVER_VERSION "1.0" @@ -4070,7 +4065,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) port->pool_long->port_map |= (1 << port->id); - for (rxq = 0; rxq < rxq_number; rxq++) + for (rxq = 0; rxq < port->nrxqs; rxq++) mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); } @@ -4084,7 +4079,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) port->pool_short->port_map |= (1 << port->id); - for (rxq = 0; rxq < rxq_number; rxq++) + for (rxq = 0; rxq < port->nrxqs; rxq++) mvpp2_rxq_short_pool_set(port, rxq, port->pool_short->id); } @@ -4376,7 +4371,7 @@ static void mvpp2_defaults_set(struct mvpp2_port *port) MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); /* Enable Rx cache snoop */ - for (lrxq = 0; lrxq < rxq_number; lrxq++) { + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val |= MVPP2_SNOOP_PKT_SIZE_MASK | @@ -4394,7 +4389,7 @@ static void mvpp2_ingress_enable(struct mvpp2_port *port) u32 val; int lrxq, queue; - for (lrxq = 0; lrxq < rxq_number; lrxq++) { + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val &= ~MVPP2_RXQ_DISABLE_MASK; @@ -4407,7 +4402,7 @@ static void mvpp2_ingress_disable(struct mvpp2_port *port) u32 val; int lrxq, queue; - for (lrxq = 0; lrxq < rxq_number; lrxq++) { + for (lrxq = 0; lrxq < port->nrxqs; lrxq++) { queue = port->rxqs[lrxq]->id; val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); val |= MVPP2_RXQ_DISABLE_MASK; @@ -4426,7 +4421,7 @@ static void mvpp2_egress_enable(struct mvpp2_port *port) /* Enable all initialized TXs. 
*/ qmap = 0; - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { struct mvpp2_tx_queue *txq = port->txqs[queue]; if (txq->descs) @@ -4712,7 +4707,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg) struct mvpp2_port *port = arg; int queue; - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { int id = port->txqs[queue]->id; mvpp2_percpu_read(port->priv, smp_processor_id(), @@ -4753,7 +4748,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); } - for (txq = 0; txq < txq_number; txq++) { + for (txq = 0; txq < port->ntxqs; txq++) { val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; @@ -5229,7 +5224,7 @@ static void mvpp2_cleanup_txqs(struct mvpp2_port *port) val |= MVPP2_TX_PORT_FLUSH_MASK(port->id); mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val); - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; mvpp2_txq_clean(port, txq); mvpp2_txq_deinit(port, txq); @@ -5246,7 +5241,7 @@ static void mvpp2_cleanup_rxqs(struct mvpp2_port *port) { int queue; - for (queue = 0; queue < rxq_number; queue++) + for (queue = 0; queue < port->nrxqs; queue++) mvpp2_rxq_deinit(port, port->rxqs[queue]); } @@ -5255,7 +5250,7 @@ static int mvpp2_setup_rxqs(struct mvpp2_port *port) { int queue, err; - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { err = mvpp2_rxq_init(port, port->rxqs[queue]); if (err) goto err_cleanup; @@ -5273,7 +5268,7 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port) struct mvpp2_tx_queue *txq; int queue, err; - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { txq = port->txqs[queue]; err = mvpp2_txq_init(port, txq); if (err) @@ -5385,7 +5380,7 @@ static void mvpp2_tx_proc_cb(unsigned long data) port_pcpu->timer_scheduled = false; /* Process all the Tx queues */ - cause = (1 << txq_number) - 1; + cause = (1 << port->ntxqs) - 1; tx_todo = mvpp2_tx_done(port, cause); /* Set the timer in case not all the packets were processed */ @@ -6228,7 +6223,7 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, struct mvpp2_port *port = netdev_priv(dev); int queue; - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq = port->rxqs[queue]; rxq->time_coal = c->rx_coalesce_usecs; @@ -6237,7 +6232,7 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, mvpp2_rx_time_coal_set(port, rxq); } - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { struct mvpp2_tx_queue *txq = port->txqs[queue]; txq->done_pkts_coal = c->tx_max_coalesced_frames; @@ -6373,15 +6368,20 @@ static int mvpp2_port_init(struct mvpp2_port *port) struct mvpp2_txq_pcpu *txq_pcpu; int queue, cpu, err; - if (port->first_rxq + rxq_number > + /* Checks for hardware constraints */ + if (port->first_rxq + port->nrxqs > MVPP2_MAX_PORTS * priv->max_port_rxqs) return -EINVAL; + if (port->nrxqs % 4 || (port->nrxqs > priv->max_port_rxqs) || + (port->ntxqs > MVPP2_MAX_TXQ)) + return -EINVAL; + /* Disable port */ mvpp2_egress_disable(port); mvpp2_port_disable(port); - port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs), + port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), GFP_KERNEL); if 
(!port->txqs) return -ENOMEM; @@ -6389,7 +6389,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) /* Associate physical Tx queues to this port and initialize. * The mapping is predefined. */ - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { int queue_phy_id = mvpp2_txq_phys(port->id, queue); struct mvpp2_tx_queue *txq; @@ -6416,7 +6416,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) port->txqs[queue] = txq; } - port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs), + port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs), GFP_KERNEL); if (!port->rxqs) { err = -ENOMEM; @@ -6424,7 +6424,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) } /* Allocate and initialize Rx queue for this port */ - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq; /* Map physical Rx queue to port's logical Rx queue */ @@ -6444,19 +6444,19 @@ static int mvpp2_port_init(struct mvpp2_port *port) /* Configure Rx queue group interrupt for this port */ if (priv->hw_version == MVPP21) { mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), - rxq_number); + port->nrxqs); } else { u32 val; val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); - val = (rxq_number << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); + val = (port->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); } /* Create Rx descriptor rings */ - for (queue = 0; queue < rxq_number; queue++) { + for (queue = 0; queue < port->nrxqs; queue++) { struct mvpp2_rx_queue *rxq = port->rxqs[queue]; rxq->size = port->rx_ring_size; @@ -6484,7 +6484,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) return 0; err_free_percpu: - for (queue = 0; queue < txq_number; queue++) { + for (queue = 0; queue < port->ntxqs; queue++) { if (!port->txqs[queue]) continue; free_percpu(port->txqs[queue]->pcpu); @@ -6505,12 +6505,16 @@ static int mvpp2_port_probe(struct platform_device *pdev, const char *dt_mac_addr; const char *mac_from; char hw_mac_addr[ETH_ALEN]; + unsigned int ntxqs, nrxqs; u32 id; int features; int phy_mode; int err, i, cpu; - dev = alloc_etherdev_mqs(sizeof(*port), txq_number, rxq_number); + ntxqs = MVPP2_MAX_TXQ; + nrxqs = MVPP2_DEFAULT_RXQ; + + dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); if (!dev) return -ENOMEM; @@ -6540,6 +6544,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->ethtool_ops = &mvpp2_eth_tool_ops; port = netdev_priv(dev); + port->ntxqs = ntxqs; + port->nrxqs = nrxqs; port->irq = irq_of_parse_and_map(port_node, 0); if (port->irq <= 0) { @@ -6553,7 +6559,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->priv = priv; port->id = id; if (priv->hw_version == MVPP21) - port->first_rxq = port->id * rxq_number; + port->first_rxq = port->id * port->nrxqs; else port->first_rxq = port->id * priv->max_port_rxqs; @@ -6662,7 +6668,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, err_free_port_pcpu: free_percpu(port->pcpu); err_free_txq_pcpu: - for (i = 0; i < txq_number; i++) + for (i = 0; i < port->ntxqs; i++) free_percpu(port->txqs[i]->pcpu); err_free_stats: free_percpu(port->stats); @@ -6683,7 +6689,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port) of_node_put(port->phy_node); free_percpu(port->pcpu); free_percpu(port->stats); - for (i = 0; i < txq_number; i++) + for (i = 0; i < port->ntxqs; i++) 
free_percpu(port->txqs[i]->pcpu); irq_dispose_mapping(port->irq); free_netdev(port->dev); @@ -6800,13 +6806,6 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) int err, i; u32 val; - /* Checks for hardware constraints */ - if (rxq_number % 4 || (rxq_number > priv->max_port_rxqs) || - (txq_number > MVPP2_MAX_TXQ)) { - dev_err(&pdev->dev, "invalid queue size parameter\n"); - return -EINVAL; - } - /* MBUS windows configuration */ dram_target_info = mv_mbus_dram_info(); if (dram_target_info) -- cgit v1.2.3-55-g7522 From df089aa0acd75bb605e4cce72982942173e742ad Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 3 Aug 2017 10:41:58 +0200 Subject: net: mvpp2: move from cpu-centric naming to "software thread" naming The PPv2.2 IP has a concept of "software thread", with all registers of the PPv2.2 mapped 8 times, for concurrent accesses by 8 "software threads". In addition, interrupts on RX queues are associated to such "software thread". For most cases, we map a "software thread" to the more conventional concept of CPU, but we will soon have one exception: we will have a model where we have one TX interrupt per CPU (each using one software thread), and all RX events mapped to another software thread (associated to another interrupt). In preparation for this change, it makes sense to change the naming from MVPP2_MAX_CPUS to MVPP2_MAX_THREADS, and plan for 8 software threads instead of 4 currently. Signed-off-by: Thomas Petazzoni Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 84908aa88374..af38a215f0e0 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -685,7 +685,7 @@ enum mvpp2_prs_l3_cast { #define MVPP21_ADDR_SPACE_SZ 0 #define MVPP22_ADDR_SPACE_SZ SZ_64K -#define MVPP2_MAX_CPUS 4 +#define MVPP2_MAX_THREADS 8 enum mvpp2_bm_type { MVPP2_BM_FREE, @@ -701,11 +701,12 @@ struct mvpp2 { void __iomem *lms_base; void __iomem *iface_base; - /* On PPv2.2, each CPU can access the base register through a - * separate address space, each 64 KB apart from each - * other. + /* On PPv2.2, each "software thread" can access the base + * register through a separate address space, each 64 KB apart + * from each other. Typically, such address spaces will be + * used per CPU. 
*/ - void __iomem *cpu_base[MVPP2_MAX_CPUS]; + void __iomem *swth_base[MVPP2_MAX_THREADS]; /* Common clocks */ struct clk *pp_clk; @@ -1071,12 +1072,12 @@ struct mvpp2_bm_pool { static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data) { - writel(data, priv->cpu_base[0] + offset); + writel(data, priv->swth_base[0] + offset); } static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) { - return readl(priv->cpu_base[0] + offset); + return readl(priv->swth_base[0] + offset); } /* These accessors should be used to access: @@ -1118,13 +1119,13 @@ static u32 mvpp2_read(struct mvpp2 *priv, u32 offset) static void mvpp2_percpu_write(struct mvpp2 *priv, int cpu, u32 offset, u32 data) { - writel(data, priv->cpu_base[cpu] + offset); + writel(data, priv->swth_base[cpu] + offset); } static u32 mvpp2_percpu_read(struct mvpp2 *priv, int cpu, u32 offset) { - return readl(priv->cpu_base[cpu] + offset); + return readl(priv->swth_base[cpu] + offset); } static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, @@ -6874,7 +6875,7 @@ static int mvpp2_probe(struct platform_device *pdev) struct mvpp2 *priv; struct resource *res; void __iomem *base; - int port_count, cpu; + int port_count, i; int err; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); @@ -6901,12 +6902,12 @@ static int mvpp2_probe(struct platform_device *pdev) return PTR_ERR(priv->iface_base); } - for_each_present_cpu(cpu) { + for (i = 0; i < MVPP2_MAX_THREADS; i++) { u32 addr_space_sz; addr_space_sz = (priv->hw_version == MVPP21 ? MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ); - priv->cpu_base[cpu] = base + cpu * addr_space_sz; + priv->swth_base[i] = base + i * addr_space_sz; } if (priv->hw_version == MVPP21) -- cgit v1.2.3-55-g7522 From 591f4cfab38a6e69573210b21c0554e1acbbd532 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 3 Aug 2017 10:41:59 +0200 Subject: net: mvpp2: introduce queue_vector concept In preparation to the introduction of TX interrupts and improved RX queue distribution, this commit introduces the concept of "queue vector". A queue vector represents a number of RX and/or TX queues, and an associated NAPI instance and interrupt. This commit currently only creates a single queue_vector, so there are no changes in behavior, but it paves the way for additional queue_vector in the next commits. Signed-off-by: Thomas Petazzoni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 223 ++++++++++++++++++++++++++--------- 1 file changed, 169 insertions(+), 54 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index af38a215f0e0..1bf327271cee 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -686,6 +686,7 @@ enum mvpp2_prs_l3_cast { #define MVPP22_ADDR_SPACE_SZ SZ_64K #define MVPP2_MAX_THREADS 8 +#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS enum mvpp2_bm_type { MVPP2_BM_FREE, @@ -753,6 +754,18 @@ struct mvpp2_port_pcpu { struct tasklet_struct tx_done_tasklet; }; +struct mvpp2_queue_vector { + int irq; + struct napi_struct napi; + enum { MVPP2_QUEUE_VECTOR_SHARED, MVPP2_QUEUE_VECTOR_PRIVATE } type; + int sw_thread_id; + u16 sw_thread_mask; + int first_rxq; + int nrxqs; + u32 pending_cause_rx; + struct mvpp2_port *port; +}; + struct mvpp2_port { u8 id; @@ -761,8 +774,6 @@ struct mvpp2_port { */ int gop_id; - int irq; - struct mvpp2 *priv; /* Per-port registers' base address */ @@ -776,9 +787,6 @@ struct mvpp2_port { int pkt_size; - u32 pending_cause_rx; - struct napi_struct napi; - /* Per-CPU port control */ struct mvpp2_port_pcpu __percpu *pcpu; @@ -800,6 +808,9 @@ struct mvpp2_port { /* Index of first port's physical RXQ */ u8 first_rxq; + + struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; + unsigned int nqvecs; }; /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the @@ -4121,22 +4132,40 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) static inline void mvpp2_interrupts_enable(struct mvpp2_port *port) { - int cpu, cpu_mask = 0; + int i, sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; - for_each_present_cpu(cpu) - cpu_mask |= 1 << cpu; mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask)); + MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask)); } static inline void mvpp2_interrupts_disable(struct mvpp2_port *port) { - int cpu, cpu_mask = 0; + int i, sw_thread_mask = 0; + + for (i = 0; i < port->nqvecs; i++) + sw_thread_mask |= port->qvecs[i].sw_thread_mask; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; - for_each_present_cpu(cpu) - cpu_mask |= 1 << cpu; mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), - MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask)); + MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask)); +} + +static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec) +{ + struct mvpp2_port *port = qvec->port; + + mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id), + MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask)); } /* Mask the current CPU's Rx/Tx interrupts @@ -5287,11 +5316,11 @@ err_cleanup: /* The callback for per-port interrupt */ static irqreturn_t mvpp2_isr(int irq, void *dev_id) { - struct mvpp2_port *port = (struct mvpp2_port *)dev_id; + struct mvpp2_queue_vector *qv = dev_id; - mvpp2_interrupts_disable(port); + mvpp2_qvec_interrupt_disable(qv); - napi_schedule(&port->napi); + napi_schedule(&qv->napi); return IRQ_HANDLED; } @@ -5494,8 +5523,8 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb) } /* Main rx processing */ -static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, - struct mvpp2_rx_queue *rxq) +static int mvpp2_rx(struct mvpp2_port *port, 
struct napi_struct *napi, + int rx_todo, struct mvpp2_rx_queue *rxq) { struct net_device *dev = port->dev; int rx_received; @@ -5573,7 +5602,7 @@ err_drop_frame: skb->protocol = eth_type_trans(skb, dev); mvpp2_rx_csum(port, rx_status, skb); - napi_gro_receive(&port->napi, skb); + napi_gro_receive(napi, skb); } if (rcvd_pkts) { @@ -5782,8 +5811,11 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) u32 cause_rx_tx, cause_rx, cause_misc; int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); + struct mvpp2_queue_vector *qv; int cpu = smp_processor_id(); + qv = container_of(napi, struct mvpp2_queue_vector, napi); + /* Rx/Tx cause register * * Bits 0-15: each bit indicates received packets on the Rx queue @@ -5812,7 +5844,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; /* Process RX packets */ - cause_rx |= port->pending_cause_rx; + cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { int count; struct mvpp2_rx_queue *rxq; @@ -5821,7 +5853,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) if (!rxq) break; - count = mvpp2_rx(port, budget, rxq); + count = mvpp2_rx(port, napi, budget, rxq); rx_done += count; budget -= count; if (budget > 0) { @@ -5837,9 +5869,9 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) cause_rx = 0; napi_complete_done(napi, rx_done); - mvpp2_interrupts_enable(port); + mvpp2_qvec_interrupt_enable(qv); } - port->pending_cause_rx = cause_rx; + qv->pending_cause_rx = cause_rx; return rx_done; } @@ -5847,11 +5879,13 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) static void mvpp2_start_dev(struct mvpp2_port *port) { struct net_device *ndev = port->dev; + int i; mvpp2_gmac_max_rx_size_set(port); mvpp2_txp_max_tx_size_set(port); - napi_enable(&port->napi); + for (i = 0; i < port->nqvecs; i++) + napi_enable(&port->qvecs[i].napi); /* Enable interrupts on all CPUs */ mvpp2_interrupts_enable(port); @@ -5865,6 +5899,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port) static void mvpp2_stop_dev(struct mvpp2_port *port) { struct net_device *ndev = port->dev; + int i; /* Stop new packets from arriving to RXQs */ mvpp2_ingress_disable(port); @@ -5874,7 +5909,8 @@ static void mvpp2_stop_dev(struct mvpp2_port *port) /* Disable interrupts on all CPUs */ mvpp2_interrupts_disable(port); - napi_disable(&port->napi); + for (i = 0; i < port->nqvecs; i++) + napi_disable(&port->qvecs[i].napi); netif_carrier_off(port->dev); netif_tx_stop_all_queues(port->dev); @@ -5960,6 +5996,40 @@ static void mvpp2_phy_disconnect(struct mvpp2_port *port) phy_disconnect(ndev->phydev); } +static int mvpp2_irqs_init(struct mvpp2_port *port) +{ + int err, i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); + if (err) + goto err; + } + + return 0; +err: + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + free_irq(qv->irq, qv); + } + + return err; +} + +static void mvpp2_irqs_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + free_irq(qv->irq, qv); + } +} + static int mvpp2_open(struct net_device *dev) { struct mvpp2_port *port = netdev_priv(dev); @@ -6002,9 +6072,9 @@ static int mvpp2_open(struct net_device *dev) goto err_cleanup_rxqs; } - err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port); + err = 
mvpp2_irqs_init(port); if (err) { - netdev_err(port->dev, "cannot request IRQ %d\n", port->irq); + netdev_err(port->dev, "cannot init IRQs\n"); goto err_cleanup_txqs; } @@ -6023,7 +6093,7 @@ static int mvpp2_open(struct net_device *dev) return 0; err_free_irq: - free_irq(port->irq, port); + mvpp2_irqs_deinit(port); err_cleanup_txqs: mvpp2_cleanup_txqs(port); err_cleanup_rxqs: @@ -6043,7 +6113,7 @@ static int mvpp2_stop(struct net_device *dev) /* Mask interrupts on all CPUs */ on_each_cpu(mvpp2_interrupts_mask, port, 1); - free_irq(port->irq, port); + mvpp2_irqs_deinit(port); for_each_present_cpu(cpu) { port_pcpu = per_cpu_ptr(port->pcpu, cpu); @@ -6361,6 +6431,66 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; +static int mvpp2_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v = &port->qvecs[0]; + + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + v->sw_thread_id = 0; + v->sw_thread_mask = *cpumask_bits(cpu_online_mask); + v->port = port; + v->irq = irq_of_parse_and_map(port_node, 0); + if (v->irq <= 0) + return -EINVAL; + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + + port->nqvecs = 1; + + return 0; +} + +static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) +{ + int i; + + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); +} + +/* Configure Rx queue group interrupt for this port */ +static void mvpp2_rx_irqs_setup(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + int i; + + if (priv->hw_version == MVPP21) { + mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), + port->nrxqs); + return; + } + + /* Handle the more complicated PPv2.2 case */ + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *qv = port->qvecs + i; + + if (!qv->nrxqs) + continue; + + val = qv->sw_thread_id; + val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); + + val = qv->first_rxq; + val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET; + mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); + } +} + /* Initialize port HW */ static int mvpp2_port_init(struct mvpp2_port *port) { @@ -6442,19 +6572,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) port->rxqs[queue] = rxq; } - /* Configure Rx queue group interrupt for this port */ - if (priv->hw_version == MVPP21) { - mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id), - port->nrxqs); - } else { - u32 val; - - val = (port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val); - - val = (port->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET); - mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val); - } + mvpp2_rx_irqs_setup(port); /* Create Rx descriptor rings */ for (queue = 0; queue < port->nrxqs; queue++) { @@ -6545,14 +6663,13 @@ static int mvpp2_port_probe(struct platform_device *pdev, dev->ethtool_ops = &mvpp2_eth_tool_ops; port = netdev_priv(dev); + port->dev = dev; port->ntxqs = ntxqs; port->nrxqs = nrxqs; - port->irq = irq_of_parse_and_map(port_node, 0); - if (port->irq <= 0) { - err = -EINVAL; + err = mvpp2_queue_vectors_init(port, port_node); + if (err) goto err_free_netdev; - } if (of_property_read_bool(port_node, "marvell,loopback")) port->flags |= MVPP2_F_LOOPBACK; @@ -6572,14 +6689,14 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->base 
= devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(port->base)) { err = PTR_ERR(port->base); - goto err_free_irq; + goto err_deinit_qvecs; } } else { if (of_property_read_u32(port_node, "gop-port-id", &port->gop_id)) { err = -EINVAL; dev_err(&pdev->dev, "missing gop-port-id value\n"); - goto err_free_irq; + goto err_deinit_qvecs; } port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id); @@ -6589,7 +6706,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats); if (!port->stats) { err = -ENOMEM; - goto err_free_irq; + goto err_deinit_qvecs; } dt_mac_addr = of_get_mac_address(port_node); @@ -6610,7 +6727,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->tx_ring_size = MVPP2_MAX_TXD; port->rx_ring_size = MVPP2_MAX_RXD; - port->dev = dev; SET_NETDEV_DEV(dev, &pdev->dev); err = mvpp2_port_init(port); @@ -6645,7 +6761,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, (unsigned long)dev); } - netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); features = NETIF_F_SG | NETIF_F_IP_CSUM; dev->features = features | NETIF_F_RXCSUM; dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO; @@ -6673,8 +6788,8 @@ err_free_txq_pcpu: free_percpu(port->txqs[i]->pcpu); err_free_stats: free_percpu(port->stats); -err_free_irq: - irq_dispose_mapping(port->irq); +err_deinit_qvecs: + mvpp2_queue_vectors_deinit(port); err_free_netdev: of_node_put(phy_node); free_netdev(dev); @@ -6692,7 +6807,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port) free_percpu(port->stats); for (i = 0; i < port->ntxqs; i++) free_percpu(port->txqs[i]->pcpu); - irq_dispose_mapping(port->irq); + mvpp2_queue_vectors_deinit(port); free_netdev(port->dev); } -- cgit v1.2.3-55-g7522 From 213f428f5056affa627056a5953eec58e3adf5a3 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 3 Aug 2017 10:42:00 +0200 Subject: net: mvpp2: add support for TX interrupts and RX queue distribution modes This commit adds the support for two related features: - Support for TX interrupts, with one interrupt for each CPU - Support for different RX queue distribution modes MVPP2_QDIST_SINGLE_MODE where a single interrupt, shared by all CPUs, receives the RX events, and MVPP2_QDIST_MULTI_MODE, where the per-CPU interrupts used for TX events are also used for RX events. Since additional interrupts are needed, an update to the Device Tree binding is needed. However, backward compatibility is preserved with the old Device Tree binding, by gracefully degrading to the original behavior, with only one RX interrupt, and TX completion being handled by an hrtimer. Signed-off-by: Thomas Petazzoni Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 275 +++++++++++++++++++++++++++++++---- 1 file changed, 246 insertions(+), 29 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 1bf327271cee..39bc8fbbdd65 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -120,6 +120,9 @@ #define MVPP2_TXQ_DESC_ADDR_REG 0x2084 #define MVPP2_TXQ_DESC_SIZE_REG 0x2088 #define MVPP2_TXQ_DESC_SIZE_MASK 0x3ff0 +#define MVPP2_TXQ_THRESH_REG 0x2094 +#define MVPP2_TXQ_THRESH_OFFSET 16 +#define MVPP2_TXQ_THRESH_MASK 0x3fff #define MVPP2_AGGR_TXQ_UPDATE_REG 0x2090 #define MVPP2_TXQ_INDEX_REG 0x2098 #define MVPP2_TXQ_PREF_BUF_REG 0x209c @@ -183,6 +186,9 @@ #define MVPP22_AXI_CODE_DOMAIN_SYSTEM 3 /* Interrupt Cause and Mask registers */ +#define MVPP2_ISR_TX_THRESHOLD_REG(port) (0x5140 + 4 * (port)) +#define MVPP2_MAX_ISR_TX_THRESHOLD 0xfffff0 + #define MVPP2_ISR_RX_THRESHOLD_REG(rxq) (0x5200 + 4 * (rxq)) #define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 #define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) @@ -206,6 +212,7 @@ #define MVPP2_ISR_RX_TX_CAUSE_REG(port) (0x5480 + 4 * (port)) #define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK 0xffff #define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK 0xff0000 +#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET 16 #define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK BIT(24) #define MVPP2_CAUSE_FCS_ERR_MASK BIT(25) #define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK BIT(26) @@ -372,6 +379,7 @@ /* Coalescing */ #define MVPP2_TXDONE_COAL_PKTS_THRESH 15 #define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL +#define MVPP2_TXDONE_COAL_USEC 1000 #define MVPP2_RX_COAL_PKTS 32 #define MVPP2_RX_COAL_USEC 100 @@ -811,6 +819,9 @@ struct mvpp2_port { struct mvpp2_queue_vector qvecs[MVPP2_MAX_QVECS]; unsigned int nqvecs; + bool has_tx_irqs; + + u32 tx_time_coal; }; /* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the @@ -1076,6 +1087,15 @@ struct mvpp2_bm_pool { u32 port_map; }; +/* Queue modes */ +#define MVPP2_QDIST_SINGLE_MODE 0 +#define MVPP2_QDIST_MULTI_MODE 1 + +static int queue_mode = MVPP2_QDIST_SINGLE_MODE; + +module_param(queue_mode, int, 0444); +MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)"); + #define MVPP2_DRIVER_NAME "mvpp2" #define MVPP2_DRIVER_VERSION "1.0" @@ -4187,11 +4207,40 @@ static void mvpp2_interrupts_mask(void *arg) static void mvpp2_interrupts_unmask(void *arg) { struct mvpp2_port *port = arg; + u32 val; + + val = MVPP2_CAUSE_MISC_SUM_MASK | + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + if (port->has_tx_irqs) + val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; mvpp2_percpu_write(port->priv, smp_processor_id(), - MVPP2_ISR_RX_TX_MASK_REG(port->id), - (MVPP2_CAUSE_MISC_SUM_MASK | - MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); +} + +static void +mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) +{ + u32 val; + int i; + + if (port->priv->hw_version != MVPP22) + return; + + if (mask) + val = 0; + else + val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + + for (i = 0; i < port->nqvecs; i++) { + struct mvpp2_queue_vector *v = port->qvecs + i; + + if (v->type != MVPP2_QUEUE_VECTOR_SHARED) + continue; + + mvpp2_percpu_write(port->priv, v->sw_thread_id, + MVPP2_ISR_RX_TX_MASK_REG(port->id), val); + } } /* Port configuration routines */ @@ -4812,6 +4861,23 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, put_cpu(); } +/* For some reason in the LSP this is done on each CPU. Why ? 
*/ +static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + int cpu = get_cpu(); + u32 val; + + if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK) + txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK; + + val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_THRESH_REG, val); + + put_cpu(); +} + static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) { u64 tmp = (u64)clk_hz * usec; @@ -4848,6 +4914,22 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); } +static void mvpp2_tx_time_coal_set(struct mvpp2_port *port) +{ + unsigned long freq = port->priv->tclk; + u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + + if (val > MVPP2_MAX_ISR_TX_THRESHOLD) { + port->tx_time_coal = + mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq); + + /* re-evaluate to get actual register value */ + val = mvpp2_usec_to_cycles(port->tx_time_coal, freq); + } + + mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val); +} + /* Free Tx queue skbuffs */ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, @@ -4906,7 +4988,8 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, netif_tx_wake_queue(nq); } -static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) +static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, + int cpu) { struct mvpp2_tx_queue *txq; struct mvpp2_txq_pcpu *txq_pcpu; @@ -4917,7 +5000,7 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause) if (!txq) break; - txq_pcpu = this_cpu_ptr(txq->pcpu); + txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); if (txq_pcpu->count) { mvpp2_txq_done(port, txq, txq_pcpu); @@ -5305,6 +5388,14 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port) goto err_cleanup; } + if (port->has_tx_irqs) { + mvpp2_tx_time_coal_set(port); + for (queue = 0; queue < port->ntxqs; queue++) { + txq = port->txqs[queue]; + mvpp2_tx_pkts_coal_set(port, txq); + } + } + on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); return 0; @@ -5411,7 +5502,7 @@ static void mvpp2_tx_proc_cb(unsigned long data) /* Process all the Tx queues */ cause = (1 << port->ntxqs) - 1; - tx_todo = mvpp2_tx_done(port, cause); + tx_todo = mvpp2_tx_done(port, cause, smp_processor_id()); /* Set the timer in case not all the packets were processed */ if (tx_todo) @@ -5787,7 +5878,8 @@ out: mvpp2_txq_done(port, txq, txq_pcpu); /* Set the timer in case not all frags were processed */ - if (txq_pcpu->count <= frags && txq_pcpu->count > 0) { + if (!port->has_tx_irqs && txq_pcpu->count <= frags && + txq_pcpu->count > 0) { struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu); mvpp2_timer_set(port_pcpu); @@ -5808,7 +5900,7 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause) static int mvpp2_poll(struct napi_struct *napi, int budget) { - u32 cause_rx_tx, cause_rx, cause_misc; + u32 cause_rx_tx, cause_rx, cause_tx, cause_misc; int rx_done = 0; struct mvpp2_port *port = netdev_priv(napi->dev); struct mvpp2_queue_vector *qv; @@ -5826,11 +5918,10 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) * * Each CPU has its own Rx/Tx cause register */ - cause_rx_tx = mvpp2_percpu_read(port->priv, cpu, + cause_rx_tx = mvpp2_percpu_read(port->priv, qv->sw_thread_id, MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); - cause_rx_tx &= 
~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; - cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; + cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; if (cause_misc) { mvpp2_cause_error(port->dev, cause_misc); @@ -5841,9 +5932,15 @@ static int mvpp2_poll(struct napi_struct *napi, int budget) cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); } - cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; + if (cause_tx) { + cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; + mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); + } /* Process RX packets */ + cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; + cause_rx <<= qv->first_rxq; cause_rx |= qv->pending_cause_rx; while (cause_rx && budget > 0) { int count; @@ -6006,6 +6103,10 @@ static int mvpp2_irqs_init(struct mvpp2_port *port) err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv); if (err) goto err; + + if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) + irq_set_affinity_hint(qv->irq, + cpumask_of(qv->sw_thread_id)); } return 0; @@ -6013,6 +6114,7 @@ err: for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *qv = port->qvecs + i; + irq_set_affinity_hint(qv->irq, NULL); free_irq(qv->irq, qv); } @@ -6026,6 +6128,7 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port) for (i = 0; i < port->nqvecs; i++) { struct mvpp2_queue_vector *qv = port->qvecs + i; + irq_set_affinity_hint(qv->irq, NULL); free_irq(qv->irq, qv); } } @@ -6087,6 +6190,7 @@ static int mvpp2_open(struct net_device *dev) /* Unmask interrupts on all CPUs */ on_each_cpu(mvpp2_interrupts_unmask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, false); mvpp2_start_dev(port); @@ -6112,14 +6216,17 @@ static int mvpp2_stop(struct net_device *dev) /* Mask interrupts on all CPUs */ on_each_cpu(mvpp2_interrupts_mask, port, 1); + mvpp2_shared_interrupt_mask_unmask(port, true); mvpp2_irqs_deinit(port); - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + if (!port->has_tx_irqs) { + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); - hrtimer_cancel(&port_pcpu->tx_done_timer); - port_pcpu->timer_scheduled = false; - tasklet_kill(&port_pcpu->tx_done_tasklet); + hrtimer_cancel(&port_pcpu->tx_done_timer); + port_pcpu->timer_scheduled = false; + tasklet_kill(&port_pcpu->tx_done_tasklet); + } } mvpp2_cleanup_rxqs(port); mvpp2_cleanup_txqs(port); @@ -6303,10 +6410,18 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, mvpp2_rx_time_coal_set(port, rxq); } + if (port->has_tx_irqs) { + port->tx_time_coal = c->tx_coalesce_usecs; + mvpp2_tx_time_coal_set(port); + } + for (queue = 0; queue < port->ntxqs; queue++) { struct mvpp2_tx_queue *txq = port->txqs[queue]; txq->done_pkts_coal = c->tx_max_coalesced_frames; + + if (port->has_tx_irqs) + mvpp2_tx_pkts_coal_set(port, txq); } return 0; @@ -6431,8 +6546,11 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; -static int mvpp2_queue_vectors_init(struct mvpp2_port *port, - struct device_node *port_node) +/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that + * had a single IRQ defined per-port. 
+ */ +static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) { struct mvpp2_queue_vector *v = &port->qvecs[0]; @@ -6453,6 +6571,66 @@ static int mvpp2_queue_vectors_init(struct mvpp2_port *port, return 0; } +static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + struct mvpp2_queue_vector *v; + int i, ret; + + port->nqvecs = num_possible_cpus(); + if (queue_mode == MVPP2_QDIST_SINGLE_MODE) + port->nqvecs += 1; + + for (i = 0; i < port->nqvecs; i++) { + char irqname[16]; + + v = port->qvecs + i; + + v->port = port; + v->type = MVPP2_QUEUE_VECTOR_PRIVATE; + v->sw_thread_id = i; + v->sw_thread_mask = BIT(i); + + snprintf(irqname, sizeof(irqname), "tx-cpu%d", i); + + if (queue_mode == MVPP2_QDIST_MULTI_MODE) { + v->first_rxq = i * MVPP2_DEFAULT_RXQ; + v->nrxqs = MVPP2_DEFAULT_RXQ; + } else if (queue_mode == MVPP2_QDIST_SINGLE_MODE && + i == (port->nqvecs - 1)) { + v->first_rxq = 0; + v->nrxqs = port->nrxqs; + v->type = MVPP2_QUEUE_VECTOR_SHARED; + strncpy(irqname, "rx-shared", sizeof(irqname)); + } + + v->irq = of_irq_get_byname(port_node, irqname); + if (v->irq <= 0) { + ret = -EINVAL; + goto err; + } + + netif_napi_add(port->dev, &v->napi, mvpp2_poll, + NAPI_POLL_WEIGHT); + } + + return 0; + +err: + for (i = 0; i < port->nqvecs; i++) + irq_dispose_mapping(port->qvecs[i].irq); + return ret; +} + +static int mvpp2_queue_vectors_init(struct mvpp2_port *port, + struct device_node *port_node) +{ + if (port->has_tx_irqs) + return mvpp2_multi_queue_vectors_init(port, port_node); + else + return mvpp2_simple_queue_vectors_init(port, port_node); +} + static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port) { int i; @@ -6512,6 +6690,8 @@ static int mvpp2_port_init(struct mvpp2_port *port) mvpp2_egress_disable(port); mvpp2_port_disable(port); + port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; + port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), GFP_KERNEL); if (!port->txqs) @@ -6611,6 +6791,30 @@ err_free_percpu: return err; } +/* Checks if the port DT description has the TX interrupts + * described. On PPv2.1, there are no such interrupts. On PPv2.2, + * there are available, but we need to keep support for old DTs. 
+ */ +static bool mvpp2_port_has_tx_irqs(struct mvpp2 *priv, + struct device_node *port_node) +{ + char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", + "tx-cpu2", "tx-cpu3" }; + int ret, i; + + if (priv->hw_version == MVPP21) + return false; + + for (i = 0; i < 5; i++) { + ret = of_property_match_string(port_node, "interrupt-names", + irqs[i]); + if (ret < 0) + return false; + } + + return true; +} + /* Ports initialization */ static int mvpp2_port_probe(struct platform_device *pdev, struct device_node *port_node, @@ -6625,13 +6829,22 @@ static int mvpp2_port_probe(struct platform_device *pdev, const char *mac_from; char hw_mac_addr[ETH_ALEN]; unsigned int ntxqs, nrxqs; + bool has_tx_irqs; u32 id; int features; int phy_mode; int err, i, cpu; + has_tx_irqs = mvpp2_port_has_tx_irqs(priv, port_node); + + if (!has_tx_irqs) + queue_mode = MVPP2_QDIST_SINGLE_MODE; + ntxqs = MVPP2_MAX_TXQ; - nrxqs = MVPP2_DEFAULT_RXQ; + if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE) + nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus(); + else + nrxqs = MVPP2_DEFAULT_RXQ; dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs); if (!dev) @@ -6666,6 +6879,8 @@ static int mvpp2_port_probe(struct platform_device *pdev, port->dev = dev; port->ntxqs = ntxqs; port->nrxqs = nrxqs; + port->priv = priv; + port->has_tx_irqs = has_tx_irqs; err = mvpp2_queue_vectors_init(port, port_node); if (err) @@ -6674,7 +6889,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, if (of_property_read_bool(port_node, "marvell,loopback")) port->flags |= MVPP2_F_LOOPBACK; - port->priv = priv; port->id = id; if (priv->hw_version == MVPP21) port->first_rxq = port->id * port->nrxqs; @@ -6749,16 +6963,19 @@ static int mvpp2_port_probe(struct platform_device *pdev, goto err_free_txq_pcpu; } - for_each_present_cpu(cpu) { - port_pcpu = per_cpu_ptr(port->pcpu, cpu); + if (!port->has_tx_irqs) { + for_each_present_cpu(cpu) { + port_pcpu = per_cpu_ptr(port->pcpu, cpu); - hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, - HRTIMER_MODE_REL_PINNED); - port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; - port_pcpu->timer_scheduled = false; + hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_PINNED); + port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb; + port_pcpu->timer_scheduled = false; - tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb, - (unsigned long)dev); + tasklet_init(&port_pcpu->tx_done_tasklet, + mvpp2_tx_proc_cb, + (unsigned long)dev); + } } features = NETIF_F_SG | NETIF_F_IP_CSUM; -- cgit v1.2.3-55-g7522 From 5d3ecb24b5363f5249b78792409e879ec5ec7922 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 3 Aug 2017 10:42:01 +0200 Subject: dt-bindings: net: marvell-pp2: update interrupt-names with TX interrupts The PPv2.2 unit has several interrupts used for TX completion notification. This commit updates the Device Tree binding describing this HW block to mention such interrupts. While at it, we update the example to use a recent Device Tree example, that uses interrupts going through the ICU, and not to the GIC directly. Signed-off-by: Thomas Petazzoni Signed-off-by: David S. 
Miller --- .../devicetree/bindings/net/marvell-pp2.txt | 28 +++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt index 6b4956beff8c..8918ad3ccf14 100644 --- a/Documentation/devicetree/bindings/net/marvell-pp2.txt +++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt @@ -41,6 +41,10 @@ Optional properties (port): - marvell,loopback: port is loopback mode - phy: a phandle to a phy node defining the PHY address (as the reg property, a single integer). +- interrupt-names: if more than a single interrupt for rx is given, must + be the name associated to the interrupts listed. Valid + names are: "tx-cpu0", "tx-cpu1", "tx-cpu2", "tx-cpu3", + "rx-shared". Example for marvell,armada-375-pp2: @@ -80,19 +84,37 @@ cpm_ethernet: ethernet@0 { clock-names = "pp_clk", "gop_clk", "gp_clk"; eth0: eth0 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <0>; gop-port-id = <0>; }; eth1: eth1 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <1>; gop-port-id = <2>; }; eth2: eth2 { - interrupts = ; + interrupts = , + , + , + , + ; + interrupt-names = "tx-cpu0", "tx-cpu1", "tx-cpu2", + "tx-cpu3", "rx-shared"; port-id = <2>; gop-port-id = <3>; }; -- cgit v1.2.3-55-g7522 From 04b1d4e50e82536c12da00ee04a77510c459c844 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:11 +0200 Subject: net: core: Make the FIB notification chain generic The FIB notification chain is currently soley used by IPv4 code. However, we're going to introduce IPv6 FIB offload support, which requires these notification as well. As explained in commit c3852ef7f2f8 ("ipv4: fib: Replay events when registering FIB notifier"), upon registration to the chain, the callee receives a full dump of the FIB tables and rules by traversing all the net namespaces. The integrity of the dump is ensured by a per-namespace sequence counter that is incremented whenever a change to the tables or rules occurs. In order to allow more address families to use the chain, each family is expected to register its fib_notifier_ops in its pernet init. These operations allow the common code to read the family's sequence counter as well as dump its tables and rules in the given net namespace. Additionally, a 'family' parameter is added to sent notifications, so that listeners could distinguish between the different families. Implement the common code that allows listeners to register to the chain and for address families to register their fib_notifier_ops. Subsequent patches will implement these operations in IPv6. In the future, ipmr and ip6mr will be extended to provide these notifications as well. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 1 + drivers/net/ethernet/rocker/rocker_main.c | 1 + include/net/fib_notifier.h | 44 ++++++ include/net/ip_fib.h | 30 +--- include/net/net_namespace.h | 1 + include/net/netns/ipv4.h | 1 + net/core/Makefile | 3 +- net/core/fib_notifier.c | 164 +++++++++++++++++++++ net/ipv4/fib_frontend.c | 17 ++- net/ipv4/fib_notifier.c | 94 +++++------- net/ipv4/fib_rules.c | 5 +- net/ipv4/fib_semantics.c | 9 +- net/ipv4/fib_trie.c | 5 +- 13 files changed, 282 insertions(+), 93 deletions(-) create mode 100644 include/net/fib_notifier.h create mode 100644 net/core/fib_notifier.c diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 2f03c7e71584..b79f9b67f285 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -53,6 +53,7 @@ #include #include #include +#include #include "spectrum.h" #include "core.h" diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index b1e5c07099fa..ef38c1a41bdd 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h new file mode 100644 index 000000000000..241475224f74 --- /dev/null +++ b/include/net/fib_notifier.h @@ -0,0 +1,44 @@ +#ifndef __NET_FIB_NOTIFIER_H +#define __NET_FIB_NOTIFIER_H + +#include +#include +#include + +struct fib_notifier_info { + struct net *net; + int family; +}; + +enum fib_event_type { + FIB_EVENT_ENTRY_REPLACE, + FIB_EVENT_ENTRY_APPEND, + FIB_EVENT_ENTRY_ADD, + FIB_EVENT_ENTRY_DEL, + FIB_EVENT_RULE_ADD, + FIB_EVENT_RULE_DEL, + FIB_EVENT_NH_ADD, + FIB_EVENT_NH_DEL, +}; + +struct fib_notifier_ops { + int family; + struct list_head list; + unsigned int (*fib_seq_read)(struct net *net); + int (*fib_dump)(struct net *net, struct notifier_block *nb); + struct rcu_head rcu; +}; + +int call_fib_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, + struct fib_notifier_info *info); +int call_fib_notifiers(struct net *net, enum fib_event_type event_type, + struct fib_notifier_info *info); +int register_fib_notifier(struct notifier_block *nb, + void (*cb)(struct notifier_block *nb)); +int unregister_fib_notifier(struct notifier_block *nb); +struct fib_notifier_ops * +fib_notifier_ops_register(const struct fib_notifier_ops *tmpl, struct net *net); +void fib_notifier_ops_unregister(struct fib_notifier_ops *ops); + +#endif diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index ef8992d49bc3..c0295c3ec5f3 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -188,10 +189,6 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh); #define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? 
: \ FIB_RES_SADDR(net, res)) -struct fib_notifier_info { - struct net *net; -}; - struct fib_entry_notifier_info { struct fib_notifier_info info; /* must be first */ u32 dst; @@ -212,25 +209,14 @@ struct fib_nh_notifier_info { struct fib_nh *fib_nh; }; -enum fib_event_type { - FIB_EVENT_ENTRY_REPLACE, - FIB_EVENT_ENTRY_APPEND, - FIB_EVENT_ENTRY_ADD, - FIB_EVENT_ENTRY_DEL, - FIB_EVENT_RULE_ADD, - FIB_EVENT_RULE_DEL, - FIB_EVENT_NH_ADD, - FIB_EVENT_NH_DEL, -}; - -int register_fib_notifier(struct notifier_block *nb, - void (*cb)(struct notifier_block *nb)); -int unregister_fib_notifier(struct notifier_block *nb); -int call_fib_notifier(struct notifier_block *nb, struct net *net, - enum fib_event_type event_type, - struct fib_notifier_info *info); -int call_fib_notifiers(struct net *net, enum fib_event_type event_type, +int call_fib4_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, struct fib_notifier_info *info); +int call_fib4_notifiers(struct net *net, enum fib_event_type event_type, + struct fib_notifier_info *info); + +int __net_init fib4_notifier_init(struct net *net); +void __net_exit fib4_notifier_exit(struct net *net); void fib_notify(struct net *net, struct notifier_block *nb); #ifdef CONFIG_IP_MULTIPLE_TABLES diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 1c401bd4c2e0..57faa375eab9 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -88,6 +88,7 @@ struct net { /* core fib_rules */ struct list_head rules_ops; + struct list_head fib_notifier_ops; /* protected by net_mutex */ struct net_device *loopback_dev; /* The loopback */ struct netns_core core; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 9a14a0850b0e..20d061c805e3 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -159,6 +159,7 @@ struct netns_ipv4 { int sysctl_fib_multipath_hash_policy; #endif + struct fib_notifier_ops *notifier_ops; unsigned int fib_seq; /* protected by rtnl_mutex */ atomic_t rt_genid; diff --git a/net/core/Makefile b/net/core/Makefile index d501c4278015..56d771a887b6 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -9,7 +9,8 @@ obj-$(CONFIG_SYSCTL) += sysctl_net_core.o obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \ neighbour.o rtnetlink.o utils.o link_watch.o filter.o \ - sock_diag.o dev_ioctl.o tso.o sock_reuseport.o + sock_diag.o dev_ioctl.o tso.o sock_reuseport.o \ + fib_notifier.o obj-y += net-sysfs.o obj-$(CONFIG_PROC_FS) += net-procfs.o diff --git a/net/core/fib_notifier.c b/net/core/fib_notifier.c new file mode 100644 index 000000000000..292aab83702f --- /dev/null +++ b/net/core/fib_notifier.c @@ -0,0 +1,164 @@ +#include +#include +#include +#include +#include +#include +#include + +static ATOMIC_NOTIFIER_HEAD(fib_chain); + +int call_fib_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, + struct fib_notifier_info *info) +{ + info->net = net; + return nb->notifier_call(nb, event_type, info); +} +EXPORT_SYMBOL(call_fib_notifier); + +int call_fib_notifiers(struct net *net, enum fib_event_type event_type, + struct fib_notifier_info *info) +{ + info->net = net; + return atomic_notifier_call_chain(&fib_chain, event_type, info); +} +EXPORT_SYMBOL(call_fib_notifiers); + +static unsigned int fib_seq_sum(void) +{ + struct fib_notifier_ops *ops; + unsigned int fib_seq = 0; + struct net *net; + + rtnl_lock(); + for_each_net(net) { + list_for_each_entry(ops, &net->fib_notifier_ops, list) + fib_seq += 
ops->fib_seq_read(net); + } + rtnl_unlock(); + + return fib_seq; +} + +static int fib_net_dump(struct net *net, struct notifier_block *nb) +{ + struct fib_notifier_ops *ops; + + list_for_each_entry_rcu(ops, &net->fib_notifier_ops, list) { + int err = ops->fib_dump(net, nb); + + if (err) + return err; + } + + return 0; +} + +static bool fib_dump_is_consistent(struct notifier_block *nb, + void (*cb)(struct notifier_block *nb), + unsigned int fib_seq) +{ + atomic_notifier_chain_register(&fib_chain, nb); + if (fib_seq == fib_seq_sum()) + return true; + atomic_notifier_chain_unregister(&fib_chain, nb); + if (cb) + cb(nb); + return false; +} + +#define FIB_DUMP_MAX_RETRIES 5 +int register_fib_notifier(struct notifier_block *nb, + void (*cb)(struct notifier_block *nb)) +{ + int retries = 0; + int err; + + do { + unsigned int fib_seq = fib_seq_sum(); + struct net *net; + + rcu_read_lock(); + for_each_net_rcu(net) { + err = fib_net_dump(net, nb); + if (err) + goto err_fib_net_dump; + } + rcu_read_unlock(); + + if (fib_dump_is_consistent(nb, cb, fib_seq)) + return 0; + } while (++retries < FIB_DUMP_MAX_RETRIES); + + return -EBUSY; + +err_fib_net_dump: + rcu_read_unlock(); + return err; +} +EXPORT_SYMBOL(register_fib_notifier); + +int unregister_fib_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&fib_chain, nb); +} +EXPORT_SYMBOL(unregister_fib_notifier); + +static int __fib_notifier_ops_register(struct fib_notifier_ops *ops, + struct net *net) +{ + struct fib_notifier_ops *o; + + list_for_each_entry(o, &net->fib_notifier_ops, list) + if (ops->family == o->family) + return -EEXIST; + list_add_tail_rcu(&ops->list, &net->fib_notifier_ops); + return 0; +} + +struct fib_notifier_ops * +fib_notifier_ops_register(const struct fib_notifier_ops *tmpl, struct net *net) +{ + struct fib_notifier_ops *ops; + int err; + + ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL); + if (!ops) + return ERR_PTR(-ENOMEM); + + err = __fib_notifier_ops_register(ops, net); + if (err) + goto err_register; + + return ops; + +err_register: + kfree(ops); + return ERR_PTR(err); +} +EXPORT_SYMBOL(fib_notifier_ops_register); + +void fib_notifier_ops_unregister(struct fib_notifier_ops *ops) +{ + list_del_rcu(&ops->list); + kfree_rcu(ops, rcu); +} +EXPORT_SYMBOL(fib_notifier_ops_unregister); + +static int __net_init fib_notifier_net_init(struct net *net) +{ + INIT_LIST_HEAD(&net->fib_notifier_ops); + return 0; +} + +static struct pernet_operations fib_notifier_net_ops = { + .init = fib_notifier_net_init, +}; + +static int __init fib_notifier_init(void) +{ + return register_pernet_subsys(&fib_notifier_net_ops); +} + +subsys_initcall(fib_notifier_init); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 044d2a159a3c..2cba559f14df 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1247,22 +1247,28 @@ static int __net_init ip_fib_net_init(struct net *net) int err; size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ; - net->ipv4.fib_seq = 0; + err = fib4_notifier_init(net); + if (err) + return err; /* Avoid false sharing : Use at least a full cache line */ size = max_t(size_t, size, L1_CACHE_BYTES); net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL); - if (!net->ipv4.fib_table_hash) - return -ENOMEM; + if (!net->ipv4.fib_table_hash) { + err = -ENOMEM; + goto err_table_hash_alloc; + } err = fib4_rules_init(net); if (err < 0) - goto fail; + goto err_rules_init; return 0; -fail: +err_rules_init: kfree(net->ipv4.fib_table_hash); +err_table_hash_alloc: + 
fib4_notifier_exit(net); return err; } @@ -1292,6 +1298,7 @@ static void ip_fib_net_exit(struct net *net) #endif rtnl_unlock(); kfree(net->ipv4.fib_table_hash); + fib4_notifier_exit(net); } static int __net_init fib_net_init(struct net *net) diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c index e0714d975947..7cf1954bbadc 100644 --- a/net/ipv4/fib_notifier.c +++ b/net/ipv4/fib_notifier.c @@ -1,86 +1,66 @@ #include #include -#include +#include #include #include +#include #include #include -static ATOMIC_NOTIFIER_HEAD(fib_chain); - -int call_fib_notifier(struct notifier_block *nb, struct net *net, - enum fib_event_type event_type, - struct fib_notifier_info *info) +int call_fib4_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, + struct fib_notifier_info *info) { - info->net = net; - return nb->notifier_call(nb, event_type, info); + info->family = AF_INET; + return call_fib_notifier(nb, net, event_type, info); } -int call_fib_notifiers(struct net *net, enum fib_event_type event_type, - struct fib_notifier_info *info) +int call_fib4_notifiers(struct net *net, enum fib_event_type event_type, + struct fib_notifier_info *info) { + ASSERT_RTNL(); + + info->family = AF_INET; net->ipv4.fib_seq++; - info->net = net; - return atomic_notifier_call_chain(&fib_chain, event_type, info); + return call_fib_notifiers(net, event_type, info); } -static unsigned int fib_seq_sum(void) +static unsigned int fib4_seq_read(struct net *net) { - unsigned int fib_seq = 0; - struct net *net; + ASSERT_RTNL(); - rtnl_lock(); - for_each_net(net) - fib_seq += net->ipv4.fib_seq; - rtnl_unlock(); - - return fib_seq; + return net->ipv4.fib_seq; } -static bool fib_dump_is_consistent(struct notifier_block *nb, - void (*cb)(struct notifier_block *nb), - unsigned int fib_seq) +static int fib4_dump(struct net *net, struct notifier_block *nb) { - atomic_notifier_chain_register(&fib_chain, nb); - if (fib_seq == fib_seq_sum()) - return true; - atomic_notifier_chain_unregister(&fib_chain, nb); - if (cb) - cb(nb); - return false; + fib_rules_notify(net, nb); + fib_notify(net, nb); + + return 0; } -#define FIB_DUMP_MAX_RETRIES 5 -int register_fib_notifier(struct notifier_block *nb, - void (*cb)(struct notifier_block *nb)) -{ - int retries = 0; +static const struct fib_notifier_ops fib4_notifier_ops_template = { + .family = AF_INET, + .fib_seq_read = fib4_seq_read, + .fib_dump = fib4_dump, +}; - do { - unsigned int fib_seq = fib_seq_sum(); - struct net *net; +int __net_init fib4_notifier_init(struct net *net) +{ + struct fib_notifier_ops *ops; - /* Mutex semantics guarantee that every change done to - * FIB tries before we read the change sequence counter - * is now visible to us. 
- */ - rcu_read_lock(); - for_each_net_rcu(net) { - fib_rules_notify(net, nb); - fib_notify(net, nb); - } - rcu_read_unlock(); + net->ipv4.fib_seq = 0; - if (fib_dump_is_consistent(nb, cb, fib_seq)) - return 0; - } while (++retries < FIB_DUMP_MAX_RETRIES); + ops = fib_notifier_ops_register(&fib4_notifier_ops_template, net); + if (IS_ERR(ops)) + return PTR_ERR(ops); + net->ipv4.notifier_ops = ops; - return -EBUSY; + return 0; } -EXPORT_SYMBOL(register_fib_notifier); -int unregister_fib_notifier(struct notifier_block *nb) +void __net_exit fib4_notifier_exit(struct net *net) { - return atomic_notifier_chain_unregister(&fib_chain, nb); + fib_notifier_ops_unregister(net->ipv4.notifier_ops); } -EXPORT_SYMBOL(unregister_fib_notifier); diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 778ecf977eb2..acdbf5a24ac9 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -32,6 +32,7 @@ #include #include #include +#include struct fib4_rule { struct fib_rule common; @@ -193,7 +194,7 @@ static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net, .rule = rule, }; - return call_fib_notifier(nb, net, event_type, &info.info); + return call_fib4_notifier(nb, net, event_type, &info.info); } static int call_fib_rule_notifiers(struct net *net, @@ -204,7 +205,7 @@ static int call_fib_rule_notifiers(struct net *net, .rule = rule, }; - return call_fib_notifiers(net, event_type, &info.info); + return call_fib4_notifiers(net, event_type, &info.info); } /* Called with rcu_read_lock() */ diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index f62dc2463280..632b454ce77c 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -44,6 +44,7 @@ #include #include #include +#include #include "fib_lookup.h" @@ -1451,14 +1452,14 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh, if (IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && fib_nh->nh_flags & RTNH_F_LINKDOWN) break; - return call_fib_notifiers(dev_net(fib_nh->nh_dev), event_type, - &info.info); + return call_fib4_notifiers(dev_net(fib_nh->nh_dev), event_type, + &info.info); case FIB_EVENT_NH_DEL: if ((in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) && fib_nh->nh_flags & RTNH_F_LINKDOWN) || (fib_nh->nh_flags & RTNH_F_DEAD)) - return call_fib_notifiers(dev_net(fib_nh->nh_dev), - event_type, &info.info); + return call_fib4_notifiers(dev_net(fib_nh->nh_dev), + event_type, &info.info); default: break; } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 64668c69dda6..1a6ffb0dab9c 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -81,6 +81,7 @@ #include #include #include +#include #include #include "fib_lookup.h" @@ -97,7 +98,7 @@ static int call_fib_entry_notifier(struct notifier_block *nb, struct net *net, .type = type, .tb_id = tb_id, }; - return call_fib_notifier(nb, net, event_type, &info.info); + return call_fib4_notifier(nb, net, event_type, &info.info); } static int call_fib_entry_notifiers(struct net *net, @@ -113,7 +114,7 @@ static int call_fib_entry_notifiers(struct net *net, .type = type, .tb_id = tb_id, }; - return call_fib_notifiers(net, event_type, &info.info); + return call_fib4_notifiers(net, event_type, &info.info); } #define MAX_STAT_DEPTH 32 -- cgit v1.2.3-55-g7522 From 64e5e8252d69c68ae76258328ac7e5d2e5e923b0 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:12 +0200 Subject: mlxsw: spectrum_router: Ignore address families other than IPv4 We're about to add IPv6 notifications in the FIB notification chain, but the driver currently 
doesn't support these, so ignore them. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index b79f9b67f285..78c19512250d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3093,7 +3093,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, struct fib_notifier_info *info = ptr; struct mlxsw_sp_router *router; - if (!net_eq(info->net, &init_net)) + if (!net_eq(info->net, &init_net) || info->family != AF_INET) return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); -- cgit v1.2.3-55-g7522 From d371ac1e1b1124e22bc9a82a5d170ea721a73bef Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:13 +0200 Subject: rocker: Ignore address families other than IPv4 As in previous patch, ignore IPv6 notifications since the driver doesn't support these. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker_main.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index ef38c1a41bdd..fc8f8bdf6579 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2192,6 +2192,10 @@ static int rocker_router_fib_event(struct notifier_block *nb, { struct rocker *rocker = container_of(nb, struct rocker, fib_nb); struct rocker_fib_event_work *fib_work; + struct fib_notifier_info *info = ptr; + + if (info->family != AF_INET) + return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); if (WARN_ON(!fib_work)) -- cgit v1.2.3-55-g7522 From 1b2a4440858857f2f93bb2ec5bb3a60f4fcc25be Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:14 +0200 Subject: net: fib_rules: Implement notification logic in core Unlike the routing tables, the FIB rules share a common core, so instead of replicating the same logic for each address family we can simply dump the rules and send notifications from the core itself. To protect the integrity of the dump, a rules-specific sequence counter is added for each address family and incremented whenever a rule is added or deleted (under RTNL). Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/fib_rules.h | 9 +++++++ include/net/ip_fib.h | 24 +++++++++---------- net/core/fib_rules.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++ net/ipv4/fib_notifier.c | 9 +++++-- net/ipv4/fib_rules.c | 45 ++++++++--------------------------- 5 files changed, 101 insertions(+), 49 deletions(-) diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index c487bfa2f479..3d7f1cefc6f5 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h @@ -8,6 +8,7 @@ #include #include #include +#include struct fib_kuid_range { kuid_t start; @@ -57,6 +58,7 @@ struct fib_rules_ops { int addr_size; int unresolved_rules; int nr_goto_rules; + unsigned int fib_rules_seq; int (*action)(struct fib_rule *, struct flowi *, int, @@ -89,6 +91,11 @@ struct fib_rules_ops { struct rcu_head rcu; }; +struct fib_rule_notifier_info { + struct fib_notifier_info info; /* must be first */ + struct fib_rule *rule; +}; + #define FRA_GENERIC_POLICY \ [FRA_IIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \ [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \ @@ -143,6 +150,8 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags, int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table, u32 flags); bool fib_rule_matchall(const struct fib_rule *rule); +int fib_rules_dump(struct net *net, struct notifier_block *nb, int family); +unsigned int fib_rules_seq_read(struct net *net, int family); int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index c0295c3ec5f3..1a7f7e424320 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -199,11 +199,6 @@ struct fib_entry_notifier_info { u32 tb_id; }; -struct fib_rule_notifier_info { - struct fib_notifier_info info; /* must be first */ - struct fib_rule *rule; -}; - struct fib_nh_notifier_info { struct fib_notifier_info info; /* must be first */ struct fib_nh *fib_nh; @@ -219,13 +214,6 @@ int __net_init fib4_notifier_init(struct net *net); void __net_exit fib4_notifier_exit(struct net *net); void fib_notify(struct net *net, struct notifier_block *nb); -#ifdef CONFIG_IP_MULTIPLE_TABLES -void fib_rules_notify(struct net *net, struct notifier_block *nb); -#else -static inline void fib_rules_notify(struct net *net, struct notifier_block *nb) -{ -} -#endif struct fib_table { struct hlist_node tb_hlist; @@ -298,6 +286,16 @@ static inline bool fib4_rule_default(const struct fib_rule *rule) return true; } +static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb) +{ + return 0; +} + +static inline unsigned int fib4_rules_seq_read(struct net *net) +{ + return 0; +} + #else /* CONFIG_IP_MULTIPLE_TABLES */ int __net_init fib4_rules_init(struct net *net); void __net_exit fib4_rules_exit(struct net *net); @@ -343,6 +341,8 @@ out: } bool fib4_rule_default(const struct fib_rule *rule); +int fib4_rules_dump(struct net *net, struct notifier_block *nb); +unsigned int fib4_rules_seq_read(struct net *net); #endif /* CONFIG_IP_MULTIPLE_TABLES */ diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index fdcb1bcd2afa..fc0b65093417 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -299,6 +299,67 @@ out: } EXPORT_SYMBOL_GPL(fib_rules_lookup); +static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, + struct fib_rule *rule, int family) +{ + struct fib_rule_notifier_info info = { + .info.family = family, + .rule = rule, + }; + + 
return call_fib_notifier(nb, net, event_type, &info.info); +} + +static int call_fib_rule_notifiers(struct net *net, + enum fib_event_type event_type, + struct fib_rule *rule, + struct fib_rules_ops *ops) +{ + struct fib_rule_notifier_info info = { + .info.family = ops->family, + .rule = rule, + }; + + ops->fib_rules_seq++; + return call_fib_notifiers(net, event_type, &info.info); +} + +/* Called with rcu_read_lock() */ +int fib_rules_dump(struct net *net, struct notifier_block *nb, int family) +{ + struct fib_rules_ops *ops; + struct fib_rule *rule; + + ops = lookup_rules_ops(net, family); + if (!ops) + return -EAFNOSUPPORT; + list_for_each_entry_rcu(rule, &ops->rules_list, list) + call_fib_rule_notifier(nb, net, FIB_EVENT_RULE_ADD, rule, + family); + rules_ops_put(ops); + + return 0; +} +EXPORT_SYMBOL_GPL(fib_rules_dump); + +unsigned int fib_rules_seq_read(struct net *net, int family) +{ + unsigned int fib_rules_seq; + struct fib_rules_ops *ops; + + ASSERT_RTNL(); + + ops = lookup_rules_ops(net, family); + if (!ops) + return 0; + fib_rules_seq = ops->fib_rules_seq; + rules_ops_put(ops); + + return fib_rules_seq; +} +EXPORT_SYMBOL_GPL(fib_rules_seq_read); + static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, struct fib_rules_ops *ops) { @@ -548,6 +609,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, if (rule->tun_id) ip_tunnel_need_metadata(); + call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule, ops); notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid); flush_route_cache(ops); rules_ops_put(ops); @@ -687,6 +749,7 @@ int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, } } + call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule, ops); notify_rule_change(RTM_DELRULE, rule, ops, nlh, NETLINK_CB(skb).portid); fib_rule_put(rule); diff --git a/net/ipv4/fib_notifier.c b/net/ipv4/fib_notifier.c index 7cf1954bbadc..5d7afb145562 100644 --- a/net/ipv4/fib_notifier.c +++ b/net/ipv4/fib_notifier.c @@ -29,12 +29,17 @@ static unsigned int fib4_seq_read(struct net *net) { ASSERT_RTNL(); - return net->ipv4.fib_seq; + return net->ipv4.fib_seq + fib4_rules_seq_read(net); } static int fib4_dump(struct net *net, struct notifier_block *nb) { - fib_rules_notify(net, nb); + int err; + + err = fib4_rules_dump(net, nb); + if (err) + return err; + fib_notify(net, nb); return 0; diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index acdbf5a24ac9..35d646a62ad4 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -32,7 +32,6 @@ #include #include #include -#include struct fib4_rule { struct fib_rule common; @@ -69,6 +68,16 @@ bool fib4_rule_default(const struct fib_rule *rule) } EXPORT_SYMBOL_GPL(fib4_rule_default); +int fib4_rules_dump(struct net *net, struct notifier_block *nb) +{ + return fib_rules_dump(net, nb, AF_INET); +} + +unsigned int fib4_rules_seq_read(struct net *net) +{ + return fib_rules_seq_read(net, AF_INET); +} + int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res, unsigned int flags) { @@ -186,38 +195,6 @@ static struct fib_table *fib_empty_table(struct net *net) return NULL; } -static int call_fib_rule_notifier(struct notifier_block *nb, struct net *net, - enum fib_event_type event_type, - struct fib_rule *rule) -{ - struct fib_rule_notifier_info info = { - .rule = rule, - }; - - return call_fib4_notifier(nb, net, event_type, &info.info); -} - -static int call_fib_rule_notifiers(struct net *net, - enum fib_event_type event_type, - struct fib_rule *rule) -{ - struct 
fib_rule_notifier_info info = { - .rule = rule, - }; - - return call_fib4_notifiers(net, event_type, &info.info); -} - -/* Called with rcu_read_lock() */ -void fib_rules_notify(struct net *net, struct notifier_block *nb) -{ - struct fib_rules_ops *ops = net->ipv4.rules_ops; - struct fib_rule *rule; - - list_for_each_entry_rcu(rule, &ops->rules_list, list) - call_fib_rule_notifier(nb, net, FIB_EVENT_RULE_ADD, rule); -} - static const struct nla_policy fib4_rule_policy[FRA_MAX+1] = { FRA_GENERIC_POLICY, [FRA_FLOW] = { .type = NLA_U32 }, @@ -274,7 +251,6 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, rule4->tos = frh->tos; net->ipv4.fib_has_custom_rules = true; - call_fib_rule_notifiers(net, FIB_EVENT_RULE_ADD, rule); err = 0; errout: @@ -296,7 +272,6 @@ static int fib4_rule_delete(struct fib_rule *rule) net->ipv4.fib_num_tclassid_users--; #endif net->ipv4.fib_has_custom_rules = true; - call_fib_rule_notifiers(net, FIB_EVENT_RULE_DEL, rule); errout: return err; } -- cgit v1.2.3-55-g7522 From e3ea973159d53559c5ae9a9dbc824da9aba6cac0 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:15 +0200 Subject: ipv6: fib_rules: Check if rule is a default rule As explained in commit 3c71006d15fd ("ipv4: fib_rules: Check if rule is a default rule"), drivers supporting IPv6 FIB offload need to be able to sanitize the rules they don't support and potentially flush their tables. Add an IPv6 helper to check if a FIB rule is a default rule. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/ip6_fib.h | 5 +++++ net/ipv6/fib6_rules.c | 20 ++++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 1a88008cc6f5..6000b0dc51ee 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -295,6 +295,7 @@ int ipv6_route_open(struct inode *inode, struct file *file); #ifdef CONFIG_IPV6_MULTIPLE_TABLES int fib6_rules_init(void); void fib6_rules_cleanup(void); +bool fib6_rule_default(const struct fib_rule *rule); #else static inline int fib6_rules_init(void) { @@ -304,5 +305,9 @@ static inline void fib6_rules_cleanup(void) { return ; } +static inline bool fib6_rule_default(const struct fib_rule *rule) +{ + return true; +} #endif #endif diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index ec849d88a662..ef1fcee6bf16 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -29,6 +29,26 @@ struct fib6_rule { u8 tclass; }; +static bool fib6_rule_matchall(const struct fib_rule *rule) +{ + struct fib6_rule *r = container_of(rule, struct fib6_rule, common); + + if (r->dst.plen || r->src.plen || r->tclass) + return false; + return fib_rule_matchall(rule); +} + +bool fib6_rule_default(const struct fib_rule *rule) +{ + if (!fib6_rule_matchall(rule) || rule->action != FR_ACT_TO_TBL || + rule->l3mdev) + return false; + if (rule->table != RT6_TABLE_LOCAL && rule->table != RT6_TABLE_MAIN) + return false; + return true; +} +EXPORT_SYMBOL_GPL(fib6_rule_default); + struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, int flags, pol_lookup_t lookup) { -- cgit v1.2.3-55-g7522 From 16ab6d7d4d8cc037bb4be12c2b849ac92787e1ff Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:16 +0200 Subject: ipv6: fib: Add FIB notifiers callbacks We're about to add IPv6 FIB offload support, so implement the necessary callbacks in IPv6 code, which will later allow us to add routes and rules notifications. 
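Before the per-patch changelogs continue, a minimal sketch of how a listener is expected to consume the generic notifier interface added above may help; it is not part of the series, and the example_* names are purely illustrative. The block registers against the shared chain with a flush callback and filters events by the newly added info->family field, much as the rocker and mlxsw patches later in this series do.

#include <linux/notifier.h>
#include <linux/socket.h>
#include <net/fib_notifier.h>
#include <net/ip_fib.h>

static int example_fib_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct fib_notifier_info *info = ptr;

	/* After this series every notification carries the address family,
	 * so a listener that only handles IPv4 can bail out early.
	 */
	if (info->family != AF_INET)
		return NOTIFY_DONE;

	switch (event) {
	case FIB_EVENT_ENTRY_ADD:
	case FIB_EVENT_ENTRY_DEL:
		/* The payload is a struct fib_entry_notifier_info. The chain
		 * is an atomic notifier chain, so the callback must not
		 * sleep; a real driver would copy the info (taking a
		 * reference on the fib_info) and defer the table update to
		 * a work item.
		 */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static void example_fib_dump_flush(struct notifier_block *nb)
{
	/* Invoked by register_fib_notifier() when the initial dump raced
	 * with concurrent FIB changes; a driver would flush its offloaded
	 * tables here before the core retries the dump.
	 */
}

static struct notifier_block example_fib_nb = {
	.notifier_call = example_fib_event,
};

static int example_init(void)
{
	/* Replays existing rules and routes through example_fib_nb via the
	 * per-family fib_notifier_ops and then attaches the block to the
	 * chain, retrying the dump a bounded number of times if the summed
	 * sequence counters show it raced with FIB updates.
	 */
	return register_fib_notifier(&example_fib_nb, example_fib_dump_flush);
}

The same pattern, extended with an AF_INET6 case, is what the mlxsw patches at the end of this series implement by queueing family-specific work items.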
Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/ip6_fib.h | 11 ++++++++++ include/net/netns/ipv6.h | 1 + net/ipv6/Makefile | 2 +- net/ipv6/fib6_notifier.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++ net/ipv6/ip6_fib.c | 7 ++++++ 5 files changed, 75 insertions(+), 1 deletion(-) create mode 100644 net/ipv6/fib6_notifier.c diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 6000b0dc51ee..be8ddf3253dc 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -16,10 +16,12 @@ #include #include #include +#include #include #include #include #include +#include #ifdef CONFIG_IPV6_MULTIPLE_TABLES #define FIB6_TABLE_HASHSZ 256 @@ -292,6 +294,15 @@ int fib6_init(void); int ipv6_route_open(struct inode *inode, struct file *file); +int call_fib6_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, + struct fib_notifier_info *info); +int call_fib6_notifiers(struct net *net, enum fib_event_type event_type, + struct fib_notifier_info *info); + +int __net_init fib6_notifier_init(struct net *net); +void __net_exit fib6_notifier_exit(struct net *net); + #ifdef CONFIG_IPV6_MULTIPLE_TABLES int fib6_rules_init(void); void fib6_rules_cleanup(void); diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index de7745e2edcc..abdf3b40303b 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -86,6 +86,7 @@ struct netns_ipv6 { atomic_t dev_addr_genid; atomic_t fib6_sernum; struct seg6_pernet_data *seg6_data; + struct fib_notifier_ops *notifier_ops; }; #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index 217e9ff0e24b..f8b24c2e0d77 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -9,7 +9,7 @@ ipv6-objs := af_inet6.o anycast.o ip6_output.o ip6_input.o addrconf.o \ route.o ip6_fib.o ipv6_sockglue.o ndisc.o udp.o udplite.o \ raw.o icmp.o mcast.o reassembly.o tcp_ipv6.o ping.o \ exthdrs.o datagram.o ip6_flowlabel.o inet6_connection_sock.o \ - udp_offload.o seg6.o + udp_offload.o seg6.o fib6_notifier.o ipv6-offload := ip6_offload.o tcpv6_offload.o exthdrs_offload.o diff --git a/net/ipv6/fib6_notifier.c b/net/ipv6/fib6_notifier.c new file mode 100644 index 000000000000..c2bb1ab5b5eb --- /dev/null +++ b/net/ipv6/fib6_notifier.c @@ -0,0 +1,55 @@ +#include +#include +#include +#include +#include +#include +#include + +int call_fib6_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, + struct fib_notifier_info *info) +{ + info->family = AF_INET6; + return call_fib_notifier(nb, net, event_type, info); +} + +int call_fib6_notifiers(struct net *net, enum fib_event_type event_type, + struct fib_notifier_info *info) +{ + info->family = AF_INET6; + return call_fib_notifiers(net, event_type, info); +} + +static unsigned int fib6_seq_read(struct net *net) +{ + return 0; +} + +static int fib6_dump(struct net *net, struct notifier_block *nb) +{ + return 0; +} + +static const struct fib_notifier_ops fib6_notifier_ops_template = { + .family = AF_INET6, + .fib_seq_read = fib6_seq_read, + .fib_dump = fib6_dump, +}; + +int __net_init fib6_notifier_init(struct net *net) +{ + struct fib_notifier_ops *ops; + + ops = fib_notifier_ops_register(&fib6_notifier_ops_template, net); + if (IS_ERR(ops)) + return PTR_ERR(ops); + net->ipv6.notifier_ops = ops; + + return 0; +} + +void __net_exit fib6_notifier_exit(struct net *net) +{ + fib_notifier_ops_unregister(net->ipv6.notifier_ops); +} diff --git a/net/ipv6/ip6_fib.c 
b/net/ipv6/ip6_fib.c index ebb299cf72b7..f93976e3f65c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1839,6 +1839,11 @@ static void fib6_gc_timer_cb(unsigned long arg) static int __net_init fib6_net_init(struct net *net) { size_t size = sizeof(struct hlist_head) * FIB6_TABLE_HASHSZ; + int err; + + err = fib6_notifier_init(net); + if (err) + return err; spin_lock_init(&net->ipv6.fib6_gc_lock); rwlock_init(&net->ipv6.fib6_walker_lock); @@ -1891,6 +1896,7 @@ out_fib_table_hash: out_rt6_stats: kfree(net->ipv6.rt6_stats); out_timer: + fib6_notifier_exit(net); return -ENOMEM; } @@ -1907,6 +1913,7 @@ static void fib6_net_exit(struct net *net) kfree(net->ipv6.fib6_main_tbl); kfree(net->ipv6.fib_table_hash); kfree(net->ipv6.rt6_stats); + fib6_notifier_exit(net); } static struct pernet_operations fib6_net_ops = { -- cgit v1.2.3-55-g7522 From df77fe4d9865c6354372876632bcbceeda84f6c8 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:17 +0200 Subject: ipv6: fib: Add in-kernel notifications for route add / delete As with IPv4, allow listeners of the FIB notification chain to receive notifications whenever a route is added, replaced or deleted. This is done by placing calls to the FIB notification chain in the two lowest level functions that end up performing these operations - namely, fib6_add_rt2node() and fib6_del_route(). Unlike IPv4, APPEND notifications aren't sent as the kernel doesn't distinguish between "append" (NLM_F_CREATE|NLM_F_APPEND) and "prepend" (NLM_F_CREATE). If NLM_F_EXCL isn't set, duplicate routes are always added after the existing duplicate routes. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/ip6_fib.h | 5 +++++ net/ipv6/ip6_fib.c | 17 +++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index be8ddf3253dc..e2b292b79e99 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -258,6 +258,11 @@ typedef struct rt6_info *(*pol_lookup_t)(struct net *, struct fib6_table *, struct flowi6 *, int); +struct fib6_entry_notifier_info { + struct fib_notifier_info info; /* must be first */ + struct rt6_info *rt; +}; + /* * exported functions */ diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index f93976e3f65c..595a57cbbc7b 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -302,6 +303,17 @@ static void __net_init fib6_tables_init(struct net *net) #endif +static int call_fib6_entry_notifiers(struct net *net, + enum fib_event_type event_type, + struct rt6_info *rt) +{ + struct fib6_entry_notifier_info info = { + .rt = rt, + }; + + return call_fib6_notifiers(net, event_type, &info.info); +} + static int fib6_dump_node(struct fib6_walker *w) { int res; @@ -879,6 +891,8 @@ add: *ins = rt; rt->rt6i_node = fn; atomic_inc(&rt->rt6i_ref); + call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_ADD, + rt); if (!info->skip_notify) inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); info->nl_net->ipv6.rt6_stats->fib_rt_entries++; @@ -906,6 +920,8 @@ add: rt->rt6i_node = fn; rt->dst.rt6_next = iter->dst.rt6_next; atomic_inc(&rt->rt6i_ref); + call_fib6_entry_notifiers(info->nl_net, FIB_EVENT_ENTRY_REPLACE, + rt); if (!info->skip_notify) inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE); if (!(fn->fn_flags & RTN_RTINFO)) { @@ -1459,6 +1475,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp, fib6_purge_rt(rt, fn, net); + 
call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, rt); if (!info->skip_notify) inet6_rt_notify(RTM_DELROUTE, rt, info, 0); rt6_release(rt); -- cgit v1.2.3-55-g7522 From dcb18f762f6ac83a6dc9cdc26dd694dcc167beb7 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:18 +0200 Subject: ipv6: fib_rules: Dump rules during registration to FIB chain Allow users of the FIB notification chain to receive a complete view of the IPv6 FIB rules upon registration to the chain. The integrity of the dump is ensured by a per-family sequence counter that is incremented (under RTNL) whenever a rule is added or deleted. All the sequence counters are read (under RTNL) and summed, prior and after the dump. In case the counters differ, then the dump is either restarted or the registration fails. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/ip6_fib.h | 10 ++++++++++ net/ipv6/fib6_notifier.c | 4 ++-- net/ipv6/fib6_rules.c | 11 +++++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index e2b292b79e99..dbe5537809f5 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -312,6 +312,8 @@ void __net_exit fib6_notifier_exit(struct net *net); int fib6_rules_init(void); void fib6_rules_cleanup(void); bool fib6_rule_default(const struct fib_rule *rule); +int fib6_rules_dump(struct net *net, struct notifier_block *nb); +unsigned int fib6_rules_seq_read(struct net *net); #else static inline int fib6_rules_init(void) { @@ -325,5 +327,13 @@ static inline bool fib6_rule_default(const struct fib_rule *rule) { return true; } +static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb) +{ + return 0; +} +static inline unsigned int fib6_rules_seq_read(struct net *net) +{ + return 0; +} #endif #endif diff --git a/net/ipv6/fib6_notifier.c b/net/ipv6/fib6_notifier.c index c2bb1ab5b5eb..298efc678f3b 100644 --- a/net/ipv6/fib6_notifier.c +++ b/net/ipv6/fib6_notifier.c @@ -23,12 +23,12 @@ int call_fib6_notifiers(struct net *net, enum fib_event_type event_type, static unsigned int fib6_seq_read(struct net *net) { - return 0; + return fib6_rules_seq_read(net); } static int fib6_dump(struct net *net, struct notifier_block *nb) { - return 0; + return fib6_rules_dump(net, nb); } static const struct fib_notifier_ops fib6_notifier_ops_template = { diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index ef1fcee6bf16..2f29e4e33bd3 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -14,6 +14,7 @@ */ #include +#include #include #include @@ -49,6 +50,16 @@ bool fib6_rule_default(const struct fib_rule *rule) } EXPORT_SYMBOL_GPL(fib6_rule_default); +int fib6_rules_dump(struct net *net, struct notifier_block *nb) +{ + return fib_rules_dump(net, nb, AF_INET6); +} + +unsigned int fib6_rules_seq_read(struct net *net) +{ + return fib_rules_seq_read(net, AF_INET6); +} + struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, int flags, pol_lookup_t lookup) { -- cgit v1.2.3-55-g7522 From e1ee0a5ba35d999caef94d659b4cb842e63aeb68 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:19 +0200 Subject: ipv6: fib: Dump tables during registration to FIB chain Dump all the FIB tables in each net namespace upon registration to the FIB notification chain so that the callee will have a complete view of the tables. 
The integrity of the dump is ensured by a per-table sequence counter that is incremented (under write lock) whenever a route is added or deleted from the table. All the sequence counters are read (under each table's read lock) and summed, prior and after the dump. In case the counters differ, then the dump is either restarted or the registration fails. While it's possible for a table to be modified after its counter has been read, this isn't really a problem. In case it happened before it was read the second time, then the comparison at the end will fail. If it happened afterwards, then we're guaranteed to be notified about the change, as the notification block is registered prior to the second read. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/ip6_fib.h | 4 +++ net/ipv6/fib6_notifier.c | 10 ++++-- net/ipv6/ip6_fib.c | 92 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 104 insertions(+), 2 deletions(-) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index dbe5537809f5..0b3052157e6b 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -235,6 +235,7 @@ struct fib6_table { struct fib6_node tb6_root; struct inet_peer_base tb6_peers; unsigned int flags; + unsigned int fib_seq; #define RT6_TABLE_HAS_DFLT_ROUTER BIT(0) }; @@ -308,6 +309,9 @@ int call_fib6_notifiers(struct net *net, enum fib_event_type event_type, int __net_init fib6_notifier_init(struct net *net); void __net_exit fib6_notifier_exit(struct net *net); +unsigned int fib6_tables_seq_read(struct net *net); +int fib6_tables_dump(struct net *net, struct notifier_block *nb); + #ifdef CONFIG_IPV6_MULTIPLE_TABLES int fib6_rules_init(void); void fib6_rules_cleanup(void); diff --git a/net/ipv6/fib6_notifier.c b/net/ipv6/fib6_notifier.c index 298efc678f3b..66a103ef7e86 100644 --- a/net/ipv6/fib6_notifier.c +++ b/net/ipv6/fib6_notifier.c @@ -23,12 +23,18 @@ int call_fib6_notifiers(struct net *net, enum fib_event_type event_type, static unsigned int fib6_seq_read(struct net *net) { - return fib6_rules_seq_read(net); + return fib6_tables_seq_read(net) + fib6_rules_seq_read(net); } static int fib6_dump(struct net *net, struct notifier_block *nb) { - return fib6_rules_dump(net, nb); + int err; + + err = fib6_rules_dump(net, nb); + if (err) + return err; + + return fib6_tables_dump(net, nb); } static const struct fib_notifier_ops fib6_notifier_ops_template = { diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 595a57cbbc7b..719c10480c74 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -303,6 +303,37 @@ static void __net_init fib6_tables_init(struct net *net) #endif +unsigned int fib6_tables_seq_read(struct net *net) +{ + unsigned int h, fib_seq = 0; + + rcu_read_lock(); + for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { + struct hlist_head *head = &net->ipv6.fib_table_hash[h]; + struct fib6_table *tb; + + hlist_for_each_entry_rcu(tb, head, tb6_hlist) { + read_lock_bh(&tb->tb6_lock); + fib_seq += tb->fib_seq; + read_unlock_bh(&tb->tb6_lock); + } + } + rcu_read_unlock(); + + return fib_seq; +} + +static int call_fib6_entry_notifier(struct notifier_block *nb, struct net *net, + enum fib_event_type event_type, + struct rt6_info *rt) +{ + struct fib6_entry_notifier_info info = { + .rt = rt, + }; + + return call_fib6_notifier(nb, net, event_type, &info.info); +} + static int call_fib6_entry_notifiers(struct net *net, enum fib_event_type event_type, struct rt6_info *rt) @@ -311,9 +342,70 @@ static int call_fib6_entry_notifiers(struct net *net, .rt 
= rt, }; + rt->rt6i_table->fib_seq++; return call_fib6_notifiers(net, event_type, &info.info); } +struct fib6_dump_arg { + struct net *net; + struct notifier_block *nb; +}; + +static void fib6_rt_dump(struct rt6_info *rt, struct fib6_dump_arg *arg) +{ + if (rt == arg->net->ipv6.ip6_null_entry) + return; + call_fib6_entry_notifier(arg->nb, arg->net, FIB_EVENT_ENTRY_ADD, rt); +} + +static int fib6_node_dump(struct fib6_walker *w) +{ + struct rt6_info *rt; + + for (rt = w->leaf; rt; rt = rt->dst.rt6_next) + fib6_rt_dump(rt, w->args); + w->leaf = NULL; + return 0; +} + +static void fib6_table_dump(struct net *net, struct fib6_table *tb, + struct fib6_walker *w) +{ + w->root = &tb->tb6_root; + read_lock_bh(&tb->tb6_lock); + fib6_walk(net, w); + read_unlock_bh(&tb->tb6_lock); +} + +/* Called with rcu_read_lock() */ +int fib6_tables_dump(struct net *net, struct notifier_block *nb) +{ + struct fib6_dump_arg arg; + struct fib6_walker *w; + unsigned int h; + + w = kzalloc(sizeof(*w), GFP_ATOMIC); + if (!w) + return -ENOMEM; + + w->func = fib6_node_dump; + arg.net = net; + arg.nb = nb; + w->args = &arg; + + for (h = 0; h < FIB6_TABLE_HASHSZ; h++) { + struct hlist_head *head = &net->ipv6.fib_table_hash[h]; + struct fib6_table *tb; + + hlist_for_each_entry_rcu(tb, head, tb6_hlist) + fib6_table_dump(net, tb, w); + } + + kfree(w); + + return 0; +} + static int fib6_dump_node(struct fib6_walker *w) { int res; -- cgit v1.2.3-55-g7522 From 61e4d01e16acddadb9723143637a20417fa67ac9 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:20 +0200 Subject: ipv6: fib: Add offload indication to routes Allow user space applications to see which routes are offloaded and which aren't by setting the RTNH_F_OFFLOAD flag when dumping them. To be consistent with IPv4, offload indication is provided on a per-nexthop basis. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/ipv6_route.h | 1 + net/ipv6/route.c | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h index d496c02e14bc..33e2a5732bd1 100644 --- a/include/uapi/linux/ipv6_route.h +++ b/include/uapi/linux/ipv6_route.h @@ -35,6 +35,7 @@ #define RTF_PREF(pref) ((pref) << 27) #define RTF_PREF_MASK 0x18000000 +#define RTF_OFFLOAD 0x20000000 /* offloaded route */ #define RTF_PCPU 0x40000000 /* read-only: can not be set by user */ #define RTF_LOCAL 0x80000000 diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 4d30c96a819d..aba07fce67fb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1820,6 +1820,11 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, goto out; } + if (cfg->fc_flags & RTF_OFFLOAD) { + NL_SET_ERR_MSG(extack, "Userspace can not set RTF_OFFLOAD"); + goto out; + } + if (cfg->fc_dst_len > 128) { NL_SET_ERR_MSG(extack, "Invalid prefix length"); goto out; @@ -3327,6 +3332,9 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, goto nla_put_failure; } + if (rt->rt6i_flags & RTF_OFFLOAD) + *flags |= RTNH_F_OFFLOAD; + /* not needed for multipath encoding b/c it has a rtnexthop struct */ if (!skip_oif && rt->dst.dev && nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) -- cgit v1.2.3-55-g7522 From c5b12410fa591acb1d48e167b9bd0d2a7a38498d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:21 +0200 Subject: ipv6: fib: Don't assume only nodes hold a reference on routes The code currently assumes that only FIB nodes can hold a reference on routes. 
Therefore, after fib6_purge_rt() has run and the route is no longer present in any intermediate nodes, it's assumed that its reference count would be 1 - taken by the node where it's currently stored. However, we're going to allow users other than the FIB to take a reference on a route, so this assumption is no longer valid and the BUG_ON() needs to be removed. Note that purging only takes place if the initial reference count is different than 1. I've left that check intact, as in the majority of systems (where routes are only referenced by the FIB), it does actually mean the route is present in intermediate nodes. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/ipv6/ip6_fib.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 719c10480c74..fa27905de92e 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -837,8 +837,6 @@ static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn, } fn = fn->parent; } - /* No more references are possible at this point. */ - BUG_ON(atomic_read(&rt->rt6i_ref) != 1); } } -- cgit v1.2.3-55-g7522 From 7483cea79957312e9f8e9cf760a1bc5d6c507113 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:22 +0200 Subject: ipv6: fib: Unlink replaced routes from their nodes When a route is deleted its node pointer is set to NULL to indicate it's no longer linked to its node. Do the same for routes that are replaced. This will later allow us to test if a route is still in the FIB by checking its node pointer instead of its reference count. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/ipv6/ip6_fib.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index fa27905de92e..fe193b48ef61 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1019,6 +1019,7 @@ add: fn->fn_flags |= RTN_RTINFO; } nsiblings = iter->rt6i_nsiblings; + iter->rt6i_node = NULL; fib6_purge_rt(iter, fn, info->nl_net); rt6_release(iter); @@ -1031,6 +1032,7 @@ add: break; if (rt6_qualify_for_ecmp(iter)) { *ins = iter->dst.rt6_next; + iter->rt6i_node = NULL; fib6_purge_rt(iter, fn, info->nl_net); rt6_release(iter); nsiblings--; -- cgit v1.2.3-55-g7522 From 9217d8c2fe743f02a3ce6d430fe3b5d514fd5f1c Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:23 +0200 Subject: ipv6: Regenerate host route according to node pointer upon loopback up When the loopback device is brought back up we need to check if the host route attached to the address is still in the FIB and regenerate one in case it's not. Host routes using the loopback device are always inserted into and removed from the FIB under RTNL (under which this function is called), so we can test their node pointer instead of the reference count in order to check if the route is in the FIB or not. Tested using the following script from Nicolas mentioned in commit a220445f9f43 ("ipv6: correctly add local routes when lo goes up"): $ ip link add dummy1 type dummy $ ip link set dummy1 up $ ip link set lo down ; ip link set lo up The host route is correctly regenerated. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/ipv6/addrconf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3c46e9513a31..e8e4f867b994 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3066,7 +3066,7 @@ static void init_loopback(struct net_device *dev) * lo device down, release this obsolete dst and * reallocate a new router for ifa. */ - if (!atomic_read(&sp_ifa->rt->rt6i_ref)) { + if (!sp_ifa->rt->rt6i_node) { ip6_rt_put(sp_ifa->rt); sp_ifa->rt = NULL; } else { -- cgit v1.2.3-55-g7522 From fc882fcff1ee774cb6be9d3c714ae5ab9eec5aa4 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:24 +0200 Subject: ipv6: Regenerate host route according to node pointer upon interface up When an interface is brought back up, the kernel tries to restore the host routes tied to its permanent addresses. However, if the host route was removed from the FIB, then we need to reinsert it. This is done by releasing the current dst and allocating a new, so as to not reuse a dst with obsolete values. Since this function is called under RTNL and using the same explanation from the previous patch, we can test if the route is in the FIB by checking its node pointer instead of its reference count. Tested using the following script and Andrey's reproducer mentioned in commit 8048ced9beb2 ("net: ipv6: regenerate host route if moved to gc list") and linked below: $ ip link set dev lo up $ ip link add dummy1 type dummy $ ip -6 address add cafe::1/64 dev dummy1 $ ip link set dev lo down # cafe::1/128 is removed $ ip link set dev dummy1 up $ ip link set dev lo up The host route is correctly regenerated. Signed-off-by: Ido Schimmel Link: http://lkml.kernel.org/r/CAAeHK+zSe82vc5gCRgr_EoUwiALPnWVdWJBPwJZBpbxYz=kGJw@mail.gmail.com Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/ipv6/addrconf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e8e4f867b994..30ee23eef268 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3321,11 +3321,11 @@ static void addrconf_gre_config(struct net_device *dev) static int fixup_permanent_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) { - /* rt6i_ref == 0 means the host route was removed from the + /* !rt6i_node means the host route was removed from the * FIB, for example, if 'lo' device is taken down. In that * case regenerate the host route. */ - if (!ifp->rt || !atomic_read(&ifp->rt->rt6i_ref)) { + if (!ifp->rt || !ifp->rt->rt6i_node) { struct rt6_info *rt, *prev; rt = addrconf_dst_alloc(idev, &ifp->addr, false); -- cgit v1.2.3-55-g7522 From a460aa83963b185a32a6377eb486b6e613ac8e38 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:25 +0200 Subject: ipv6: fib: Add helpers to hold / drop a reference on rt6_info Similar to commit 1c677b3d2828 ("ipv4: fib: Add fib_info_hold() helper") and commit b423cb10807b ("ipv4: fib: Export free_fib_info()") add an helper to hold a reference on rt6_info and export rt6_release() to drop it and potentially release the route. This is needed so that drivers capable of FIB offload could hold a reference on the route before queueing it for offload and drop it after the route has been programmed to the device's tables. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/ip6_fib.h | 16 ++++++++++++++++ net/ipv6/ip6_fib.c | 12 ++---------- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 0b3052157e6b..1d790ea40ea7 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -187,6 +187,22 @@ static inline void ip6_rt_put(struct rt6_info *rt) dst_release(&rt->dst); } +void rt6_free_pcpu(struct rt6_info *non_pcpu_rt); + +static inline void rt6_hold(struct rt6_info *rt) +{ + atomic_inc(&rt->rt6i_ref); +} + +static inline void rt6_release(struct rt6_info *rt) +{ + if (atomic_dec_and_test(&rt->rt6i_ref)) { + rt6_free_pcpu(rt); + dst_dev_put(&rt->dst); + dst_release(&rt->dst); + } +} + enum fib6_walk_state { #ifdef CONFIG_IPV6_SUBTREES FWS_S, diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index fe193b48ef61..69ed0043d117 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -154,7 +154,7 @@ static void node_free(struct fib6_node *fn) kmem_cache_free(fib6_node_kmem, fn); } -static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) +void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) { int cpu; @@ -177,15 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt) free_percpu(non_pcpu_rt->rt6i_pcpu); non_pcpu_rt->rt6i_pcpu = NULL; } - -static void rt6_release(struct rt6_info *rt) -{ - if (atomic_dec_and_test(&rt->rt6i_ref)) { - rt6_free_pcpu(rt); - dst_dev_put(&rt->dst); - dst_release(&rt->dst); - } -} +EXPORT_SYMBOL_GPL(rt6_free_pcpu); static void fib6_link_table(struct net *net, struct fib6_table *tb) { -- cgit v1.2.3-55-g7522 From 66a5763ac180d43f4a16770791669dc1e085cd5d Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:26 +0200 Subject: mlxsw: spectrum_router: Demultiplex FIB event based on family The FIB notification block currently only handles IPv4 events, but we want to start handling IPv6 events soon, so lay the groundwork now. Do that by preparing the work item and process it according to the notified address family. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 65 +++++++++++++++------- 1 file changed, 44 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 78c19512250d..166ecf5d58f1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3040,7 +3040,7 @@ struct mlxsw_sp_fib_event_work { unsigned long event; }; -static void mlxsw_sp_router_fib_event_work(struct work_struct *work) +static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) { struct mlxsw_sp_fib_event_work *fib_work = container_of(work, struct mlxsw_sp_fib_event_work, work); @@ -3085,6 +3085,42 @@ static void mlxsw_sp_router_fib_event_work(struct work_struct *work) kfree(fib_work); } +static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) +{ +} + +static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, + struct fib_notifier_info *info) +{ + switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ + case FIB_EVENT_ENTRY_APPEND: /* fall through */ + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + memcpy(&fib_work->fen_info, info, sizeof(fib_work->fen_info)); + /* Take referece on fib_info to prevent it from being + * freed while work is queued. Release it afterwards. 
+ */ + fib_info_hold(fib_work->fen_info.fi); + break; + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); + fib_rule_get(fib_work->fr_info.rule); + break; + case FIB_EVENT_NH_ADD: /* fall through */ + case FIB_EVENT_NH_DEL: + memcpy(&fib_work->fnh_info, info, sizeof(fib_work->fnh_info)); + fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); + break; + } +} + +static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, + struct fib_notifier_info *info) +{ +} + /* Called with rcu_read_lock() */ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, unsigned long event, void *ptr) @@ -3100,31 +3136,18 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, if (WARN_ON(!fib_work)) return NOTIFY_BAD; - INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work); router = container_of(nb, struct mlxsw_sp_router, fib_nb); fib_work->mlxsw_sp = router->mlxsw_sp; fib_work->event = event; - switch (event) { - case FIB_EVENT_ENTRY_REPLACE: /* fall through */ - case FIB_EVENT_ENTRY_APPEND: /* fall through */ - case FIB_EVENT_ENTRY_ADD: /* fall through */ - case FIB_EVENT_ENTRY_DEL: - memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info)); - /* Take referece on fib_info to prevent it from being - * freed while work is queued. Release it afterwards. - */ - fib_info_hold(fib_work->fen_info.fi); + switch (info->family) { + case AF_INET: + INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work); + mlxsw_sp_router_fib4_event(fib_work, info); break; - case FIB_EVENT_RULE_ADD: /* fall through */ - case FIB_EVENT_RULE_DEL: - memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info)); - fib_rule_get(fib_work->fr_info.rule); - break; - case FIB_EVENT_NH_ADD: /* fall through */ - case FIB_EVENT_NH_DEL: - memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info)); - fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent); + case AF_INET6: + INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work); + mlxsw_sp_router_fib6_event(fib_work, info); break; } -- cgit v1.2.3-55-g7522 From 583419fdf22bd2fc39e49520b29960f206b7ab44 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:27 +0200 Subject: mlxsw: spectrum_router: Sanitize IPv6 FIB rules We only allow FIB offload in the presence of default rules or an l3mdev rule. In a similar fashion to IPv4 FIB rules, sanitize IPv6 rules. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 25 ++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 166ecf5d58f1..6c7fc6a66aca 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -3087,6 +3088,23 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work) static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) { + struct mlxsw_sp_fib_event_work *fib_work = + container_of(work, struct mlxsw_sp_fib_event_work, work); + struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; + struct fib_rule *rule; + + rtnl_lock(); + switch (fib_work->event) { + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + rule = fib_work->fr_info.rule; + if (!fib6_rule_default(rule) && !rule->l3mdev) + mlxsw_sp_router_fib_abort(mlxsw_sp); + fib_rule_put(rule); + break; + } + rtnl_unlock(); + kfree(fib_work); } static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, @@ -3119,6 +3137,13 @@ static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work, static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_notifier_info *info) { + switch (fib_work->event) { + case FIB_EVENT_RULE_ADD: /* fall through */ + case FIB_EVENT_RULE_DEL: + memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); + fib_rule_get(fib_work->fr_info.rule); + break; + } } /* Called with rcu_read_lock() */ -- cgit v1.2.3-55-g7522 From 428b851f565f11c483cfa62021d674b6fb9d6ddc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:28 +0200 Subject: mlxsw: spectrum_router: Add support for IPv6 routes addition / deletion Allow directly connected and remote unicast IPv6 routes to be programmed to the device's tables. As with IPv4, identical routes - sharing the same destination prefix - are ordered in a FIB node according to their table ID and then the metric. While the kernel doesn't share the same trie for the local and main table, this does happen in the device, so ordering according to table ID is needed. Since individual nexthops can be added and deleted in IPv6, each FIB entry stores a linked list of the rt6_info structs it represents. Upon the addition or deletion of a nexthop, a new nexthop group is allocated according to the new configuration and the old one is destroyed. Identical groups aren't currently consolidated, but will be in a follow-up patchset. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1 + .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 726 ++++++++++++++++++++- 2 files changed, 724 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 695adff89d71..d56eea310509 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -75,6 +75,7 @@ config MLXSW_SPECTRUM depends on MLXSW_CORE && MLXSW_PCI && NET_SWITCHDEV && VLAN_8021Q depends on PSAMPLE || PSAMPLE=n depends on BRIDGE || BRIDGE=n + depends on IPV6 || IPV6=n select PARMAN select MLXFW default m diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 6c7fc6a66aca..2345c00ef706 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include #include @@ -406,6 +407,17 @@ struct mlxsw_sp_fib4_entry { u8 type; }; +struct mlxsw_sp_fib6_entry { + struct mlxsw_sp_fib_entry common; + struct list_head rt6_list; + unsigned int nrt6; +}; + +struct mlxsw_sp_rt6 { + struct list_head list; + struct rt6_info *rt; +}; + enum mlxsw_sp_l3proto { MLXSW_SP_L3_PROTO_IPV4, MLXSW_SP_L3_PROTO_IPV6, @@ -2126,6 +2138,26 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry) } } +static struct mlxsw_sp_nexthop * +mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp, + const struct mlxsw_sp_rt6 *mlxsw_sp_rt6) +{ + int i; + + for (i = 0; i < nh_grp->count; i++) { + struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i]; + struct rt6_info *rt = mlxsw_sp_rt6->rt; + + if (nh->rif && nh->rif->dev == rt->dst.dev && + ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr, + &rt->rt6i_gateway)) + return nh; + continue; + } + + return NULL; +} + static void mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) { @@ -2160,6 +2192,48 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) } } +static void +mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry, + common); + + if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) { + list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, + list)->rt->rt6i_flags |= RTF_OFFLOAD; + return; + } + + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + struct mlxsw_sp_nexthop *nh; + + nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6); + if (nh && nh->offloaded) + mlxsw_sp_rt6->rt->rt6i_flags |= RTF_OFFLOAD; + else + mlxsw_sp_rt6->rt->rt6i_flags &= ~RTF_OFFLOAD; + } +} + +static void +mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry, + common); + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + struct rt6_info *rt = mlxsw_sp_rt6->rt; + + rt->rt6i_flags &= ~RTF_OFFLOAD; + } +} + static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) { switch (fib_entry->fib_node->fib->proto) { @@ -2167,7 +2241,8 @@ static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) 
mlxsw_sp_fib4_entry_offload_set(fib_entry); break; case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_fib6_entry_offload_set(fib_entry); + break; } } @@ -2179,7 +2254,8 @@ mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) mlxsw_sp_fib4_entry_offload_unset(fib_entry); break; case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_fib6_entry_offload_unset(fib_entry); + break; } } @@ -2887,6 +2963,615 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); } +static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt) +{ + /* Packets with link-local destination IP arriving to the router + * are trapped to the CPU, so no need to program specific routes + * for them. + */ + if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL) + return true; + + /* Multicast routes aren't supported, so ignore them. Neighbour + * Discovery packets are specifically trapped. + */ + if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST) + return true; + + /* Cloned routes are irrelevant in the forwarding path. */ + if (rt->rt6i_flags & RTF_CACHE) + return true; + + return false; +} + +static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL); + if (!mlxsw_sp_rt6) + return ERR_PTR(-ENOMEM); + + /* In case of route replace, replaced route is deleted with + * no notification. Take reference to prevent accessing freed + * memory. + */ + mlxsw_sp_rt6->rt = rt; + rt6_hold(rt); + + return mlxsw_sp_rt6; +} + +#if IS_ENABLED(CONFIG_IPV6) +static void mlxsw_sp_rt6_release(struct rt6_info *rt) +{ + rt6_release(rt); +} +#else +static void mlxsw_sp_rt6_release(struct rt6_info *rt) +{ +} +#endif + +static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6) +{ + mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt); + kfree(mlxsw_sp_rt6); +} + +static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt) +{ + /* RTF_CACHE routes are ignored */ + return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY; +} + +static struct rt6_info * +mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) +{ + return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, + list)->rt; +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, + const struct rt6_info *nrt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + + if (!mlxsw_sp_fib6_rt_can_mp(nrt)) + return NULL; + + list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { + struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + + /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same + * virtual router. 
+ */ + if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) + continue; + if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) + break; + if (rt->rt6i_metric < nrt->rt6i_metric) + continue; + if (rt->rt6i_metric == nrt->rt6i_metric && + mlxsw_sp_fib6_rt_can_mp(rt)) + return fib6_entry; + if (rt->rt6i_metric > nrt->rt6i_metric) + break; + } + + return NULL; +} + +static struct mlxsw_sp_rt6 * +mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry, + const struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + if (mlxsw_sp_rt6->rt == rt) + return mlxsw_sp_rt6; + } + + return NULL; +} + +static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp, + struct mlxsw_sp_nexthop *nh, + const struct rt6_info *rt) +{ + struct net_device *dev = rt->dst.dev; + struct mlxsw_sp_rif *rif; + int err; + + nh->nh_grp = nh_grp; + memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr)); + + if (!dev) + return 0; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!rif) + return 0; + mlxsw_sp_nexthop_rif_init(nh, rif); + + err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh); + if (err) + goto err_nexthop_neigh_init; + + return 0; + +err_nexthop_neigh_init: + mlxsw_sp_nexthop_rif_fini(nh); + return err; +} + +static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop *nh) +{ + mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh); + mlxsw_sp_nexthop_rif_fini(nh); +} + +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + struct mlxsw_sp_nexthop *nh; + size_t alloc_size; + int i = 0; + int err; + + alloc_size = sizeof(*nh_grp) + + fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop); + nh_grp = kzalloc(alloc_size, GFP_KERNEL); + if (!nh_grp) + return ERR_PTR(-ENOMEM); + INIT_LIST_HEAD(&nh_grp->fib_list); +#if IS_ENABLED(CONFIG_IPV6) + nh_grp->neigh_tbl = &nd_tbl; +#endif + mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list, + struct mlxsw_sp_rt6, list); + nh_grp->gateway = !!(mlxsw_sp_rt6->rt->rt6i_flags & RTF_GATEWAY); + nh_grp->count = fib6_entry->nrt6; + for (i = 0; i < nh_grp->count; i++) { + struct rt6_info *rt = mlxsw_sp_rt6->rt; + + nh = &nh_grp->nexthops[i]; + err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt); + if (err) + goto err_nexthop6_init; + mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list); + } + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + return nh_grp; + +err_nexthop6_init: + for (i--; i >= 0; i--) { + nh = &nh_grp->nexthops[i]; + mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); + } + kfree(nh_grp); + return ERR_PTR(err); +} + +static void +mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_nexthop_group *nh_grp) +{ + struct mlxsw_sp_nexthop *nh; + int i = nh_grp->count; + + for (i--; i >= 0; i--) { + nh = &nh_grp->nexthops[i]; + mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); + } + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); + WARN_ON(nh_grp->adj_index_valid); + kfree(nh_grp); +} + +static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp; + + /* For now, don't consolidate nexthop groups */ + nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry); + if (IS_ERR(nh_grp)) + return PTR_ERR(nh_grp); + + 
list_add_tail(&fib6_entry->common.nexthop_group_node, + &nh_grp->fib_list); + fib6_entry->common.nh_group = nh_grp; + + return 0; +} + +static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_entry *fib_entry) +{ + struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group; + + list_del(&fib_entry->nexthop_group_node); + if (!list_empty(&nh_grp->fib_list)) + return; + mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp); +} + +static int +mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group; + int err; + + fib6_entry->common.nh_group = NULL; + list_del(&fib6_entry->common.nexthop_group_node); + + err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry); + if (err) + goto err_nexthop6_group_get; + + /* In case this entry is offloaded, then the adjacency index + * currently associated with it in the device's table is that + * of the old group. Start using the new one instead. + */ + err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common); + if (err) + goto err_fib_node_entry_add; + + if (list_empty(&old_nh_grp->fib_list)) + mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp); + + return 0; + +err_fib_node_entry_add: + mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); +err_nexthop6_group_get: + list_add_tail(&fib6_entry->common.nexthop_group_node, + &old_nh_grp->fib_list); + fib6_entry->common.nh_group = old_nh_grp; + return err; +} + +static int +mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + int err; + + mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt); + if (IS_ERR(mlxsw_sp_rt6)) + return PTR_ERR(mlxsw_sp_rt6); + + list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list); + fib6_entry->nrt6++; + + err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry); + if (err) + goto err_nexthop6_group_update; + + return 0; + +err_nexthop6_group_update: + fib6_entry->nrt6--; + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); + return err; +} + +static void +mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + struct rt6_info *rt) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt); + if (WARN_ON(!mlxsw_sp_rt6)) + return; + + fib6_entry->nrt6--; + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); +} + +static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp_fib_entry *fib_entry, + const struct rt6_info *rt) +{ + /* Packets hitting RTF_REJECT routes need to be discarded by the + * stack. We can rely on their destination device not having a + * RIF (it's the loopback device) and can thus use action type + * local, which will cause them to be trapped with a lower + * priority than packets that need to be locally received. 
+ */ + if (rt->rt6i_flags & RTF_LOCAL) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP; + else if (rt->rt6i_flags & RTF_REJECT) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; + else if (rt->rt6i_flags & RTF_GATEWAY) + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE; + else + fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL; +} + +static void +mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp; + + list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list, + list) { + fib6_entry->nrt6--; + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); + } +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node, + struct rt6_info *rt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib_entry *fib_entry; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + int err; + + fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL); + if (!fib6_entry) + return ERR_PTR(-ENOMEM); + fib_entry = &fib6_entry->common; + + mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt); + if (IS_ERR(mlxsw_sp_rt6)) { + err = PTR_ERR(mlxsw_sp_rt6); + goto err_rt6_create; + } + + mlxsw_sp_fib6_entry_type_set(fib_entry, mlxsw_sp_rt6->rt); + + INIT_LIST_HEAD(&fib6_entry->rt6_list); + list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list); + fib6_entry->nrt6 = 1; + err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry); + if (err) + goto err_nexthop6_group_get; + + fib_entry->fib_node = fib_node; + + return fib6_entry; + +err_nexthop6_group_get: + list_del(&mlxsw_sp_rt6->list); + mlxsw_sp_rt6_destroy(mlxsw_sp_rt6); +err_rt6_create: + kfree(fib6_entry); + return ERR_PTR(err); +} + +static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common); + mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry); + WARN_ON(fib6_entry->nrt6); + kfree(fib6_entry); +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, + const struct rt6_info *nrt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + + list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { + struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + + if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id) + continue; + if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) + break; + if (rt->rt6i_metric > nrt->rt6i_metric) + return fib6_entry; + } + + return NULL; +} + +static int +mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry) +{ + struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node; + struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry); + struct mlxsw_sp_fib6_entry *fib6_entry; + + fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt); + + if (fib6_entry) { + list_add_tail(&new6_entry->common.list, + &fib6_entry->common.list); + } else { + struct mlxsw_sp_fib6_entry *last; + + list_for_each_entry(last, &fib_node->entry_list, common.list) { + struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last); + + if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id) + break; + fib6_entry = last; + } + + if (fib6_entry) + list_add(&new6_entry->common.list, + &fib6_entry->common.list); + else + list_add(&new6_entry->common.list, + &fib_node->entry_list); + } + + return 0; +} + +static void +mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry) +{ + list_del(&fib6_entry->common.list); +} + +static int 
mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + int err; + + err = mlxsw_sp_fib6_node_list_insert(fib6_entry); + if (err) + return err; + + err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common); + if (err) + goto err_fib_node_entry_add; + + return 0; + +err_fib_node_entry_add: + mlxsw_sp_fib6_node_list_remove(fib6_entry); + return err; +} + +static void +mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common); + mlxsw_sp_fib6_node_list_remove(fib6_entry); +} + +static struct mlxsw_sp_fib6_entry * +mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp, + const struct rt6_info *rt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib_node *fib_node; + struct mlxsw_sp_fib *fib; + struct mlxsw_sp_vr *vr; + + vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id); + if (!vr) + return NULL; + fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6); + + fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr, + sizeof(rt->rt6i_dst.addr), + rt->rt6i_dst.plen); + if (!fib_node) + return NULL; + + list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { + struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry); + + if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id && + rt->rt6i_metric == iter_rt->rt6i_metric && + mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt)) + return fib6_entry; + } + + return NULL; +} + +static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, + struct rt6_info *rt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib_node *fib_node; + int err; + + if (mlxsw_sp->router->aborted) + return 0; + + if (mlxsw_sp_fib6_rt_should_ignore(rt)) + return 0; + + fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id, + &rt->rt6i_dst.addr, + sizeof(rt->rt6i_dst.addr), + rt->rt6i_dst.plen, + MLXSW_SP_L3_PROTO_IPV6); + if (IS_ERR(fib_node)) + return PTR_ERR(fib_node); + + /* Before creating a new entry, try to append route to an existing + * multipath entry. + */ + fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt); + if (fib6_entry) { + err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); + if (err) + goto err_fib6_entry_nexthop_add; + return 0; + } + + fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt); + if (IS_ERR(fib6_entry)) { + err = PTR_ERR(fib6_entry); + goto err_fib6_entry_create; + } + + err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry); + if (err) + goto err_fib6_node_entry_link; + + return 0; + +err_fib6_node_entry_link: + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); +err_fib6_entry_create: +err_fib6_entry_nexthop_add: + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); + return err; +} + +static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp, + struct rt6_info *rt) +{ + struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib_node *fib_node; + + if (mlxsw_sp->router->aborted) + return; + + if (mlxsw_sp_fib6_rt_should_ignore(rt)) + return; + + fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt); + if (WARN_ON(!fib6_entry)) + return; + + /* If route is part of a multipath entry, but not the last one + * removed, then only reduce its nexthop group. 
+ */ + if (!list_is_singular(&fib6_entry->rt6_list)) { + mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt); + return; + } + + fib_node = fib6_entry->common.fib_node; + + mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry); + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); +} + static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp, enum mlxsw_reg_ralxx_protocol proto, u8 tree_id) @@ -2967,6 +3652,23 @@ static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp, } } +static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib_node *fib_node) +{ + struct mlxsw_sp_fib6_entry *fib6_entry, *tmp; + + list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list, + common.list) { + bool do_break = &tmp->common.list == &fib_node->entry_list; + + mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry); + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); + if (do_break) + break; + } +} + static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { @@ -2975,7 +3677,7 @@ static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node); break; case MLXSW_SP_L3_PROTO_IPV6: - WARN_ON_ONCE(1); + mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node); break; } } @@ -3033,6 +3735,7 @@ static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp) struct mlxsw_sp_fib_event_work { struct work_struct work; union { + struct fib6_entry_notifier_info fen6_info; struct fib_entry_notifier_info fen_info; struct fib_rule_notifier_info fr_info; struct fib_nh_notifier_info fnh_info; @@ -3092,9 +3795,21 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; struct fib_rule *rule; + int err; rtnl_lock(); switch (fib_work->event) { + case FIB_EVENT_ENTRY_ADD: + err = mlxsw_sp_router_fib6_add(mlxsw_sp, + fib_work->fen6_info.rt); + if (err) + mlxsw_sp_router_fib_abort(mlxsw_sp); + mlxsw_sp_rt6_release(fib_work->fen6_info.rt); + break; + case FIB_EVENT_ENTRY_DEL: + mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt); + mlxsw_sp_rt6_release(fib_work->fen6_info.rt); + break; case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: rule = fib_work->fr_info.rule; @@ -3138,6 +3853,11 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_notifier_info *info) { switch (fib_work->event) { + case FIB_EVENT_ENTRY_ADD: /* fall through */ + case FIB_EVENT_ENTRY_DEL: + memcpy(&fib_work->fen6_info, info, sizeof(fib_work->fen6_info)); + rt6_hold(fib_work->fen6_info.rt); + break; case FIB_EVENT_RULE_ADD: /* fall through */ case FIB_EVENT_RULE_DEL: memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info)); -- cgit v1.2.3-55-g7522 From 0a7fd1ac2a6b316ceeb9a57a41ce0c45f6bff549 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:29 +0200 Subject: mlxsw: spectrum_router: Add support for route replace In case we got a replace event, then the replaced route must exist. If the route isn't capable of multipath, then replace first matching non-multipath capable route. If the route is capable of multipath and matching multipath capable route is found, then replace it. Otherwise, replace first matching non-multipath capable route. The new route is inserted before the replaced one. 
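As an aside, the selection policy just described can be distilled into a short sketch. Everything here is illustrative: the struct, the field names and pick_replaced() are simplified stand-ins and not the driver's code (in the driver the same walk also doubles as the insertion-point lookup and relies on entries being ordered by table ID and metric).

#include <linux/types.h>
#include <linux/stddef.h>

/* Simplified FIB entry, for illustration only. */
struct fib6_ent {
        struct fib6_ent *next;
        u32 tb_id;
        u32 metric;
        bool can_mp;    /* multipath capable: gateway route, not cache/addrconf */
};

/* Pick the entry to replace for a new route with the given table, metric
 * and multipath capability, following the policy in the changelog above.
 */
static struct fib6_ent *pick_replaced(struct fib6_ent *head, u32 tb_id,
                                      u32 metric, bool new_can_mp)
{
        struct fib6_ent *e, *first_non_mp = NULL;

        for (e = head; e; e = e->next) {
                if (e->tb_id != tb_id || e->metric != metric)
                        continue;
                if (e->can_mp == new_can_mp)
                        return e;               /* same capability: replace it */
                if (!e->can_mp && !first_non_mp)
                        first_non_mp = e;       /* first non-multipath match */
        }
        /* A multipath-capable route with no multipath match falls back here. */
        return first_non_mp;
}

The hunks below fold this policy into the existing find and insert helpers, which is why the replaced entry ends up as the direct list neighbour of the newly inserted one.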
In case the replaced route is currently offloaded, then it's overwritten in the device's table by the new route and later deleted, thus not impacting routed traffic. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 63 +++++++++++++++++----- 1 file changed, 49 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 2345c00ef706..cded8e8039bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3035,11 +3035,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry) static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct rt6_info *nrt) + const struct rt6_info *nrt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry; - if (!mlxsw_sp_fib6_rt_can_mp(nrt)) + if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace) return NULL; list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { @@ -3371,9 +3371,9 @@ static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp, static struct mlxsw_sp_fib6_entry * mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, - const struct rt6_info *nrt) + const struct rt6_info *nrt, bool replace) { - struct mlxsw_sp_fib6_entry *fib6_entry; + struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL; list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) { struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry); @@ -3382,21 +3382,32 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node, continue; if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id) break; + if (replace && rt->rt6i_metric == nrt->rt6i_metric) { + if (mlxsw_sp_fib6_rt_can_mp(rt) == + mlxsw_sp_fib6_rt_can_mp(nrt)) + return fib6_entry; + if (mlxsw_sp_fib6_rt_can_mp(nrt)) + fallback = fallback ?: fib6_entry; + } if (rt->rt6i_metric > nrt->rt6i_metric) - return fib6_entry; + return fallback ?: fib6_entry; } - return NULL; + return fallback; } static int -mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry) +mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry, + bool replace) { struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node; struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry); struct mlxsw_sp_fib6_entry *fib6_entry; - fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt); + fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace); + + if (replace && WARN_ON(!fib6_entry)) + return -EINVAL; if (fib6_entry) { list_add_tail(&new6_entry->common.list, @@ -3430,11 +3441,12 @@ mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry) } static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_fib6_entry *fib6_entry) + struct mlxsw_sp_fib6_entry *fib6_entry, + bool replace) { int err; - err = mlxsw_sp_fib6_node_list_insert(fib6_entry); + err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace); if (err) return err; @@ -3489,8 +3501,25 @@ mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp, return NULL; } +static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry, + bool replace) +{ + struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node; + struct mlxsw_sp_fib6_entry *replaced; + + if (!replace) + return; + + replaced = 
list_next_entry(fib6_entry, common.list); + + mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced); + mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced); + mlxsw_sp_fib_node_put(mlxsw_sp, fib_node); +} + static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, - struct rt6_info *rt) + struct rt6_info *rt, bool replace) { struct mlxsw_sp_fib6_entry *fib6_entry; struct mlxsw_sp_fib_node *fib_node; @@ -3513,7 +3542,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, /* Before creating a new entry, try to append route to an existing * multipath entry. */ - fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt); + fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace); if (fib6_entry) { err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt); if (err) @@ -3527,10 +3556,12 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, goto err_fib6_entry_create; } - err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry); + err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace); if (err) goto err_fib6_node_entry_link; + mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace); + return 0; err_fib6_node_entry_link: @@ -3795,13 +3826,16 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work) container_of(work, struct mlxsw_sp_fib_event_work, work); struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp; struct fib_rule *rule; + bool replace; int err; rtnl_lock(); switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_ADD: + replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE; err = mlxsw_sp_router_fib6_add(mlxsw_sp, - fib_work->fen6_info.rt); + fib_work->fen6_info.rt, replace); if (err) mlxsw_sp_router_fib_abort(mlxsw_sp); mlxsw_sp_rt6_release(fib_work->fen6_info.rt); @@ -3853,6 +3887,7 @@ static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work, struct fib_notifier_info *info) { switch (fib_work->event) { + case FIB_EVENT_ENTRY_REPLACE: /* fall through */ case FIB_EVENT_ENTRY_ADD: /* fall through */ case FIB_EVENT_ENTRY_DEL: memcpy(&fib_work->fen6_info, info, sizeof(fib_work->fen6_info)); -- cgit v1.2.3-55-g7522 From f36f5ac677d184a62404169c781339c0cc64ea87 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:30 +0200 Subject: mlxsw: spectrum_router: Abort on source-specific routes Without resorting to ACLs, the device performs route lookup solely based on the destination IP address. In case source-specific routing is needed, an error is returned and the abort mechanism is activated, thus allowing the kernel to take over forwarding decisions. Instead of aborting, we can trap specific destination prefixes where source-specific routes are present, but this will result in a lot more code that is unlikely to ever be used. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index cded8e8039bd..45cf32ca4126 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3528,6 +3528,9 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp, if (mlxsw_sp->router->aborted) return 0; + if (rt->rt6i_src.plen) + return -EINVAL; + if (mlxsw_sp_fib6_rt_should_ignore(rt)) return 0; -- cgit v1.2.3-55-g7522 From 65e65ec137f4abe78b6c90c72c0a6ca7474e9ae6 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 3 Aug 2017 13:28:31 +0200 Subject: mlxsw: spectrum_router: Don't ignore IPv6 notifications We now have all the necessary IPv6 infrastructure in place, so stop ignoring these notifications. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 45cf32ca4126..93b6da88e79c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -3912,7 +3912,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, struct fib_notifier_info *info = ptr; struct mlxsw_sp_router *router; - if (!net_eq(info->net, &init_net) || info->family != AF_INET) + if (!net_eq(info->net, &init_net)) return NOTIFY_DONE; fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC); -- cgit v1.2.3-55-g7522 From 98ba0bd5505dcbb90322a4be07bcfe6b8a18c73f Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 3 Aug 2017 16:29:37 -0400 Subject: sock: allocate skbs from optmem Add sock_omalloc and sock_ofree to be able to allocate control skbs, for instance for looping errors onto sk_error_queue. The transmit budget (sk_wmem_alloc) is involved in transmit skb shaping, most notably in TCP Small Queues. Using this budget for control packets would impact transmission. Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/net/sock.h | 2 ++ net/core/sock.c | 27 +++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/include/net/sock.h b/include/net/sock.h index 393c38e9f6aa..0f778d3c4300 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1531,6 +1531,8 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, gfp_t priority); void __sock_wfree(struct sk_buff *skb); void sock_wfree(struct sk_buff *skb); +struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, + gfp_t priority); void skb_orphan_partial(struct sk_buff *skb); void sock_rfree(struct sk_buff *skb); void sock_efree(struct sk_buff *skb); diff --git a/net/core/sock.c b/net/core/sock.c index 742f68c9c84a..1261880bdcc8 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1923,6 +1923,33 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, } EXPORT_SYMBOL(sock_wmalloc); +static void sock_ofree(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + + atomic_sub(skb->truesize, &sk->sk_omem_alloc); +} + +struct sk_buff *sock_omalloc(struct sock *sk, unsigned long size, + gfp_t priority) +{ + struct sk_buff *skb; + + /* small safe race: SKB_TRUESIZE may differ from final skb->truesize */ + if (atomic_read(&sk->sk_omem_alloc) + SKB_TRUESIZE(size) > + sysctl_optmem_max) + return NULL; + + skb = alloc_skb(size, priority); + if (!skb) + return NULL; + + atomic_add(skb->truesize, &sk->sk_omem_alloc); + skb->sk = sk; + skb->destructor = sock_ofree; + return skb; +} + /* * Allocate a memory block from the socket's option memory buffer. */ -- cgit v1.2.3-55-g7522 From 3ece782693c4b64d588dd217868558ab9a19bfe7 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 3 Aug 2017 16:29:38 -0400 Subject: sock: skb_copy_ubufs support for compound pages Refine skb_copy_ubufs to support compound pages. With upcoming TCP zerocopy sendmsg, such fragments may appear. The existing code replaces each page one for one. Splitting each compound page into an independent number of regular pages can result in exceeding limit MAX_SKB_FRAGS if data is not exactly page aligned. Instead, fill all destination pages but the last to PAGE_SIZE. Split the existing alloc + copy loop into separate stages: 1. compute bytelength and minimum number of pages to store this. 2. allocate 3. copy, filling each page except the last to PAGE_SIZE bytes 4. update skb frag array Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 9 +++++++-- net/core/skbuff.c | 53 ++++++++++++++++++++++++++++++++++++-------------- 2 files changed, 45 insertions(+), 17 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index be76082f48aa..2f64e2bbb592 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1796,13 +1796,18 @@ static inline unsigned int skb_headlen(const struct sk_buff *skb) return skb->len - skb->data_len; } -static inline unsigned int skb_pagelen(const struct sk_buff *skb) +static inline unsigned int __skb_pagelen(const struct sk_buff *skb) { unsigned int i, len = 0; for (i = skb_shinfo(skb)->nr_frags - 1; (int)i >= 0; i--) len += skb_frag_size(&skb_shinfo(skb)->frags[i]); - return len + skb_headlen(skb); + return len; +} + +static inline unsigned int skb_pagelen(const struct sk_buff *skb) +{ + return skb_headlen(skb) + __skb_pagelen(skb); } /** diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0f0933b338d7..a95877a8ac8b 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -932,17 +932,20 @@ EXPORT_SYMBOL_GPL(skb_morph); */ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) { - int i; + struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; int num_frags = skb_shinfo(skb)->nr_frags; struct page *page, *head = NULL; - struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; + int i, new_frags; + u32 d_off; - for (i = 0; i < num_frags; i++) { - skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - u32 p_off, p_len, copied; - struct page *p; - u8 *vaddr; + if (!num_frags) + return 0; + + if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) + return -EINVAL; + new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; + for (i = 0; i < new_frags; i++) { page = alloc_page(gfp_mask); if (!page) { while (head) { @@ -952,17 +955,36 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) } return -ENOMEM; } + set_page_private(page, (unsigned long)head); + head = page; + } + + page = head; + d_off = 0; + for (i = 0; i < num_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u32 p_off, p_len, copied; + struct page *p; + u8 *vaddr; skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f), p, p_off, p_len, copied) { + u32 copy, done = 0; vaddr = kmap_atomic(p); - memcpy(page_address(page) + copied, vaddr + p_off, - p_len); + + while (done < p_len) { + if (d_off == PAGE_SIZE) { + d_off = 0; + page = (struct page *)page_private(page); + } + copy = min_t(u32, PAGE_SIZE - d_off, p_len - done); + memcpy(page_address(page) + d_off, + vaddr + p_off + done, copy); + done += copy; + d_off += copy; + } kunmap_atomic(vaddr); } - - set_page_private(page, (unsigned long)head); - head = page; } /* skb frags release userspace buffers */ @@ -972,11 +994,12 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) uarg->callback(uarg, false); /* skb frags point to kernel buffers */ - for (i = num_frags - 1; i >= 0; i--) { - __skb_fill_page_desc(skb, i, head, 0, - skb_shinfo(skb)->frags[i].size); + for (i = 0; i < new_frags - 1; i++) { + __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); head = (struct page *)page_private(head); } + __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); + skb_shinfo(skb)->nr_frags = new_frags; skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; return 0; -- cgit v1.2.3-55-g7522 From 52267790ef52d7513879238ca9fac22c1733e0e3 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 3 Aug 2017 16:29:39 -0400 Subject: sock: add MSG_ZEROCOPY The kernel supports zerocopy sendmsg in virtio and tap. 
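For context, the hook being generalized looks roughly like this today; a minimal sketch assuming the pre-series ubuf_info layout, with hypothetical example_* helpers standing in for the vhost/tun code:

#include <linux/skbuff.h>

/* Completion handler in the role of vhost_zerocopy_callback(): called once
 * the lower device is done with the user pages backing the skb.
 */
static void example_zc_complete(struct ubuf_info *uarg, bool success)
{
        /* tell the producer that the pinned pages may be reused */
}

/* Producer side: after pinning user pages into the skb's frags, tie the skb
 * to the ubuf_info so the callback fires when the skb data is released.
 */
static void example_attach_zc(struct sk_buff *skb, struct ubuf_info *uarg)
{
        uarg->callback = example_zc_complete;
        skb_shinfo(skb)->destructor_arg = uarg;
        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG;
}

So far this mechanism has been private to those two drivers.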
Expand the infrastructure to support other socket types. Introduce a completion notification channel over the socket error queue. Notifications are returned with ee_origin SO_EE_ORIGIN_ZEROCOPY. ee_errno is 0 to avoid blocking the send/recv path on receiving notifications. Add reference counting, to support the skb split, merge, resize and clone operations possible with SOCK_STREAM and other socket types. The patch does not yet modify any datapaths. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/linux/skbuff.h | 60 +++++++++++++++++++ include/linux/socket.h | 1 + include/net/sock.h | 2 + include/uapi/linux/errqueue.h | 3 + net/core/datagram.c | 55 ++++++++++------- net/core/skbuff.c | 133 ++++++++++++++++++++++++++++++++++++++++++ net/core/sock.c | 2 + 7 files changed, 235 insertions(+), 21 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 2f64e2bbb592..59cff7aa494e 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -429,6 +429,7 @@ enum { SKBTX_SCHED_TSTAMP = 1 << 6, }; +#define SKBTX_ZEROCOPY_FRAG (SKBTX_DEV_ZEROCOPY | SKBTX_SHARED_FRAG) #define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ SKBTX_SCHED_TSTAMP) #define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP) @@ -445,8 +446,28 @@ struct ubuf_info { void (*callback)(struct ubuf_info *, bool zerocopy_success); void *ctx; unsigned long desc; + u16 zerocopy:1; + atomic_t refcnt; }; +#define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg)) + +struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size); + +static inline void sock_zerocopy_get(struct ubuf_info *uarg) +{ + atomic_inc(&uarg->refcnt); +} + +void sock_zerocopy_put(struct ubuf_info *uarg); +void sock_zerocopy_put_abort(struct ubuf_info *uarg); + +void sock_zerocopy_callback(struct ubuf_info *uarg, bool success); + +int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, + struct msghdr *msg, int len, + struct ubuf_info *uarg); + /* This data is invariant across clones and lives at * the end of the header data, ie. at skb->end. */ @@ -1214,6 +1235,45 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) return &skb_shinfo(skb)->hwtstamps; } +static inline struct ubuf_info *skb_zcopy(struct sk_buff *skb) +{ + bool is_zcopy = skb && skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY; + + return is_zcopy ? 
skb_uarg(skb) : NULL; +} + +static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg) +{ + if (skb && uarg && !skb_zcopy(skb)) { + sock_zerocopy_get(uarg); + skb_shinfo(skb)->destructor_arg = uarg; + skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; + } +} + +/* Release a reference on a zerocopy structure */ +static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) +{ + struct ubuf_info *uarg = skb_zcopy(skb); + + if (uarg) { + uarg->zerocopy = uarg->zerocopy && zerocopy; + sock_zerocopy_put(uarg); + skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; + } +} + +/* Abort a zerocopy operation and revert zckey on error in send syscall */ +static inline void skb_zcopy_abort(struct sk_buff *skb) +{ + struct ubuf_info *uarg = skb_zcopy(skb); + + if (uarg) { + sock_zerocopy_put_abort(uarg); + skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; + } +} + /** * skb_queue_empty - check if a queue is empty * @list: queue head diff --git a/include/linux/socket.h b/include/linux/socket.h index 8b13db5163cc..8ad963cdc88c 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -287,6 +287,7 @@ struct ucred { #define MSG_BATCH 0x40000 /* sendmmsg(): more messages coming */ #define MSG_EOF MSG_FIN +#define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ #define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file descriptor received through diff --git a/include/net/sock.h b/include/net/sock.h index 0f778d3c4300..fe1a0bc25cd3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -294,6 +294,7 @@ struct sock_common { * @sk_stamp: time stamp of last packet received * @sk_tsflags: SO_TIMESTAMPING socket options * @sk_tskey: counter to disambiguate concurrent tstamp requests + * @sk_zckey: counter to order MSG_ZEROCOPY notifications * @sk_socket: Identd and reporting IO signals * @sk_user_data: RPC layer private data * @sk_frag: cached page frag @@ -462,6 +463,7 @@ struct sock { u16 sk_tsflags; u8 sk_shutdown; u32 sk_tskey; + atomic_t sk_zckey; struct socket *sk_socket; void *sk_user_data; #ifdef CONFIG_SECURITY diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h index 07bdce1f444a..78fdf52d6b2f 100644 --- a/include/uapi/linux/errqueue.h +++ b/include/uapi/linux/errqueue.h @@ -18,10 +18,13 @@ struct sock_extended_err { #define SO_EE_ORIGIN_ICMP 2 #define SO_EE_ORIGIN_ICMP6 3 #define SO_EE_ORIGIN_TXSTATUS 4 +#define SO_EE_ORIGIN_ZEROCOPY 5 #define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS #define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1)) +#define SO_EE_CODE_ZEROCOPY_COPIED 1 + /** * struct scm_timestamping - timestamps exposed through cmsg * diff --git a/net/core/datagram.c b/net/core/datagram.c index ee5647bd91b3..2f3277945d35 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -573,27 +573,12 @@ fault: } EXPORT_SYMBOL(skb_copy_datagram_from_iter); -/** - * zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter - * @skb: buffer to copy - * @from: the source to copy from - * - * The function will first copy up to headlen, and then pin the userspace - * pages and build frags through them. - * - * Returns 0, -EFAULT or -EMSGSIZE. 
- */ -int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from) +int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb, + struct iov_iter *from, size_t length) { - int len = iov_iter_count(from); - int copy = min_t(int, skb_headlen(skb), len); - int frag = 0; + int frag = skb_shinfo(skb)->nr_frags; - /* copy up to skb headlen */ - if (skb_copy_datagram_from_iter(skb, 0, from, copy)) - return -EFAULT; - - while (iov_iter_count(from)) { + while (length && iov_iter_count(from)) { struct page *pages[MAX_SKB_FRAGS]; size_t start; ssize_t copied; @@ -603,18 +588,24 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from) if (frag == MAX_SKB_FRAGS) return -EMSGSIZE; - copied = iov_iter_get_pages(from, pages, ~0U, + copied = iov_iter_get_pages(from, pages, length, MAX_SKB_FRAGS - frag, &start); if (copied < 0) return -EFAULT; iov_iter_advance(from, copied); + length -= copied; truesize = PAGE_ALIGN(copied + start); skb->data_len += copied; skb->len += copied; skb->truesize += truesize; - refcount_add(truesize, &skb->sk->sk_wmem_alloc); + if (sk && sk->sk_type == SOCK_STREAM) { + sk->sk_wmem_queued += truesize; + sk_mem_charge(sk, truesize); + } else { + refcount_add(truesize, &skb->sk->sk_wmem_alloc); + } while (copied) { int size = min_t(int, copied, PAGE_SIZE - start); skb_fill_page_desc(skb, frag++, pages[n], start, size); @@ -625,6 +616,28 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from) } return 0; } +EXPORT_SYMBOL(__zerocopy_sg_from_iter); + +/** + * zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter + * @skb: buffer to copy + * @from: the source to copy from + * + * The function will first copy up to headlen, and then pin the userspace + * pages and build frags through them. + * + * Returns 0, -EFAULT or -EMSGSIZE. 
+ */ +int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from) +{ + int copy = min_t(int, skb_headlen(skb), iov_iter_count(from)); + + /* copy up to skb headlen */ + if (skb_copy_datagram_from_iter(skb, 0, from, copy)) + return -EFAULT; + + return __zerocopy_sg_from_iter(NULL, skb, from, ~0U); +} EXPORT_SYMBOL(zerocopy_sg_from_iter); static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a95877a8ac8b..0603e44950da 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -915,6 +915,139 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) } EXPORT_SYMBOL_GPL(skb_morph); +struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) +{ + struct ubuf_info *uarg; + struct sk_buff *skb; + + WARN_ON_ONCE(!in_task()); + + skb = sock_omalloc(sk, 0, GFP_KERNEL); + if (!skb) + return NULL; + + BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); + uarg = (void *)skb->cb; + + uarg->callback = sock_zerocopy_callback; + uarg->desc = atomic_inc_return(&sk->sk_zckey) - 1; + uarg->zerocopy = 1; + atomic_set(&uarg->refcnt, 0); + sock_hold(sk); + + return uarg; +} +EXPORT_SYMBOL_GPL(sock_zerocopy_alloc); + +static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg) +{ + return container_of((void *)uarg, struct sk_buff, cb); +} + +void sock_zerocopy_callback(struct ubuf_info *uarg, bool success) +{ + struct sk_buff *skb = skb_from_uarg(uarg); + struct sock_exterr_skb *serr; + struct sock *sk = skb->sk; + u16 id = uarg->desc; + + if (sock_flag(sk, SOCK_DEAD)) + goto release; + + serr = SKB_EXT_ERR(skb); + memset(serr, 0, sizeof(*serr)); + serr->ee.ee_errno = 0; + serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; + serr->ee.ee_data = id; + if (!success) + serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; + + skb_queue_tail(&sk->sk_error_queue, skb); + skb = NULL; + + sk->sk_error_report(sk); + +release: + consume_skb(skb); + sock_put(sk); +} +EXPORT_SYMBOL_GPL(sock_zerocopy_callback); + +void sock_zerocopy_put(struct ubuf_info *uarg) +{ + if (uarg && atomic_dec_and_test(&uarg->refcnt)) { + if (uarg->callback) + uarg->callback(uarg, uarg->zerocopy); + else + consume_skb(skb_from_uarg(uarg)); + } +} +EXPORT_SYMBOL_GPL(sock_zerocopy_put); + +void sock_zerocopy_put_abort(struct ubuf_info *uarg) +{ + if (uarg) { + struct sock *sk = skb_from_uarg(uarg)->sk; + + atomic_dec(&sk->sk_zckey); + + /* sock_zerocopy_put expects a ref. Most sockets take one per + * skb, which is zero on abort. tcp_sendmsg holds one extra, to + * avoid an skb send inside the main loop triggering uarg free. + */ + if (sk->sk_type != SOCK_STREAM) + atomic_inc(&uarg->refcnt); + + sock_zerocopy_put(uarg); + } +} +EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort); + +extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb, + struct iov_iter *from, size_t length); + +int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, + struct msghdr *msg, int len, + struct ubuf_info *uarg) +{ + struct iov_iter orig_iter = msg->msg_iter; + int err, orig_len = skb->len; + + err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); + if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { + /* Streams do not free skb on error. Reset to prev state. 
*/ + msg->msg_iter = orig_iter; + ___pskb_trim(skb, orig_len); + return err; + } + + skb_zcopy_set(skb, uarg); + return skb->len - orig_len; +} +EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); + +/* unused only until next patch in the series; will remove attribute */ +static int __attribute__((unused)) + skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, + gfp_t gfp_mask) +{ + if (skb_zcopy(orig)) { + if (skb_zcopy(nskb)) { + /* !gfp_mask callers are verified to !skb_zcopy(nskb) */ + if (!gfp_mask) { + WARN_ON_ONCE(1); + return -ENOMEM; + } + if (skb_uarg(nskb) == skb_uarg(orig)) + return 0; + if (skb_copy_ubufs(nskb, GFP_ATOMIC)) + return -EIO; + } + skb_zcopy_set(nskb, skb_uarg(orig)); + } + return 0; +} + /** * skb_copy_ubufs - copy userspace skb frags buffers to kernel * @skb: the skb to modify diff --git a/net/core/sock.c b/net/core/sock.c index 1261880bdcc8..e8b696858cad 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1670,6 +1670,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) atomic_set(&newsk->sk_drops, 0); newsk->sk_send_head = NULL; newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; + atomic_set(&newsk->sk_zckey, 0); sock_reset_flag(newsk, SOCK_DONE); @@ -2722,6 +2723,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; sk->sk_stamp = SK_DEFAULT_STAMP; + atomic_set(&sk->sk_zckey, 0); #ifdef CONFIG_NET_RX_BUSY_POLL sk->sk_napi_id = 0; -- cgit v1.2.3-55-g7522 From 76851d1212c11365362525e1e2c0a18c97478e6b Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 3 Aug 2017 16:29:40 -0400 Subject: sock: add SOCK_ZEROCOPY sockopt The send call ignores unknown flags. Legacy applications may already unwittingly pass MSG_ZEROCOPY. Continue to ignore this flag unless a socket opts in to zerocopy. Introduce socket option SO_ZEROCOPY to enable MSG_ZEROCOPY processing. Processes can also query this socket option to detect kernel support for the feature. Older kernels will return ENOPROTOOPT. Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- arch/alpha/include/uapi/asm/socket.h | 2 ++ arch/frv/include/uapi/asm/socket.h | 2 ++ arch/ia64/include/uapi/asm/socket.h | 2 ++ arch/m32r/include/uapi/asm/socket.h | 2 ++ arch/mips/include/uapi/asm/socket.h | 2 ++ arch/mn10300/include/uapi/asm/socket.h | 2 ++ arch/parisc/include/uapi/asm/socket.h | 2 ++ arch/s390/include/uapi/asm/socket.h | 2 ++ arch/sparc/include/uapi/asm/socket.h | 2 ++ arch/xtensa/include/uapi/asm/socket.h | 2 ++ include/uapi/asm-generic/socket.h | 2 ++ net/core/skbuff.c | 3 +++ net/core/sock.c | 18 ++++++++++++++++++ 13 files changed, 43 insertions(+) diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h index 7b285dd4fe05..c6133a045352 100644 --- a/arch/alpha/include/uapi/asm/socket.h +++ b/arch/alpha/include/uapi/asm/socket.h @@ -109,4 +109,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h index f1e3b20dce9f..9abf02d6855a 100644 --- a/arch/frv/include/uapi/asm/socket.h +++ b/arch/frv/include/uapi/asm/socket.h @@ -102,5 +102,7 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h index 5dd5c5d0d642..002eb85a6941 100644 --- a/arch/ia64/include/uapi/asm/socket.h +++ b/arch/ia64/include/uapi/asm/socket.h @@ -111,4 +111,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _ASM_IA64_SOCKET_H */ diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h index f8f7b47e247f..e268e51a38d1 100644 --- a/arch/m32r/include/uapi/asm/socket.h +++ b/arch/m32r/include/uapi/asm/socket.h @@ -102,4 +102,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _ASM_M32R_SOCKET_H */ diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h index 882823bec153..6c755bc07975 100644 --- a/arch/mips/include/uapi/asm/socket.h +++ b/arch/mips/include/uapi/asm/socket.h @@ -120,4 +120,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h index c710db354ff2..ac82a3f26dbf 100644 --- a/arch/mn10300/include/uapi/asm/socket.h +++ b/arch/mn10300/include/uapi/asm/socket.h @@ -102,4 +102,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h index a0d4dc9f4eb2..3b2bf7ae703b 100644 --- a/arch/parisc/include/uapi/asm/socket.h +++ b/arch/parisc/include/uapi/asm/socket.h @@ -101,4 +101,6 @@ #define SO_PEERGROUPS 0x4034 +#define SO_ZEROCOPY 0x4035 + #endif /* _UAPI_ASM_SOCKET_H */ diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h index 52a63f4175cb..a56916c83565 100644 --- a/arch/s390/include/uapi/asm/socket.h +++ b/arch/s390/include/uapi/asm/socket.h @@ -108,4 +108,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h index 186fd8199f54..b2f5c50d0947 100644 --- a/arch/sparc/include/uapi/asm/socket.h +++ b/arch/sparc/include/uapi/asm/socket.h @@ -98,6 +98,8 @@ #define SO_PEERGROUPS 0x003d +#define SO_ZEROCOPY 0x003e + /* Security levels - as per NRL IPv6 - don't actually do anything */ #define SO_SECURITY_AUTHENTICATION 0x5001 #define 
SO_SECURITY_ENCRYPTION_TRANSPORT 0x5002 diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h index 3eed2761c149..220059999e74 100644 --- a/arch/xtensa/include/uapi/asm/socket.h +++ b/arch/xtensa/include/uapi/asm/socket.h @@ -113,4 +113,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* _XTENSA_SOCKET_H */ diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 9861be8da65e..e47c9e436221 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -104,4 +104,6 @@ #define SO_PEERGROUPS 59 +#define SO_ZEROCOPY 60 + #endif /* __ASM_GENERIC_SOCKET_H */ diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0603e44950da..29e34bc6a17c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -922,6 +922,9 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) WARN_ON_ONCE(!in_task()); + if (!sock_flag(sk, SOCK_ZEROCOPY)) + return NULL; + skb = sock_omalloc(sk, 0, GFP_KERNEL); if (!skb) return NULL; diff --git a/net/core/sock.c b/net/core/sock.c index e8b696858cad..9ea988d25b0a 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1055,6 +1055,20 @@ set_rcvbuf: if (val == 1) dst_negative_advice(sk); break; + + case SO_ZEROCOPY: + if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) + ret = -ENOTSUPP; + else if (sk->sk_protocol != IPPROTO_TCP) + ret = -ENOTSUPP; + else if (sk->sk_state != TCP_CLOSE) + ret = -EBUSY; + else if (val < 0 || val > 1) + ret = -EINVAL; + else + sock_valbool_flag(sk, SOCK_ZEROCOPY, valbool); + break; + default: ret = -ENOPROTOOPT; break; @@ -1383,6 +1397,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val64 = sock_gen_cookie(sk); break; + case SO_ZEROCOPY: + v.val = sock_flag(sk, SOCK_ZEROCOPY); + break; + default: /* We implement the SO_SNDLOWAT etc to not be settable * (1003.1g 7). -- cgit v1.2.3-55-g7522 From 1f8b977ab32dc5d148f103326e80d9097f1cefb5 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 3 Aug 2017 16:29:41 -0400 Subject: sock: enable MSG_ZEROCOPY Prepare the datapath for refcounted ubuf_info. Clone ubuf_info with skb_zerocopy_clone() wherever needed due to skb split, merge, resize or clone. Split skb_orphan_frags into two variants. The split, merge, .. paths support reference counted zerocopy buffers, so do not do a deep copy. Add skb_orphan_frags_rx for paths that may loop packets to receive sockets. That is not allowed, as it may cause unbounded latency. Deep copy all zerocopy copy buffers, ref-counted or not, in this path. The exact locations to modify were chosen by exhaustively searching through all code that might modify skb_frag references and/or the the SKBTX_DEV_ZEROCOPY tx_flags bit. The changes err on the safe side, in two ways. (1) legacy ubuf_info paths virtio and tap are not modified. They keep a 1:1 ubuf_info to sk_buff relationship. Calls to skb_orphan_frags still call skb_copy_ubufs and thus copy frags in this case. (2) not all copies deep in the stack are addressed yet. skb_shift, skb_split and skb_try_coalesce can be refined to avoid copying. These are not in the hot path and this patch is hairy enough as is, so that is left for future refinement. Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- drivers/net/tun.c | 2 +- drivers/vhost/net.c | 1 + include/linux/skbuff.h | 14 +++++++++++++- net/core/dev.c | 4 ++-- net/core/skbuff.c | 48 +++++++++++++++++++----------------------------- 5 files changed, 36 insertions(+), 33 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 68add55f8460..d21510d47aa2 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -892,7 +892,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) sk_filter(tfile->socket.sk, skb)) goto drop; - if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) + if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) goto drop; skb_tx_timestamp(skb); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 06d044862e58..ba08b78ed630 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -533,6 +533,7 @@ static void handle_tx(struct vhost_net *net) ubuf->callback = vhost_zerocopy_callback; ubuf->ctx = nvq->ubufs; ubuf->desc = nvq->upend_idx; + atomic_set(&ubuf->refcnt, 1); msg.msg_control = ubuf; msg.msg_controllen = sizeof(ubuf); ubufs = nvq->ubufs; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 59cff7aa494e..e5387932c266 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2512,7 +2512,17 @@ static inline void skb_orphan(struct sk_buff *skb) */ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask) { - if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY))) + if (likely(!skb_zcopy(skb))) + return 0; + if (skb_uarg(skb)->callback == sock_zerocopy_callback) + return 0; + return skb_copy_ubufs(skb, gfp_mask); +} + +/* Frags must be orphaned, even if refcounted, if skb might loop to rx path */ +static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) +{ + if (likely(!skb_zcopy(skb))) return 0; return skb_copy_ubufs(skb, gfp_mask); } @@ -2944,6 +2954,8 @@ static inline int skb_add_data(struct sk_buff *skb, static inline bool skb_can_coalesce(struct sk_buff *skb, int i, const struct page *page, int off) { + if (skb_zcopy(skb)) + return false; if (i) { const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; diff --git a/net/core/dev.c b/net/core/dev.c index 8ea6b4b42611..1d75499add72 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1853,7 +1853,7 @@ static inline int deliver_skb(struct sk_buff *skb, struct packet_type *pt_prev, struct net_device *orig_dev) { - if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) + if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) return -ENOMEM; refcount_inc(&skb->users); return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); @@ -4412,7 +4412,7 @@ skip_classify: } if (pt_prev) { - if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) + if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) goto drop; else ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 29e34bc6a17c..74d3c36f8419 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -567,21 +567,10 @@ static void skb_release_data(struct sk_buff *skb) for (i = 0; i < shinfo->nr_frags; i++) __skb_frag_unref(&shinfo->frags[i]); - /* - * If skb buf is from userspace, we need to notify the caller - * the lower device DMA has done; - */ - if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) { - struct ubuf_info *uarg; - - uarg = shinfo->destructor_arg; - if (uarg->callback) - uarg->callback(uarg, true); - } - if (shinfo->frag_list) kfree_skb_list(shinfo->frag_list); + skb_zcopy_clear(skb, true); skb_free_head(skb); } @@ -695,14 +684,7 @@ 
EXPORT_SYMBOL(kfree_skb_list); */ void skb_tx_error(struct sk_buff *skb) { - if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { - struct ubuf_info *uarg; - - uarg = skb_shinfo(skb)->destructor_arg; - if (uarg->callback) - uarg->callback(uarg, false); - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; - } + skb_zcopy_clear(skb, true); } EXPORT_SYMBOL(skb_tx_error); @@ -1029,9 +1011,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); -/* unused only until next patch in the series; will remove attribute */ -static int __attribute__((unused)) - skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, +static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig, gfp_t gfp_mask) { if (skb_zcopy(orig)) { @@ -1068,7 +1048,6 @@ static int __attribute__((unused)) */ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) { - struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; int num_frags = skb_shinfo(skb)->nr_frags; struct page *page, *head = NULL; int i, new_frags; @@ -1127,8 +1106,6 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) for (i = 0; i < num_frags; i++) skb_frag_unref(skb, i); - uarg->callback(uarg, false); - /* skb frags point to kernel buffers */ for (i = 0; i < new_frags - 1; i++) { __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); @@ -1137,7 +1114,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); skb_shinfo(skb)->nr_frags = new_frags; - skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; + skb_zcopy_clear(skb, false); return 0; } EXPORT_SYMBOL_GPL(skb_copy_ubufs); @@ -1298,7 +1275,8 @@ struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, if (skb_shinfo(skb)->nr_frags) { int i; - if (skb_orphan_frags(skb, gfp_mask)) { + if (skb_orphan_frags(skb, gfp_mask) || + skb_zerocopy_clone(n, skb, gfp_mask)) { kfree_skb(n); n = NULL; goto out; @@ -1375,9 +1353,10 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, * be since all we did is relocate the values */ if (skb_cloned(skb)) { - /* copy this zero copy skb frags */ if (skb_orphan_frags(skb, gfp_mask)) goto nofrags; + if (skb_zcopy(skb)) + atomic_inc(&skb_uarg(skb)->refcnt); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) skb_frag_ref(skb, i); @@ -1872,6 +1851,9 @@ end: skb->tail += delta; skb->data_len -= delta; + if (!skb->data_len) + skb_zcopy_clear(skb, false); + return skb_tail_pointer(skb); } EXPORT_SYMBOL(__pskb_pull_tail); @@ -2627,6 +2609,7 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) skb_tx_error(from); return -ENOMEM; } + skb_zerocopy_clone(to, from, GFP_ATOMIC); for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { if (!len) @@ -2924,6 +2907,7 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; + skb_zerocopy_clone(skb1, skb, 0); if (len < pos) /* Split line is inside header. */ skb_split_inside_header(skb, skb1, len, pos); else /* Second chunk has no header, nothing to copy. 
*/ @@ -2967,6 +2951,8 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) if (skb_headlen(skb)) return 0; + if (skb_zcopy(tgt) || skb_zcopy(skb)) + return 0; todo = shiftlen; from = 0; @@ -3540,6 +3526,8 @@ normal: skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags & SKBTX_SHARED_FRAG; + if (skb_zerocopy_clone(nskb, head_skb, GFP_ATOMIC)) + goto err; while (pos < offset + len) { if (i >= nfrags) { @@ -4663,6 +4651,8 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, if (skb_has_frag_list(to) || skb_has_frag_list(from)) return false; + if (skb_zcopy(to) || skb_zcopy(from)) + return false; if (skb_headlen(from) != 0) { struct page *page; -- cgit v1.2.3-55-g7522 From 4ab6c99d99bb1bf0fbba8ff4e52114c66109992f Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 3 Aug 2017 16:29:42 -0400 Subject: sock: MSG_ZEROCOPY notification coalescing In the simple case, each sendmsg() call generates data and eventually a zerocopy ready notification N, where N indicates the Nth successful invocation of sendmsg() with the MSG_ZEROCOPY flag on this socket. TCP and corked sockets can cause send() calls to append new data to an existing sk_buff and, thus, ubuf_info. In that case the notification must hold a range. odify ubuf_info to store a inclusive range [N..N+m] and add skb_zerocopy_realloc() to optionally extend an existing range. Also coalesce notifications in this common case: if a notification [1, 1] is about to be queued while [0, 0] is the queue tail, just modify the head of the queue to read [0, 1]. Coalescing is limited to a few TSO frames worth of data to bound notification latency. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/linux/skbuff.h | 17 +++++++-- net/core/skbuff.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++---- 2 files changed, 106 insertions(+), 10 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index e5387932c266..f5bdd93a87da 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -444,15 +444,26 @@ enum { */ struct ubuf_info { void (*callback)(struct ubuf_info *, bool zerocopy_success); - void *ctx; - unsigned long desc; - u16 zerocopy:1; + union { + struct { + unsigned long desc; + void *ctx; + }; + struct { + u32 id; + u16 len; + u16 zerocopy:1; + u32 bytelen; + }; + }; atomic_t refcnt; }; #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg)) struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size); +struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size, + struct ubuf_info *uarg); static inline void sock_zerocopy_get(struct ubuf_info *uarg) { diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 74d3c36f8419..dcee0f64f1fa 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -915,7 +915,9 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) uarg = (void *)skb->cb; uarg->callback = sock_zerocopy_callback; - uarg->desc = atomic_inc_return(&sk->sk_zckey) - 1; + uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; + uarg->len = 1; + uarg->bytelen = size; uarg->zerocopy = 1; atomic_set(&uarg->refcnt, 0); sock_hold(sk); @@ -929,26 +931,101 @@ static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg) return container_of((void *)uarg, struct sk_buff, cb); } +struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size, + struct ubuf_info *uarg) +{ + if (uarg) { + const u32 byte_limit = 1 << 19; /* limit to a few TSO */ + u32 bytelen, next; + + /* realloc only when socket 
is locked (TCP, UDP cork), + * so uarg->len and sk_zckey access is serialized + */ + if (!sock_owned_by_user(sk)) { + WARN_ON_ONCE(1); + return NULL; + } + + bytelen = uarg->bytelen + size; + if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) { + /* TCP can create new skb to attach new uarg */ + if (sk->sk_type == SOCK_STREAM) + goto new_alloc; + return NULL; + } + + next = (u32)atomic_read(&sk->sk_zckey); + if ((u32)(uarg->id + uarg->len) == next) { + uarg->len++; + uarg->bytelen = bytelen; + atomic_set(&sk->sk_zckey, ++next); + return uarg; + } + } + +new_alloc: + return sock_zerocopy_alloc(sk, size); +} +EXPORT_SYMBOL_GPL(sock_zerocopy_realloc); + +static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) +{ + struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); + u32 old_lo, old_hi; + u64 sum_len; + + old_lo = serr->ee.ee_info; + old_hi = serr->ee.ee_data; + sum_len = old_hi - old_lo + 1ULL + len; + + if (sum_len >= (1ULL << 32)) + return false; + + if (lo != old_hi + 1) + return false; + + serr->ee.ee_data += len; + return true; +} + void sock_zerocopy_callback(struct ubuf_info *uarg, bool success) { - struct sk_buff *skb = skb_from_uarg(uarg); + struct sk_buff *tail, *skb = skb_from_uarg(uarg); struct sock_exterr_skb *serr; struct sock *sk = skb->sk; - u16 id = uarg->desc; + struct sk_buff_head *q; + unsigned long flags; + u32 lo, hi; + u16 len; - if (sock_flag(sk, SOCK_DEAD)) + /* if !len, there was only 1 call, and it was aborted + * so do not queue a completion notification + */ + if (!uarg->len || sock_flag(sk, SOCK_DEAD)) goto release; + len = uarg->len; + lo = uarg->id; + hi = uarg->id + len - 1; + serr = SKB_EXT_ERR(skb); memset(serr, 0, sizeof(*serr)); serr->ee.ee_errno = 0; serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY; - serr->ee.ee_data = id; + serr->ee.ee_data = hi; + serr->ee.ee_info = lo; if (!success) serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED; - skb_queue_tail(&sk->sk_error_queue, skb); - skb = NULL; + q = &sk->sk_error_queue; + spin_lock_irqsave(&q->lock, flags); + tail = skb_peek_tail(q); + if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY || + !skb_zerocopy_notify_extend(tail, lo, len)) { + __skb_queue_tail(q, skb); + skb = NULL; + } + spin_unlock_irqrestore(&q->lock, flags); sk->sk_error_report(sk); @@ -975,6 +1052,7 @@ void sock_zerocopy_put_abort(struct ubuf_info *uarg) struct sock *sk = skb_from_uarg(uarg)->sk; atomic_dec(&sk->sk_zckey); + uarg->len--; /* sock_zerocopy_put expects a ref. Most sockets take one per * skb, which is zero on abort. tcp_sendmsg holds one extra, to @@ -995,9 +1073,16 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, struct msghdr *msg, int len, struct ubuf_info *uarg) { + struct ubuf_info *orig_uarg = skb_zcopy(skb); struct iov_iter orig_iter = msg->msg_iter; int err, orig_len = skb->len; + /* An skb can only point to one uarg. This edge case happens when + * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. + */ + if (orig_uarg && uarg != orig_uarg) + return -EEXIST; + err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { /* Streams do not free skb on error. Reset to prev state. */ -- cgit v1.2.3-55-g7522 From a91dbff551a6f1865b68fa82b654591490b59901 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 3 Aug 2017 16:29:43 -0400 Subject: sock: ulimit on MSG_ZEROCOPY pages Bound the number of pages that a user may pin. 
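For orientation: the bound introduced below is charged against the sending user's RLIMIT_MEMLOCK budget (callers with CAP_IPC_LOCK are exempt), and a send whose zerocopy pages cannot be accounted fails to set up zerocopy, surfacing to userspace as an error (typically -ENOBUFS) from sendmsg(). A minimal userspace check of that budget — a hypothetical sketch under those assumptions, not code from this patch:

	/*
	 * Hypothetical userspace sketch (not part of this patch): report the
	 * RLIMIT_MEMLOCK budget that MSG_ZEROCOPY page pinning is charged
	 * against for users without CAP_IPC_LOCK.
	 */
	#include <stdio.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl;

		if (getrlimit(RLIMIT_MEMLOCK, &rl))
			return 1;

		/* Pinned zerocopy pages count toward this per-user budget;
		 * sends that would exceed it fail to set up zerocopy.
		 */
		printf("memlock soft limit: %llu bytes\n",
		       (unsigned long long)rl.rlim_cur);
		return 0;
	}

Raising the soft limit with setrlimit(), or granting CAP_IPC_LOCK, widens the budget accordingly.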
Follow the lead of perf tools to maintain a per-user bound on memory locked pages commit 789f90fcf6b0 ("perf_counter: per user mlock gift") Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/linux/sched/user.h | 3 ++- include/linux/skbuff.h | 5 +++++ net/core/skbuff.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 5d5415e129d4..3c07e4135127 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -36,7 +36,8 @@ struct user_struct { struct hlist_node uidhash_node; kuid_t uid; -#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) +#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \ + defined(CONFIG_NET) atomic_long_t locked_vm; #endif }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f5bdd93a87da..8c0708d2e5e6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -457,6 +457,11 @@ struct ubuf_info { }; }; atomic_t refcnt; + + struct mmpin { + struct user_struct *user; + unsigned int num_pg; + } mmp; }; #define skb_uarg(SKB) ((struct ubuf_info *)(skb_shinfo(SKB)->destructor_arg)) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index dcee0f64f1fa..42b62c716a33 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -897,6 +897,44 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) } EXPORT_SYMBOL_GPL(skb_morph); +static int mm_account_pinned_pages(struct mmpin *mmp, size_t size) +{ + unsigned long max_pg, num_pg, new_pg, old_pg; + struct user_struct *user; + + if (capable(CAP_IPC_LOCK) || !size) + return 0; + + num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */ + max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + user = mmp->user ? 
: current_user(); + + do { + old_pg = atomic_long_read(&user->locked_vm); + new_pg = old_pg + num_pg; + if (new_pg > max_pg) + return -ENOBUFS; + } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) != + old_pg); + + if (!mmp->user) { + mmp->user = get_uid(user); + mmp->num_pg = num_pg; + } else { + mmp->num_pg += num_pg; + } + + return 0; +} + +static void mm_unaccount_pinned_pages(struct mmpin *mmp) +{ + if (mmp->user) { + atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm); + free_uid(mmp->user); + } +} + struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) { struct ubuf_info *uarg; @@ -913,6 +951,12 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size) BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); uarg = (void *)skb->cb; + uarg->mmp.user = NULL; + + if (mm_account_pinned_pages(&uarg->mmp, size)) { + kfree_skb(skb); + return NULL; + } uarg->callback = sock_zerocopy_callback; uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; @@ -956,6 +1000,8 @@ struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size, next = (u32)atomic_read(&sk->sk_zckey); if ((u32)(uarg->id + uarg->len) == next) { + if (mm_account_pinned_pages(&uarg->mmp, size)) + return NULL; uarg->len++; uarg->bytelen = bytelen; atomic_set(&sk->sk_zckey, ++next); @@ -1038,6 +1084,8 @@ EXPORT_SYMBOL_GPL(sock_zerocopy_callback); void sock_zerocopy_put(struct ubuf_info *uarg) { if (uarg && atomic_dec_and_test(&uarg->refcnt)) { + mm_unaccount_pinned_pages(&uarg->mmp); + if (uarg->callback) uarg->callback(uarg, uarg->zerocopy); else -- cgit v1.2.3-55-g7522 From f214f915e7db99091f1312c48b30928c1e0c90b7 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 3 Aug 2017 16:29:44 -0400 Subject: tcp: enable MSG_ZEROCOPY Enable support for MSG_ZEROCOPY to the TCP stack. TSO and GSO are both supported. Only data sent to remote destinations is sent without copying. Packets looped onto a local destination have their payload copied to avoid unbounded latency. Tested: A 10x TCP_STREAM between two hosts showed a reduction in netserver process cycles by up to 70%, depending on packet size. Systemwide, savings are of course much less pronounced, at up to 20% best case. msg_zerocopy.sh 4 tcp: without zerocopy tx=121792 (7600 MB) txc=0 zc=n rx=60458 (7600 MB) with zerocopy tx=286257 (17863 MB) txc=286257 zc=y rx=140022 (17863 MB) This test opens a pair of sockets over veth, one one calls send with 64KB and optionally MSG_ZEROCOPY and on the other reads the initial bytes. The receiver truncates, so this is strictly an upper bound on what is achievable. It is more representative of sending data out of a physical NIC (when payload is not touched, either). Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9dd6f4dba9b1..71b25567e787 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1165,6 +1165,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) { struct tcp_sock *tp = tcp_sk(sk); + struct ubuf_info *uarg = NULL; struct sk_buff *skb; struct sockcm_cookie sockc; int flags, err, copied = 0; @@ -1174,6 +1175,26 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) long timeo; flags = msg->msg_flags; + + if (flags & MSG_ZEROCOPY && size) { + if (sk->sk_state != TCP_ESTABLISHED) { + err = -EINVAL; + goto out_err; + } + + skb = tcp_send_head(sk) ? tcp_write_queue_tail(sk) : NULL; + uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb)); + if (!uarg) { + err = -ENOBUFS; + goto out_err; + } + + /* skb may be freed in main loop, keep extra ref on uarg */ + sock_zerocopy_get(uarg); + if (!(sk_check_csum_caps(sk) && sk->sk_route_caps & NETIF_F_SG)) + uarg->zerocopy = 0; + } + if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) { err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); if (err == -EINPROGRESS && copied_syn > 0) @@ -1297,7 +1318,7 @@ new_segment: err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); if (err) goto do_fault; - } else { + } else if (!uarg || !uarg->zerocopy) { bool merge = true; int i = skb_shinfo(skb)->nr_frags; struct page_frag *pfrag = sk_page_frag(sk); @@ -1335,6 +1356,13 @@ new_segment: page_ref_inc(pfrag->page); } pfrag->offset += copy; + } else { + err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg); + if (err == -EMSGSIZE || err == -EEXIST) + goto new_segment; + if (err < 0) + goto do_error; + copy = err; } if (!copied) @@ -1381,6 +1409,7 @@ out: tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); } out_nopush: + sock_zerocopy_put(uarg); return copied + copied_syn; do_fault: @@ -1397,6 +1426,7 @@ do_error: if (copied + copied_syn) goto out; out_err: + sock_zerocopy_put_abort(uarg); err = sk_stream_error(sk, flags, err); /* make sure we wake any epoll edge trigger waiter */ if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && -- cgit v1.2.3-55-g7522 From 07b65c5b31ce477c3ced6e3541fd2331338be214 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Thu, 3 Aug 2017 16:29:45 -0400 Subject: test: add msg_zerocopy test Introduce regression test for msg_zerocopy feature. Send traffic from one process to another with and without zerocopy. Evaluate tcp, udp, raw and packet sockets, including variants - udp: corking and corking with mixed copy/zerocopy calls - raw: with and without hdrincl - packet: at both raw and dgram level Test on both ipv4 and ipv6, optionally with ethtool changes to disable scatter-gather, tx checksum or tso offload. All of these can affect zerocopy behavior. The regression test can be run on a single machine if over a veth pair. Then skb_orphan_frags_rx must be modified to be identical to skb_orphan_frags to allow forwarding zerocopy locally. The msg_zerocopy.sh script will setup the veth pair in network namespaces and run all tests. Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- tools/testing/selftests/net/.gitignore | 1 + tools/testing/selftests/net/Makefile | 2 +- tools/testing/selftests/net/msg_zerocopy.c | 697 ++++++++++++++++++++++++++++ tools/testing/selftests/net/msg_zerocopy.sh | 112 +++++ 4 files changed, 811 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/net/msg_zerocopy.c create mode 100755 tools/testing/selftests/net/msg_zerocopy.sh diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index afe109e5508a..9801253e4802 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore @@ -1,3 +1,4 @@ +msg_zerocopy socket psock_fanout psock_tpacket diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index f6c9dbf478f8..6135a8448900 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -7,7 +7,7 @@ TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh TEST_GEN_FILES = socket TEST_GEN_FILES += psock_fanout psock_tpacket TEST_GEN_FILES += reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa -TEST_GEN_FILES += reuseport_dualstack +TEST_GEN_FILES += reuseport_dualstack msg_zerocopy include ../lib.mk diff --git a/tools/testing/selftests/net/msg_zerocopy.c b/tools/testing/selftests/net/msg_zerocopy.c new file mode 100644 index 000000000000..448c69a8af74 --- /dev/null +++ b/tools/testing/selftests/net/msg_zerocopy.c @@ -0,0 +1,697 @@ +/* Evaluate MSG_ZEROCOPY + * + * Send traffic between two processes over one of the supported + * protocols and modes: + * + * PF_INET/PF_INET6 + * - SOCK_STREAM + * - SOCK_DGRAM + * - SOCK_DGRAM with UDP_CORK + * - SOCK_RAW + * - SOCK_RAW with IP_HDRINCL + * + * PF_PACKET + * - SOCK_DGRAM + * - SOCK_RAW + * + * Start this program on two connected hosts, one in send mode and + * the other with option '-r' to put it in receiver mode. + * + * If zerocopy mode ('-z') is enabled, the sender will verify that + * the kernel queues completions on the error queue for all zerocopy + * transfers. 
+ */ + +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef SO_EE_ORIGIN_ZEROCOPY +#define SO_EE_ORIGIN_ZEROCOPY SO_EE_ORIGIN_UPAGE +#endif + +#ifndef SO_ZEROCOPY +#define SO_ZEROCOPY 59 +#endif + +#ifndef SO_EE_CODE_ZEROCOPY_COPIED +#define SO_EE_CODE_ZEROCOPY_COPIED 1 +#endif + +#ifndef MSG_ZEROCOPY +#define MSG_ZEROCOPY 0x4000000 +#endif + +static int cfg_cork; +static bool cfg_cork_mixed; +static int cfg_cpu = -1; /* default: pin to last cpu */ +static int cfg_family = PF_UNSPEC; +static int cfg_ifindex = 1; +static int cfg_payload_len; +static int cfg_port = 8000; +static bool cfg_rx; +static int cfg_runtime_ms = 4200; +static int cfg_verbose; +static int cfg_waittime_ms = 500; +static bool cfg_zerocopy; + +static socklen_t cfg_alen; +static struct sockaddr_storage cfg_dst_addr; +static struct sockaddr_storage cfg_src_addr; + +static char payload[IP_MAXPACKET]; +static long packets, bytes, completions, expected_completions; +static int zerocopied = -1; +static uint32_t next_completion; + +static unsigned long gettimeofday_ms(void) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + return (tv.tv_sec * 1000) + (tv.tv_usec / 1000); +} + +static uint16_t get_ip_csum(const uint16_t *start, int num_words) +{ + unsigned long sum = 0; + int i; + + for (i = 0; i < num_words; i++) + sum += start[i]; + + while (sum >> 16) + sum = (sum & 0xFFFF) + (sum >> 16); + + return ~sum; +} + +static int do_setcpu(int cpu) +{ + cpu_set_t mask; + + CPU_ZERO(&mask); + CPU_SET(cpu, &mask); + if (sched_setaffinity(0, sizeof(mask), &mask)) + error(1, 0, "setaffinity %d", cpu); + + if (cfg_verbose) + fprintf(stderr, "cpu: %u\n", cpu); + + return 0; +} + +static void do_setsockopt(int fd, int level, int optname, int val) +{ + if (setsockopt(fd, level, optname, &val, sizeof(val))) + error(1, errno, "setsockopt %d.%d: %d", level, optname, val); +} + +static int do_poll(int fd, int events) +{ + struct pollfd pfd; + int ret; + + pfd.events = events; + pfd.revents = 0; + pfd.fd = fd; + + ret = poll(&pfd, 1, cfg_waittime_ms); + if (ret == -1) + error(1, errno, "poll"); + + return ret && (pfd.revents & events); +} + +static int do_accept(int fd) +{ + int fda = fd; + + fd = accept(fda, NULL, NULL); + if (fd == -1) + error(1, errno, "accept"); + if (close(fda)) + error(1, errno, "close listen sock"); + + return fd; +} + +static bool do_sendmsg(int fd, struct msghdr *msg, bool do_zerocopy) +{ + int ret, len, i, flags; + + len = 0; + for (i = 0; i < msg->msg_iovlen; i++) + len += msg->msg_iov[i].iov_len; + + flags = MSG_DONTWAIT; + if (do_zerocopy) + flags |= MSG_ZEROCOPY; + + ret = sendmsg(fd, msg, flags); + if (ret == -1 && errno == EAGAIN) + return false; + if (ret == -1) + error(1, errno, "send"); + if (cfg_verbose && ret != len) + fprintf(stderr, "send: ret=%u != %u\n", ret, len); + + if (len) { + packets++; + bytes += ret; + if (do_zerocopy && ret) + expected_completions++; + } + + return true; +} + +static void do_sendmsg_corked(int fd, struct msghdr *msg) +{ + bool do_zerocopy = cfg_zerocopy; + int i, payload_len, extra_len; + + /* split up the packet. 
for non-multiple, make first buffer longer */ + payload_len = cfg_payload_len / cfg_cork; + extra_len = cfg_payload_len - (cfg_cork * payload_len); + + do_setsockopt(fd, IPPROTO_UDP, UDP_CORK, 1); + + for (i = 0; i < cfg_cork; i++) { + + /* in mixed-frags mode, alternate zerocopy and copy frags + * start with non-zerocopy, to ensure attach later works + */ + if (cfg_cork_mixed) + do_zerocopy = (i & 1); + + msg->msg_iov[0].iov_len = payload_len + extra_len; + extra_len = 0; + + do_sendmsg(fd, msg, do_zerocopy); + } + + do_setsockopt(fd, IPPROTO_UDP, UDP_CORK, 0); +} + +static int setup_iph(struct iphdr *iph, uint16_t payload_len) +{ + struct sockaddr_in *daddr = (void *) &cfg_dst_addr; + struct sockaddr_in *saddr = (void *) &cfg_src_addr; + + memset(iph, 0, sizeof(*iph)); + + iph->version = 4; + iph->tos = 0; + iph->ihl = 5; + iph->ttl = 2; + iph->saddr = saddr->sin_addr.s_addr; + iph->daddr = daddr->sin_addr.s_addr; + iph->protocol = IPPROTO_EGP; + iph->tot_len = htons(sizeof(*iph) + payload_len); + iph->check = get_ip_csum((void *) iph, iph->ihl << 1); + + return sizeof(*iph); +} + +static int setup_ip6h(struct ipv6hdr *ip6h, uint16_t payload_len) +{ + struct sockaddr_in6 *daddr = (void *) &cfg_dst_addr; + struct sockaddr_in6 *saddr = (void *) &cfg_src_addr; + + memset(ip6h, 0, sizeof(*ip6h)); + + ip6h->version = 6; + ip6h->payload_len = htons(payload_len); + ip6h->nexthdr = IPPROTO_EGP; + ip6h->hop_limit = 2; + ip6h->saddr = saddr->sin6_addr; + ip6h->daddr = daddr->sin6_addr; + + return sizeof(*ip6h); +} + +static void setup_sockaddr(int domain, const char *str_addr, void *sockaddr) +{ + struct sockaddr_in6 *addr6 = (void *) sockaddr; + struct sockaddr_in *addr4 = (void *) sockaddr; + + switch (domain) { + case PF_INET: + addr4->sin_family = AF_INET; + addr4->sin_port = htons(cfg_port); + if (inet_pton(AF_INET, str_addr, &(addr4->sin_addr)) != 1) + error(1, 0, "ipv4 parse error: %s", str_addr); + break; + case PF_INET6: + addr6->sin6_family = AF_INET6; + addr6->sin6_port = htons(cfg_port); + if (inet_pton(AF_INET6, str_addr, &(addr6->sin6_addr)) != 1) + error(1, 0, "ipv6 parse error: %s", str_addr); + break; + default: + error(1, 0, "illegal domain"); + } +} + +static int do_setup_tx(int domain, int type, int protocol) +{ + int fd; + + fd = socket(domain, type, protocol); + if (fd == -1) + error(1, errno, "socket t"); + + do_setsockopt(fd, SOL_SOCKET, SO_SNDBUF, 1 << 21); + if (cfg_zerocopy) + do_setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, 1); + + if (domain != PF_PACKET) + if (connect(fd, (void *) &cfg_dst_addr, cfg_alen)) + error(1, errno, "connect"); + + return fd; +} + +static bool do_recv_completion(int fd) +{ + struct sock_extended_err *serr; + struct msghdr msg = {}; + struct cmsghdr *cm; + uint32_t hi, lo, range; + int ret, zerocopy; + char control[100]; + + msg.msg_control = control; + msg.msg_controllen = sizeof(control); + + ret = recvmsg(fd, &msg, MSG_ERRQUEUE); + if (ret == -1 && errno == EAGAIN) + return false; + if (ret == -1) + error(1, errno, "recvmsg notification"); + if (msg.msg_flags & MSG_CTRUNC) + error(1, errno, "recvmsg notification: truncated"); + + cm = CMSG_FIRSTHDR(&msg); + if (!cm) + error(1, 0, "cmsg: no cmsg"); + if (!((cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) || + (cm->cmsg_level == SOL_IPV6 && cm->cmsg_type == IPV6_RECVERR) || + (cm->cmsg_level == SOL_PACKET && cm->cmsg_type == PACKET_TX_TIMESTAMP))) + error(1, 0, "serr: wrong type: %d.%d", + cm->cmsg_level, cm->cmsg_type); + + serr = (void *) CMSG_DATA(cm); + if (serr->ee_origin != 
SO_EE_ORIGIN_ZEROCOPY) + error(1, 0, "serr: wrong origin: %u", serr->ee_origin); + if (serr->ee_errno != 0) + error(1, 0, "serr: wrong error code: %u", serr->ee_errno); + + hi = serr->ee_data; + lo = serr->ee_info; + range = hi - lo + 1; + + /* Detect notification gaps. These should not happen often, if at all. + * Gaps can occur due to drops, reordering and retransmissions. + */ + if (lo != next_completion) + fprintf(stderr, "gap: %u..%u does not append to %u\n", + lo, hi, next_completion); + next_completion = hi + 1; + + zerocopy = !(serr->ee_code & SO_EE_CODE_ZEROCOPY_COPIED); + if (zerocopied == -1) + zerocopied = zerocopy; + else if (zerocopied != zerocopy) { + fprintf(stderr, "serr: inconsistent\n"); + zerocopied = zerocopy; + } + + if (cfg_verbose >= 2) + fprintf(stderr, "completed: %u (h=%u l=%u)\n", + range, hi, lo); + + completions += range; + return true; +} + +/* Read all outstanding messages on the errqueue */ +static void do_recv_completions(int fd) +{ + while (do_recv_completion(fd)) {} +} + +/* Wait for all remaining completions on the errqueue */ +static void do_recv_remaining_completions(int fd) +{ + int64_t tstop = gettimeofday_ms() + cfg_waittime_ms; + + while (completions < expected_completions && + gettimeofday_ms() < tstop) { + if (do_poll(fd, POLLERR)) + do_recv_completions(fd); + } + + if (completions < expected_completions) + error(1, 0, "missing notifications: %lu < %lu\n", + completions, expected_completions); +} + +static void do_tx(int domain, int type, int protocol) +{ + struct iovec iov[3] = { {0} }; + struct sockaddr_ll laddr; + struct msghdr msg = {0}; + struct ethhdr eth; + union { + struct ipv6hdr ip6h; + struct iphdr iph; + } nh; + uint64_t tstop; + int fd; + + fd = do_setup_tx(domain, type, protocol); + + if (domain == PF_PACKET) { + uint16_t proto = cfg_family == PF_INET ? ETH_P_IP : ETH_P_IPV6; + + /* sock_raw passes ll header as data */ + if (type == SOCK_RAW) { + memset(eth.h_dest, 0x06, ETH_ALEN); + memset(eth.h_source, 0x02, ETH_ALEN); + eth.h_proto = htons(proto); + iov[0].iov_base = ð + iov[0].iov_len = sizeof(eth); + msg.msg_iovlen++; + } + + /* both sock_raw and sock_dgram expect name */ + memset(&laddr, 0, sizeof(laddr)); + laddr.sll_family = AF_PACKET; + laddr.sll_ifindex = cfg_ifindex; + laddr.sll_protocol = htons(proto); + laddr.sll_halen = ETH_ALEN; + + memset(laddr.sll_addr, 0x06, ETH_ALEN); + + msg.msg_name = &laddr; + msg.msg_namelen = sizeof(laddr); + } + + /* packet and raw sockets with hdrincl must pass network header */ + if (domain == PF_PACKET || protocol == IPPROTO_RAW) { + if (cfg_family == PF_INET) + iov[1].iov_len = setup_iph(&nh.iph, cfg_payload_len); + else + iov[1].iov_len = setup_ip6h(&nh.ip6h, cfg_payload_len); + + iov[1].iov_base = (void *) &nh; + msg.msg_iovlen++; + } + + iov[2].iov_base = payload; + iov[2].iov_len = cfg_payload_len; + msg.msg_iovlen++; + msg.msg_iov = &iov[3 - msg.msg_iovlen]; + + tstop = gettimeofday_ms() + cfg_runtime_ms; + do { + if (cfg_cork) + do_sendmsg_corked(fd, &msg); + else + do_sendmsg(fd, &msg, cfg_zerocopy); + + while (!do_poll(fd, POLLOUT)) { + if (cfg_zerocopy) + do_recv_completions(fd); + } + + } while (gettimeofday_ms() < tstop); + + if (cfg_zerocopy) + do_recv_remaining_completions(fd); + + if (close(fd)) + error(1, errno, "close"); + + fprintf(stderr, "tx=%lu (%lu MB) txc=%lu zc=%c\n", + packets, bytes >> 20, completions, + zerocopied == 1 ? 
'y' : 'n'); +} + +static int do_setup_rx(int domain, int type, int protocol) +{ + int fd; + + /* If tx over PF_PACKET, rx over PF_INET(6)/SOCK_RAW, + * to recv the only copy of the packet, not a clone + */ + if (domain == PF_PACKET) + error(1, 0, "Use PF_INET/SOCK_RAW to read"); + + if (type == SOCK_RAW && protocol == IPPROTO_RAW) + error(1, 0, "IPPROTO_RAW: not supported on Rx"); + + fd = socket(domain, type, protocol); + if (fd == -1) + error(1, errno, "socket r"); + + do_setsockopt(fd, SOL_SOCKET, SO_RCVBUF, 1 << 21); + do_setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, 1 << 16); + do_setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, 1); + + if (bind(fd, (void *) &cfg_dst_addr, cfg_alen)) + error(1, errno, "bind"); + + if (type == SOCK_STREAM) { + if (listen(fd, 1)) + error(1, errno, "listen"); + fd = do_accept(fd); + } + + return fd; +} + +/* Flush all outstanding bytes for the tcp receive queue */ +static void do_flush_tcp(int fd) +{ + int ret; + + /* MSG_TRUNC flushes up to len bytes */ + ret = recv(fd, NULL, 1 << 21, MSG_TRUNC | MSG_DONTWAIT); + if (ret == -1 && errno == EAGAIN) + return; + if (ret == -1) + error(1, errno, "flush"); + if (!ret) + return; + + packets++; + bytes += ret; +} + +/* Flush all outstanding datagrams. Verify first few bytes of each. */ +static void do_flush_datagram(int fd, int type) +{ + int ret, off = 0; + char buf[64]; + + /* MSG_TRUNC will return full datagram length */ + ret = recv(fd, buf, sizeof(buf), MSG_DONTWAIT | MSG_TRUNC); + if (ret == -1 && errno == EAGAIN) + return; + + /* raw ipv4 return with header, raw ipv6 without */ + if (cfg_family == PF_INET && type == SOCK_RAW) { + off += sizeof(struct iphdr); + ret -= sizeof(struct iphdr); + } + + if (ret == -1) + error(1, errno, "recv"); + if (ret != cfg_payload_len) + error(1, 0, "recv: ret=%u != %u", ret, cfg_payload_len); + if (ret > sizeof(buf) - off) + ret = sizeof(buf) - off; + if (memcmp(buf + off, payload, ret)) + error(1, 0, "recv: data mismatch"); + + packets++; + bytes += cfg_payload_len; +} + +static void do_rx(int domain, int type, int protocol) +{ + uint64_t tstop; + int fd; + + fd = do_setup_rx(domain, type, protocol); + + tstop = gettimeofday_ms() + cfg_runtime_ms; + do { + if (type == SOCK_STREAM) + do_flush_tcp(fd); + else + do_flush_datagram(fd, type); + + do_poll(fd, POLLIN); + + } while (gettimeofday_ms() < tstop); + + if (close(fd)) + error(1, errno, "close"); + + fprintf(stderr, "rx=%lu (%lu MB)\n", packets, bytes >> 20); +} + +static void do_test(int domain, int type, int protocol) +{ + int i; + + if (cfg_cork && (domain == PF_PACKET || type != SOCK_DGRAM)) + error(1, 0, "can only cork udp sockets"); + + do_setcpu(cfg_cpu); + + for (i = 0; i < IP_MAXPACKET; i++) + payload[i] = 'a' + (i % 26); + + if (cfg_rx) + do_rx(domain, type, protocol); + else + do_tx(domain, type, protocol); +} + +static void usage(const char *filepath) +{ + error(1, 0, "Usage: %s [options] ", filepath); +} + +static void parse_opts(int argc, char **argv) +{ + const int max_payload_len = sizeof(payload) - + sizeof(struct ipv6hdr) - + sizeof(struct tcphdr) - + 40 /* max tcp options */; + int c; + + cfg_payload_len = max_payload_len; + + while ((c = getopt(argc, argv, "46c:C:D:i:mp:rs:S:t:vz")) != -1) { + switch (c) { + case '4': + if (cfg_family != PF_UNSPEC) + error(1, 0, "Pass one of -4 or -6"); + cfg_family = PF_INET; + cfg_alen = sizeof(struct sockaddr_in); + break; + case '6': + if (cfg_family != PF_UNSPEC) + error(1, 0, "Pass one of -4 or -6"); + cfg_family = PF_INET6; + cfg_alen = sizeof(struct sockaddr_in6); + 
break; + case 'c': + cfg_cork = strtol(optarg, NULL, 0); + break; + case 'C': + cfg_cpu = strtol(optarg, NULL, 0); + break; + case 'D': + setup_sockaddr(cfg_family, optarg, &cfg_dst_addr); + break; + case 'i': + cfg_ifindex = if_nametoindex(optarg); + if (cfg_ifindex == 0) + error(1, errno, "invalid iface: %s", optarg); + break; + case 'm': + cfg_cork_mixed = true; + break; + case 'p': + cfg_port = htons(strtoul(optarg, NULL, 0)); + break; + case 'r': + cfg_rx = true; + break; + case 's': + cfg_payload_len = strtoul(optarg, NULL, 0); + break; + case 'S': + setup_sockaddr(cfg_family, optarg, &cfg_src_addr); + break; + case 't': + cfg_runtime_ms = 200 + strtoul(optarg, NULL, 10) * 1000; + break; + case 'v': + cfg_verbose++; + break; + case 'z': + cfg_zerocopy = true; + break; + } + } + + if (cfg_payload_len > max_payload_len) + error(1, 0, "-s: payload exceeds max (%d)", max_payload_len); + if (cfg_cork_mixed && (!cfg_zerocopy || !cfg_cork)) + error(1, 0, "-m: cork_mixed requires corking and zerocopy"); + + if (optind != argc - 1) + usage(argv[0]); +} + +int main(int argc, char **argv) +{ + const char *cfg_test; + + parse_opts(argc, argv); + + cfg_test = argv[argc - 1]; + + if (!strcmp(cfg_test, "packet")) + do_test(PF_PACKET, SOCK_RAW, 0); + else if (!strcmp(cfg_test, "packet_dgram")) + do_test(PF_PACKET, SOCK_DGRAM, 0); + else if (!strcmp(cfg_test, "raw")) + do_test(cfg_family, SOCK_RAW, IPPROTO_EGP); + else if (!strcmp(cfg_test, "raw_hdrincl")) + do_test(cfg_family, SOCK_RAW, IPPROTO_RAW); + else if (!strcmp(cfg_test, "tcp")) + do_test(cfg_family, SOCK_STREAM, 0); + else if (!strcmp(cfg_test, "udp")) + do_test(cfg_family, SOCK_DGRAM, 0); + else + error(1, 0, "unknown cfg_test %s", cfg_test); + + return 0; +} diff --git a/tools/testing/selftests/net/msg_zerocopy.sh b/tools/testing/selftests/net/msg_zerocopy.sh new file mode 100755 index 000000000000..d571d213418d --- /dev/null +++ b/tools/testing/selftests/net/msg_zerocopy.sh @@ -0,0 +1,112 @@ +#!/bin/bash +# +# Send data between two processes across namespaces +# Run twice: once without and once with zerocopy + +set -e + +readonly DEV="veth0" +readonly DEV_MTU=65535 +readonly BIN="./msg_zerocopy" + +readonly RAND="$(mktemp -u XXXXXX)" +readonly NSPREFIX="ns-${RAND}" +readonly NS1="${NSPREFIX}1" +readonly NS2="${NSPREFIX}2" + +readonly SADDR4='192.168.1.1' +readonly DADDR4='192.168.1.2' +readonly SADDR6='fd::1' +readonly DADDR6='fd::2' + +readonly path_sysctl_mem="net.core.optmem_max" + +# Argument parsing +if [[ "$#" -lt "2" ]]; then + echo "Usage: $0 [4|6] [tcp|udp|raw|raw_hdrincl|packet|packet_dgram] " + exit 1 +fi + +readonly IP="$1" +shift +readonly TXMODE="$1" +shift +readonly EXTRA_ARGS="$@" + +# Argument parsing: configure addresses +if [[ "${IP}" == "4" ]]; then + readonly SADDR="${SADDR4}" + readonly DADDR="${DADDR4}" +elif [[ "${IP}" == "6" ]]; then + readonly SADDR="${SADDR6}" + readonly DADDR="${DADDR6}" +else + echo "Invalid IP version ${IP}" + exit 1 +fi + +# Argument parsing: select receive mode +# +# This differs from send mode for +# - packet: use raw recv, because packet receives skb clones +# - raw_hdrinc: use raw recv, because hdrincl is a tx-only option +case "${TXMODE}" in +'packet' | 'packet_dgram' | 'raw_hdrincl') + RXMODE='raw' + ;; +*) + RXMODE="${TXMODE}" + ;; +esac + +# Start of state changes: install cleanup handler +save_sysctl_mem="$(sysctl -n ${path_sysctl_mem})" + +cleanup() { + ip netns del "${NS2}" + ip netns del "${NS1}" + sysctl -w -q "${path_sysctl_mem}=${save_sysctl_mem}" +} + +trap cleanup EXIT + +# 
Configure system settings +sysctl -w -q "${path_sysctl_mem}=1000000" + +# Create virtual ethernet pair between network namespaces +ip netns add "${NS1}" +ip netns add "${NS2}" + +ip link add "${DEV}" mtu "${DEV_MTU}" netns "${NS1}" type veth \ + peer name "${DEV}" mtu "${DEV_MTU}" netns "${NS2}" + +# Bring the devices up +ip -netns "${NS1}" link set "${DEV}" up +ip -netns "${NS2}" link set "${DEV}" up + +# Set fixed MAC addresses on the devices +ip -netns "${NS1}" link set dev "${DEV}" address 02:02:02:02:02:02 +ip -netns "${NS2}" link set dev "${DEV}" address 06:06:06:06:06:06 + +# Add fixed IP addresses to the devices +ip -netns "${NS1}" addr add 192.168.1.1/24 dev "${DEV}" +ip -netns "${NS2}" addr add 192.168.1.2/24 dev "${DEV}" +ip -netns "${NS1}" addr add fd::1/64 dev "${DEV}" nodad +ip -netns "${NS2}" addr add fd::2/64 dev "${DEV}" nodad + +# Optionally disable sg or csum offload to test edge cases +# ip netns exec "${NS1}" ethtool -K "${DEV}" sg off + +do_test() { + local readonly ARGS="$1" + + echo "ipv${IP} ${TXMODE} ${ARGS}" + ip netns exec "${NS2}" "${BIN}" "-${IP}" -i "${DEV}" -t 2 -C 2 -S "${SADDR}" -D "${DADDR}" ${ARGS} -r "${RXMODE}" & + sleep 0.2 + ip netns exec "${NS1}" "${BIN}" "-${IP}" -i "${DEV}" -t 1 -C 3 -S "${SADDR}" -D "${DADDR}" ${ARGS} "${TXMODE}" + wait +} + +do_test "${EXTRA_ARGS}" +do_test "-z ${EXTRA_ARGS}" +echo ok -- cgit v1.2.3-55-g7522 From 5987feb38aa55e035ce5376c02aba88a604cc881 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 4 Aug 2017 11:17:21 +0300 Subject: net: phy: marvell: logical vs bitwise OR typo This was supposed to be a bitwise OR but there is a || vs | typo. Fixes: 864dc729d528 ("net: phy: marvell: Refactor m88e1121 RGMII delay configuration") Signed-off-by: Dan Carpenter Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 361fe9927ef2..15cbcdba618a 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -83,7 +83,7 @@ #define MII_88E1121_PHY_MSCR_REG 21 #define MII_88E1121_PHY_MSCR_RX_DELAY BIT(5) #define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4) -#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(BIT(5) || BIT(4))) +#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(BIT(5) | BIT(4))) #define MII_88E1121_MISC_TEST 0x1a #define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00 -- cgit v1.2.3-55-g7522 From 967b2e2a76e380abdebe7f3c7ab17e8831accd21 Mon Sep 17 00:00:00 2001 From: Lin Yun Sheng Date: Fri, 4 Aug 2017 17:24:59 +0800 Subject: net: hns: Fix for __udivdi3 compiler error This patch fixes the __udivdi3 undefined error reported by test robot. Fixes: b8c17f708831 ("net: hns: Add self-adaptive interrupt coalesce support in hns driver") Signed-off-by: Yunsheng Lin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 832f27792e3f..36520634c96a 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -847,7 +847,8 @@ static void hns_update_rx_rate(struct hnae_ring *ring) total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes; time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies); - ring->coal_rx_rate = (total_bytes / time_passed_ms) >> 10; + do_div(total_bytes, time_passed_ms); + ring->coal_rx_rate = total_bytes >> 10; ring->coal_last_rx_bytes = ring->stats.rx_bytes; ring->coal_last_jiffies = jiffies; -- cgit v1.2.3-55-g7522 From f7ebdff7575c3a5c099c8b1d661b0a65f4c6f10f Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:28:56 +0200 Subject: net: sched: sch_atm: use Qdisc_class_common structure Even if it is only for classid now, use this common struct a be aligned with the rest of the classful qdiscs. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/sch_atm.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 572fe2584e48..0af4b1c6f674 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -41,6 +41,7 @@ #define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back)) struct atm_flow_data { + struct Qdisc_class_common common; struct Qdisc *q; /* FIFO, TBF, etc. */ struct tcf_proto __rcu *filter_list; struct tcf_block *block; @@ -49,7 +50,6 @@ struct atm_flow_data { struct sk_buff *skb); /* chaining */ struct atm_qdisc_data *parent; /* parent qdisc */ struct socket *sock; /* for closing */ - u32 classid; /* x:y type ID */ int ref; /* reference count */ struct gnet_stats_basic_packed bstats; struct gnet_stats_queue qstats; @@ -75,7 +75,7 @@ static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid) struct atm_flow_data *flow; list_for_each_entry(flow, &p->flows, list) { - if (flow->classid == classid) + if (flow->common.classid == classid) return flow; } return NULL; @@ -293,7 +293,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent, flow->old_pop = flow->vcc->pop; flow->parent = p; flow->vcc->pop = sch_atm_pop; - flow->classid = classid; + flow->common.classid = classid; flow->ref = 1; flow->excess = excess; list_add(&flow->list, &p->link.list); @@ -549,7 +549,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt) p->link.vcc = NULL; p->link.sock = NULL; - p->link.classid = sch->handle; + p->link.common.classid = sch->handle; p->link.ref = 1; tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); return 0; @@ -594,7 +594,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, sch, p, flow, skb, tcm); if (list_empty(&flow->list)) return -EINVAL; - tcm->tcm_handle = flow->classid; + tcm->tcm_handle = flow->common.classid; tcm->tcm_info = flow->q->handle; nest = nla_nest_start(skb, TCA_OPTIONS); @@ -619,7 +619,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, goto nla_put_failure; } if (flow->excess) { - if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid)) + if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid)) goto nla_put_failure; } else { if (nla_put_u32(skb, TCA_ATM_EXCESS, 0)) -- cgit v1.2.3-55-g7522 From 4ebc1e3cfcd8778e2150bdb799b19e85348b8efa Mon Sep 17 
00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:28:57 +0200 Subject: net: sched: remove unneeded tcf_em_tree_change Since tcf_em_tree_validate could be always called on a newly created filter, there is no need for this change function. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 21 --------------------- net/sched/cls_basic.c | 4 +--- net/sched/cls_cgroup.c | 4 +--- net/sched/cls_flow.c | 12 +++++------- 4 files changed, 7 insertions(+), 34 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 537d0a0ad4c4..f4462ec8b2f4 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -332,26 +332,6 @@ int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int); int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *, struct tcf_pkt_info *); -/** - * tcf_em_tree_change - replace ematch tree of a running classifier - * - * @tp: classifier kind handle - * @dst: destination ematch tree variable - * @src: source ematch tree (temporary tree from tcf_em_tree_validate) - * - * This functions replaces the ematch tree in @dst with the ematch - * tree in @src. The classifier in charge of the ematch tree may be - * running. - */ -static inline void tcf_em_tree_change(struct tcf_proto *tp, - struct tcf_ematch_tree *dst, - struct tcf_ematch_tree *src) -{ - tcf_tree_lock(tp); - memcpy(dst, src, sizeof(*dst)); - tcf_tree_unlock(tp); -} - /** * tcf_em_tree_match - evaulate an ematch tree * @@ -386,7 +366,6 @@ struct tcf_ematch_tree { #define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0) #define tcf_em_tree_destroy(t) do { (void)(t); } while(0) #define tcf_em_tree_dump(skb, t, tlv) (0) -#define tcf_em_tree_change(tp, dst, src) do { } while(0) #define tcf_em_tree_match(skb, t, info) ((void)(info), 1) #endif /* CONFIG_NET_EMATCH */ diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index c4fd63a068f9..979cd2683b46 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -130,7 +130,6 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp, { int err; struct tcf_exts e; - struct tcf_ematch_tree t; err = tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE); if (err < 0) @@ -139,7 +138,7 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp, if (err < 0) goto errout; - err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &t); + err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &f->ematches); if (err < 0) goto errout; @@ -149,7 +148,6 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp, } tcf_exts_change(tp, &f->exts, &e); - tcf_em_tree_change(tp, &f->ematches, &t); f->tp = tp; return 0; diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 12ce547eea04..ce7d38beab95 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -76,7 +76,6 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, struct nlattr *tb[TCA_CGROUP_MAX + 1]; struct cls_cgroup_head *head = rtnl_dereference(tp->root); struct cls_cgroup_head *new; - struct tcf_ematch_tree t; struct tcf_exts e; int err; @@ -112,14 +111,13 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, goto errout; } - err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t); + err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches); if (err < 0) { tcf_exts_destroy(&e); goto errout; } tcf_exts_change(tp, &new->exts, &e); - tcf_em_tree_change(tp, &new->ematches, &t); rcu_assign_pointer(tp->root, new); if (head) diff --git 
a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 3065752b9cda..71fd1af01726 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -389,7 +389,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FLOW_MAX + 1]; struct tcf_exts e; - struct tcf_ematch_tree t; unsigned int nkeys = 0; unsigned int perturb_period = 0; u32 baseclass = 0; @@ -432,13 +431,13 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (err < 0) goto err1; - err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t); - if (err < 0) - goto err1; - err = -ENOBUFS; fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); if (!fnew) + goto err1; + + err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches); + if (err < 0) goto err2; err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); @@ -512,7 +511,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, (unsigned long)fnew); tcf_exts_change(tp, &fnew->exts, &e); - tcf_em_tree_change(tp, &fnew->ematches, &t); netif_keep_dst(qdisc_dev(tp->q)); @@ -554,8 +552,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, err3: tcf_exts_destroy(&fnew->exts); + tcf_em_tree_destroy(&fnew->ematches); err2: - tcf_em_tree_destroy(&t); kfree(fnew); err1: tcf_exts_destroy(&e); -- cgit v1.2.3-55-g7522 From 3bcc0cec818fa969fe555b44443347211ed787a3 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:28:58 +0200 Subject: net: sched: change names of action number helpers to be aligned with the rest The rest of the helpers are named tcf_exts_*, so change the name of the action number helpers to be aligned. While at it, change to inline functions. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4 +-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_flower.c | 2 +- drivers/net/ethernet/netronome/nfp/bpf/offload.c | 4 +-- include/net/pkt_cls.h | 36 ++++++++++++++++------ net/dsa/slave.c | 2 +- net/sched/cls_api.c | 2 +- 9 files changed, 37 insertions(+), 19 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index ef06ce8247ab..6f734c52ef25 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -96,7 +96,7 @@ static int fill_action_fields(struct adapter *adap, LIST_HEAD(actions); exts = cls->knode.exts; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; tcf_exts_to_list(exts, &actions); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 96606e3eb965..091fcc7e6e43 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8953,7 +8953,7 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter, LIST_HEAD(actions); int err; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; tcf_exts_to_list(exts, &actions); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3c536f560dd2..78f50d9f621d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1326,7 +1326,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, LIST_HEAD(actions); int err; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; @@ -1839,7 +1839,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, bool encap = false; int err = 0; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; memset(attr, 0, sizeof(*attr)); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 88b668ba0d8a..66d511d45c25 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1626,7 +1626,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, LIST_HEAD(actions); int err; - if (!tc_single_action(cls->exts)) { + if (!tcf_exts_has_one_action(cls->exts)) { netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 400ad4081660..9be48d2e43ca 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -53,7 +53,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, LIST_HEAD(actions); int err; - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return 0; /* Count action is inserted first */ diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 78d80a364edb..a88bb5bc0082 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ 
-115,14 +115,14 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf) /* TC direct action */ if (cls_bpf->exts_integrated) { - if (tc_no_actions(cls_bpf->exts)) + if (!tcf_exts_has_actions(cls_bpf->exts)) return NN_ACT_DIRECT; return -EOPNOTSUPP; } /* TC legacy mode */ - if (!tc_single_action(cls_bpf->exts)) + if (!tcf_exts_has_one_action(cls_bpf->exts)) return -EOPNOTSUPP; tcf_exts_to_list(cls_bpf->exts, &actions); diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index f4462ec8b2f4..7f2563636df0 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -199,17 +199,35 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, return 0; } +/** + * tcf_exts_has_actions - check if at least one action is present + * @exts: tc filter extensions handle + * + * Returns true if at least one action is present. + */ +static inline bool tcf_exts_has_actions(struct tcf_exts *exts) +{ #ifdef CONFIG_NET_CLS_ACT + return exts->nr_actions; +#else + return false; +#endif +} -#define tc_no_actions(_exts) ((_exts)->nr_actions == 0) -#define tc_single_action(_exts) ((_exts)->nr_actions == 1) - -#else /* CONFIG_NET_CLS_ACT */ - -#define tc_no_actions(_exts) true -#define tc_single_action(_exts) false - -#endif /* CONFIG_NET_CLS_ACT */ +/** + * tcf_exts_has_one_action - check if exactly one action is present + * @exts: tc filter extensions handle + * + * Returns true if exactly one action is present. + */ +static inline bool tcf_exts_has_one_action(struct tcf_exts *exts) +{ +#ifdef CONFIG_NET_CLS_ACT + return exts->nr_actions == 1; +#else + return false; +#endif +} int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, diff --git a/net/dsa/slave.c b/net/dsa/slave.c index e196562035b1..83252e8426d7 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -779,7 +779,7 @@ static int dsa_slave_add_cls_matchall(struct net_device *dev, if (!ds->ops->port_mirror_add) return err; - if (!tc_single_action(cls->exts)) + if (!tcf_exts_has_one_action(cls->exts)) return err; tcf_exts_to_list(cls->exts, &actions); diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 39da0c5801c9..287ae6cbf73b 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -972,7 +972,7 @@ int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts, const struct tc_action *a; LIST_HEAD(actions); - if (tc_no_actions(exts)) + if (!tcf_exts_has_actions(exts)) return -EINVAL; tcf_exts_to_list(exts, &actions); -- cgit v1.2.3-55-g7522 From af69afc551eb9f9b1f2cc3295e2dfcdaa1dc948d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:28:59 +0200 Subject: net: sched: use tcf_exts_has_actions in tcf_exts_exec Use the tcf_exts_has_actions helper instead or directly testing exts->nr_actions in tcf_exts_exec. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 46 +++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 7f2563636df0..322a2823cc6a 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -176,29 +176,6 @@ tcf_exts_stats_update(const struct tcf_exts *exts, #endif } -/** - * tcf_exts_exec - execute tc filter extensions - * @skb: socket buffer - * @exts: tc filter extensions handle - * @res: desired result - * - * Executes all configured extensions. 
Returns 0 on a normal execution, - * a negative number if the filter must be considered unmatched or - * a positive action code (TC_ACT_*) which must be returned to the - * underlying layer. - */ -static inline int -tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, - struct tcf_result *res) -{ -#ifdef CONFIG_NET_CLS_ACT - if (exts->nr_actions) - return tcf_action_exec(skb, exts->actions, exts->nr_actions, - res); -#endif - return 0; -} - /** * tcf_exts_has_actions - check if at least one action is present * @exts: tc filter extensions handle @@ -229,6 +206,29 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts) #endif } +/** + * tcf_exts_exec - execute tc filter extensions + * @skb: socket buffer + * @exts: tc filter extensions handle + * @res: desired result + * + * Executes all configured extensions. Returns 0 on a normal execution, + * a negative number if the filter must be considered unmatched or + * a positive action code (TC_ACT_*) which must be returned to the + * underlying layer. + */ +static inline int +tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, + struct tcf_result *res) +{ +#ifdef CONFIG_NET_CLS_ACT + if (tcf_exts_has_actions(exts)) + return tcf_action_exec(skb, exts->actions, exts->nr_actions, + res); +#endif + return 0; +} + int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr); -- cgit v1.2.3-55-g7522 From 6fc6d06e5371507e68c6904a3423622b0e465b64 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:00 +0200 Subject: net: sched: remove redundant helpers tcf_exts_is_predicative and tcf_exts_is_available These two helpers are doing the same as tcf_exts_has_actions, so remove them and use tcf_exts_has_actions instead. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 30 ------------------------------ net/sched/cls_fw.c | 2 +- net/sched/cls_route.c | 2 +- net/sched/cls_tcindex.c | 2 +- 4 files changed, 3 insertions(+), 33 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 322a2823cc6a..817badf733b5 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -113,36 +113,6 @@ static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police) return 0; } -/** - * tcf_exts_is_predicative - check if a predicative extension is present - * @exts: tc filter extensions handle - * - * Returns 1 if a predicative extension is present, i.e. an extension which - * might cause further actions and thus overrule the regular tcf_result. - */ -static inline int -tcf_exts_is_predicative(struct tcf_exts *exts) -{ -#ifdef CONFIG_NET_CLS_ACT - return exts->nr_actions; -#else - return 0; -#endif -} - -/** - * tcf_exts_is_available - check if at least one extension is present - * @exts: tc filter extensions handle - * - * Returns 1 if at least one extension is present. - */ -static inline int -tcf_exts_is_available(struct tcf_exts *exts) -{ - /* All non-predicative extensions must be added here. 
*/ - return tcf_exts_is_predicative(exts); -} - static inline void tcf_exts_to_list(const struct tcf_exts *exts, struct list_head *actions) { diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index d3885362e017..a53fa75dfc7b 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -387,7 +387,7 @@ static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, t->tcm_handle = f->id; - if (!f->res.classid && !tcf_exts_is_available(&f->exts)) + if (!f->res.classid && !tcf_exts_has_actions(&f->exts)) return skb->len; nest = nla_nest_start(skb, TCA_OPTIONS); diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index d63d5502ee02..26f863634862 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -113,7 +113,7 @@ static inline int route4_hash_wild(void) #define ROUTE4_APPLY_RESULT() \ { \ *res = f->res; \ - if (tcf_exts_is_available(&f->exts)) { \ + if (tcf_exts_has_actions(&f->exts)) { \ int r = tcf_exts_exec(skb, &f->exts, res); \ if (r < 0) { \ dont_cache = 1; \ diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 8a8a58357c39..66924d147e97 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -52,7 +52,7 @@ struct tcindex_data { static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) { - return tcf_exts_is_predicative(&r->exts) || r->res.classid; + return tcf_exts_has_actions(&r->exts) || r->res.classid; } static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p, -- cgit v1.2.3-55-g7522 From af089e701adfb5898fee00a56ea4bb421edc308d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:01 +0200 Subject: net: sched: fix return value of tcf_exts_exec Return the defined TC_ACT_OK instead of 0. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 817badf733b5..61ce521688b2 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -182,7 +182,7 @@ static inline bool tcf_exts_has_one_action(struct tcf_exts *exts) * @exts: tc filter extensions handle * @res: desired result * - * Executes all configured extensions. Returns 0 on a normal execution, + * Executes all configured extensions. Returns TC_ACT_OK on a normal execution, * a negative number if the filter must be considered unmatched or * a positive action code (TC_ACT_*) which must be returned to the * underlying layer. @@ -196,7 +196,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, return tcf_action_exec(skb, exts->actions, exts->nr_actions, res); #endif - return 0; + return TC_ACT_OK; } int tcf_exts_validate(struct net *net, struct tcf_proto *tp, -- cgit v1.2.3-55-g7522 From ec1a9cca0e13391167567964fd04e61a39d6a4ae Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:02 +0200 Subject: net: sched: remove check for number of actions in tcf_exts_exec Leave it to tcf_action_exec to return TC_ACT_OK in case there is no action present. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
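For context, the classifier-side pattern these tcf_exts_exec() changes rely on, sketched as a hypothetical ->classify() callback; my_head, my_filter and my_match() are invented for the sketch and mirror the common cls_* shape rather than any specific module:

/* Illustrative sketch only. */
struct my_filter {
	struct list_head	link;
	struct tcf_result	res;
	struct tcf_exts		exts;
};

struct my_head {
	struct list_head	filters;
};

static bool my_match(const struct sk_buff *skb, const struct my_filter *f);

static int my_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct my_head *head = rcu_dereference_bh(tp->root);
	struct my_filter *f;
	int r;

	list_for_each_entry_rcu(f, &head->filters, link) {
		if (!my_match(skb, f))
			continue;
		*res = f->res;
		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;	/* filter considered unmatched */
		return r;		/* TC_ACT_OK or another TC_ACT_* verdict */
	}
	return -1;			/* no filter matched */
}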
Miller --- include/net/pkt_cls.h | 4 +--- net/sched/act_api.c | 3 ++- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 61ce521688b2..b8959c9a190d 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -192,9 +192,7 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts, struct tcf_result *res) { #ifdef CONFIG_NET_CLS_ACT - if (tcf_exts_has_actions(exts)) - return tcf_action_exec(skb, exts->actions, exts->nr_actions, - res); + return tcf_action_exec(skb, exts->actions, exts->nr_actions, res); #endif return TC_ACT_OK; } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index f19b118df414..a2915d958279 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -473,9 +473,10 @@ static struct tc_action_ops *tc_lookup_action(struct nlattr *kind) int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions, int nr_actions, struct tcf_result *res) { - int ret = -1, i; u32 jmp_prgcnt = 0; u32 jmp_ttl = TCA_ACT_MAX_PRIO; /*matches actions per filter */ + int i; + int ret = TC_ACT_OK; if (skb_skip_tc_classify(skb)) return TC_ACT_OK; -- cgit v1.2.3-55-g7522 From 978dfd8d14a503d20be1f1e2ae328c3eac675a2d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:03 +0200 Subject: net: sched: use tcf_exts_has_actions instead of exts->nr_actions For check in tcf_exts_dump use tcf_exts_has_actions helper instead of exts->nr_actions for checking if there are any actions present. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 287ae6cbf73b..735d556a5283 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -915,7 +915,7 @@ int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts) #ifdef CONFIG_NET_CLS_ACT struct nlattr *nest; - if (exts->action && exts->nr_actions) { + if (exts->action && tcf_exts_has_actions(exts)) { /* * again for backward compatible mode - we want * to work with both old and new modes of entering -- cgit v1.2.3-55-g7522 From 6a725c481df36b1ad471ea788a5bc64c25bf7af8 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:04 +0200 Subject: net: sched: cls_bpf: rename cls_bpf_modify_existing function The name cls_bpf_modify_existing is highly misleading, as it indeed does not modify anything existing. It does not modify at all. Signed-off-by: Jiri Pirko Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- net/sched/cls_bpf.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index f57bd531ba98..e9ab8374a877 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -382,10 +382,9 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog, return 0; } -static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp, - struct cls_bpf_prog *prog, - unsigned long base, struct nlattr **tb, - struct nlattr *est, bool ovr) +static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp, + struct cls_bpf_prog *prog, unsigned long base, + struct nlattr **tb, struct nlattr *est, bool ovr) { bool is_bpf, is_ebpf, have_exts = false; struct tcf_exts exts; @@ -508,8 +507,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, goto errout; } - ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], - ovr); + ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr); if (ret < 0) goto errout; -- cgit v1.2.3-55-g7522 From 1e5003af37352f37ba008f99014cc47c684ae808 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:05 +0200 Subject: net: sched: cls_fw: rename fw_change_attrs function Since the function name is misleading since it is not changing anything, name it similarly to other cls. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_fw.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index a53fa75dfc7b..358bf34f0dd0 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -190,10 +190,9 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = { [TCA_FW_MASK] = { .type = NLA_U32 }, }; -static int -fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f, - struct nlattr **tb, struct nlattr **tca, unsigned long base, - bool ovr) +static int fw_set_parms(struct net *net, struct tcf_proto *tp, + struct fw_filter *f, struct nlattr **tb, + struct nlattr **tca, unsigned long base, bool ovr) { struct fw_head *head = rtnl_dereference(tp->root); struct tcf_exts e; @@ -282,7 +281,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, return err; } - err = fw_change_attrs(net, tp, fnew, tb, tca, base, ovr); + err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr); if (err < 0) { tcf_exts_destroy(&fnew->exts); kfree(fnew); @@ -330,7 +329,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, f->id = handle; f->tp = tp; - err = fw_change_attrs(net, tp, f, tb, tca, base, ovr); + err = fw_set_parms(net, tp, f, tb, tca, base, ovr); if (err < 0) goto errout; -- cgit v1.2.3-55-g7522 From 455075292b0d58f708c1afe7011a8b9223b2c9c8 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:06 +0200 Subject: net: sched: cls_flower: no need to call tcf_exts_change for newly allocated struct As the f struct was allocated right before fl_set_parms call, no need to use tcf_exts_change to do atomic change, and we can just fill-up the unused exts struct directly by tcf_exts_validate. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/cls_flower.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 7832eb93379b..7ab524fc43f9 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -852,15 +852,11 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct nlattr **tb, struct nlattr *est, bool ovr) { - struct tcf_exts e; int err; - err = tcf_exts_init(&e, TCA_FLOWER_ACT, 0); + err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr); if (err < 0) return err; - err = tcf_exts_validate(net, tp, tb, est, &e, ovr); - if (err < 0) - goto errout; if (tb[TCA_FLOWER_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_FLOWER_CLASSID]); @@ -869,17 +865,12 @@ static int fl_set_parms(struct net *net, struct tcf_proto *tp, err = fl_set_key(net, tb, &f->key, &mask->key); if (err) - goto errout; + return err; fl_mask_update_range(mask); fl_set_masked_key(&f->mkey, &f->key, mask); - tcf_exts_change(tp, &f->exts, &e); - return 0; -errout: - tcf_exts_destroy(&e); - return err; } static u32 fl_grab_new_handle(struct tcf_proto *tp, -- cgit v1.2.3-55-g7522 From 94611bff6e1e57c4a74a9242f3fe749aa4a31678 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:07 +0200 Subject: net: sched: cls_fw: no need to call tcf_exts_change for newly allocated struct As the f struct was allocated right before fw_set_parms call, no need to use tcf_exts_change to do atomic change, and we can just fill-up the unused exts struct directly by tcf_exts_validate. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_fw.c | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 358bf34f0dd0..11f178f1b2be 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -195,16 +195,12 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp, struct nlattr **tca, unsigned long base, bool ovr) { struct fw_head *head = rtnl_dereference(tp->root); - struct tcf_exts e; u32 mask; int err; - err = tcf_exts_init(&e, TCA_FW_ACT, TCA_FW_POLICE); + err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr); if (err < 0) return err; - err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); - if (err < 0) - goto errout; if (tb[TCA_FW_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); @@ -215,10 +211,8 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp, if (tb[TCA_FW_INDEV]) { int ret; ret = tcf_change_indev(net, tb[TCA_FW_INDEV]); - if (ret < 0) { - err = ret; - goto errout; - } + if (ret < 0) + return ret; f->ifindex = ret; } #endif /* CONFIG_NET_CLS_IND */ @@ -227,16 +221,11 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp, if (tb[TCA_FW_MASK]) { mask = nla_get_u32(tb[TCA_FW_MASK]); if (mask != head->mask) - goto errout; + return err; } else if (head->mask != 0xFFFFFFFF) - goto errout; - - tcf_exts_change(tp, &f->exts, &e); + return err; return 0; -errout: - tcf_exts_destroy(&e); - return err; } static int fw_change(struct net *net, struct sk_buff *in_skb, -- cgit v1.2.3-55-g7522 From a74cb36980c3676c888a87dd66bcd410f7e75c08 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:08 +0200 Subject: net: sched: cls_matchall: no need to call tcf_exts_change for newly allocated struct As the head struct was allocated right before mall_set_parms call, no need to use tcf_exts_change to do atomic change, and we can just fill-up the unused exts struct directly by 
tcf_exts_validate. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_matchall.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 9dc26c32cf32..f35177b48373 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -120,27 +120,17 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct nlattr **tb, struct nlattr *est, bool ovr) { - struct tcf_exts e; int err; - err = tcf_exts_init(&e, TCA_MATCHALL_ACT, 0); - if (err) - return err; - err = tcf_exts_validate(net, tp, tb, est, &e, ovr); + err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr); if (err < 0) - goto errout; + return err; if (tb[TCA_MATCHALL_CLASSID]) { head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]); tcf_bind_filter(tp, &head->res, base); } - - tcf_exts_change(tp, &head->exts, &e); - return 0; -errout: - tcf_exts_destroy(&e); - return err; } static int mall_change(struct net *net, struct sk_buff *in_skb, -- cgit v1.2.3-55-g7522 From ff1f8ca0805ab03c31e14a9356aa2ac81d65c338 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:09 +0200 Subject: net: sched: cls_basic: no need to call tcf_exts_change for newly allocated struct As the f struct was allocated right before basic_set_parms call, no need to use tcf_exts_change to do atomic change, and we can just fill-up the unused exts struct directly by tcf_exts_validate. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_basic.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 979cd2683b46..7c7a82138f76 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -129,31 +129,22 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp, struct nlattr *est, bool ovr) { int err; - struct tcf_exts e; - err = tcf_exts_init(&e, TCA_BASIC_ACT, TCA_BASIC_POLICE); + err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr); if (err < 0) return err; - err = tcf_exts_validate(net, tp, tb, est, &e, ovr); - if (err < 0) - goto errout; err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &f->ematches); if (err < 0) - goto errout; + return err; if (tb[TCA_BASIC_CLASSID]) { f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]); tcf_bind_filter(tp, &f->res, base); } - tcf_exts_change(tp, &f->exts, &e); f->tp = tp; - return 0; -errout: - tcf_exts_destroy(&e); - return err; } static int basic_change(struct net *net, struct sk_buff *in_skb, -- cgit v1.2.3-55-g7522 From 6839da326dfcb98bf8020d42b416636471f1e462 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:10 +0200 Subject: net: sched: cls_bpf: no need to call tcf_exts_change for newly allocated struct As the prog struct was allocated right before cls_bpf_set_parms call, no need to use tcf_exts_change to do atomic change, and we can just fill-up the unused exts struct directly by tcf_exts_validate. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
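The simplification repeated across these cls_* patches reduces to the sketch below; my_set_parms() and my_filter are hypothetical, and the sketch assumes f->exts was initialized with tcf_exts_init() right after f was allocated, as the respective change() callbacks do:

/* Illustrative sketch only: validate straight into the new filter's exts. */
static int my_set_parms(struct net *net, struct tcf_proto *tp,
			struct my_filter *f, struct nlattr **tb,
			struct nlattr *est, bool ovr)
{
	int err;

	/* f is freshly allocated and not yet visible to the fast path, so no
	 * temporary tcf_exts and no tcf_exts_change() swap are needed. */
	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr);
	if (err < 0)
		return err;

	/* ... parse the remaining attributes directly into *f ... */
	return 0;
}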
Miller --- net/sched/cls_bpf.c | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index e9ab8374a877..cf248c3137ad 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -387,7 +387,6 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *est, bool ovr) { bool is_bpf, is_ebpf, have_exts = false; - struct tcf_exts exts; u32 gen_flags = 0; int ret; @@ -396,30 +395,23 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp, if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) return -EINVAL; - ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE); + ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr); if (ret < 0) return ret; - ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr); - if (ret < 0) - goto errout; if (tb[TCA_BPF_FLAGS]) { u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]); - if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) { - ret = -EINVAL; - goto errout; - } + if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) + return -EINVAL; have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT; } if (tb[TCA_BPF_FLAGS_GEN]) { gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]); if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS || - !tc_flags_valid(gen_flags)) { - ret = -EINVAL; - goto errout; - } + !tc_flags_valid(gen_flags)) + return -EINVAL; } prog->exts_integrated = have_exts; @@ -428,19 +420,14 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp, ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) : cls_bpf_prog_from_efd(tb, prog, tp); if (ret < 0) - goto errout; + return ret; if (tb[TCA_BPF_CLASSID]) { prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]); tcf_bind_filter(tp, &prog->res, base); } - tcf_exts_change(tp, &prog->exts, &exts); return 0; - -errout: - tcf_exts_destroy(&exts); - return ret; } static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp, -- cgit v1.2.3-55-g7522 From 8cc6251381183191f99ecd6d49931251ebbbe27d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:11 +0200 Subject: net: sched: cls_cgroup: no need to call tcf_exts_change for newly allocated struct As the new struct just was allocated, so no need to use tcf_exts_change to do atomic change, and we can just fill-up the unused exts struct directly by tcf_exts_validate. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/cls_cgroup.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index ce7d38beab95..df7a582775df 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -76,7 +76,6 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, struct nlattr *tb[TCA_CGROUP_MAX + 1]; struct cls_cgroup_head *head = rtnl_dereference(tp->root); struct cls_cgroup_head *new; - struct tcf_exts e; int err; if (!tca[TCA_OPTIONS]) @@ -102,22 +101,13 @@ static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, if (err < 0) goto errout; - err = tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE); + err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, ovr); if (err < 0) goto errout; - err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); - if (err < 0) { - tcf_exts_destroy(&e); - goto errout; - } err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches); - if (err < 0) { - tcf_exts_destroy(&e); + if (err < 0) goto errout; - } - - tcf_exts_change(tp, &new->exts, &e); rcu_assign_pointer(tp->root, new); if (head) -- cgit v1.2.3-55-g7522 From c09fc2e11ed1b7fc8cfa97fb1da544225fc32277 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:12 +0200 Subject: net: sched: cls_flow: no need to call tcf_exts_change for newly allocated struct As the fnew struct just was allocated, so no need to use tcf_exts_change to do atomic change, and we can just fill-up the unused exts struct directly by tcf_exts_validate. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_flow.c | 41 ++++++++++++++++------------------------- 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 71fd1af01726..55e281b20140 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -388,7 +388,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, struct flow_filter *fold, *fnew; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FLOW_MAX + 1]; - struct tcf_exts e; unsigned int nkeys = 0; unsigned int perturb_period = 0; u32 baseclass = 0; @@ -424,31 +423,27 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, return -EOPNOTSUPP; } - err = tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE); - if (err < 0) - goto err1; - err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr); - if (err < 0) - goto err1; - - err = -ENOBUFS; fnew = kzalloc(sizeof(*fnew), GFP_KERNEL); if (!fnew) - goto err1; + return -ENOBUFS; err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches); if (err < 0) - goto err2; + goto err1; err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); if (err < 0) - goto err3; + goto err2; + + err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr); + if (err < 0) + goto err2; fold = (struct flow_filter *)*arg; if (fold) { err = -EINVAL; if (fold->handle != handle && handle) - goto err3; + goto err2; /* Copy fold into fnew */ fnew->tp = fold->tp; @@ -468,31 +463,31 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) - goto err3; + goto err2; if (mode == FLOW_MODE_HASH) perturb_period = fold->perturb_period; if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) - goto err3; + goto err2; perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; } } else { err = -EINVAL; if (!handle) - goto err3; + 
goto err2; if (!tb[TCA_FLOW_KEYS]) - goto err3; + goto err2; mode = FLOW_MODE_MAP; if (tb[TCA_FLOW_MODE]) mode = nla_get_u32(tb[TCA_FLOW_MODE]); if (mode != FLOW_MODE_HASH && nkeys > 1) - goto err3; + goto err2; if (tb[TCA_FLOW_PERTURB]) { if (mode != FLOW_MODE_HASH) - goto err3; + goto err2; perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ; } @@ -510,8 +505,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, setup_deferrable_timer(&fnew->perturb_timer, flow_perturbation, (unsigned long)fnew); - tcf_exts_change(tp, &fnew->exts, &e); - netif_keep_dst(qdisc_dev(tp->q)); if (tb[TCA_FLOW_KEYS]) { @@ -550,13 +543,11 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, call_rcu(&fold->rcu, flow_destroy_filter); return 0; -err3: +err2: tcf_exts_destroy(&fnew->exts); tcf_em_tree_destroy(&fnew->ematches); -err2: - kfree(fnew); err1: - tcf_exts_destroy(&e); + kfree(fnew); return err; } -- cgit v1.2.3-55-g7522 From 8c98d571bb0e9717fd7be7242945e8e0abebbaa3 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:13 +0200 Subject: net: sched: cls_route: no need to call tcf_exts_change for newly allocated struct As the f struct was allocated right before route4_set_parms call, no need to use tcf_exts_change to do atomic change, and we can just fill-up the unused exts struct directly by tcf_exts_validate. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_route.c | 30 +++++++++--------------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 26f863634862..f1e7d7850b44 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -372,37 +372,32 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, struct route4_filter *fp; unsigned int h1; struct route4_bucket *b; - struct tcf_exts e; int err; - err = tcf_exts_init(&e, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE); + err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr); if (err < 0) return err; - err = tcf_exts_validate(net, tp, tb, est, &e, ovr); - if (err < 0) - goto errout; - err = -EINVAL; if (tb[TCA_ROUTE4_TO]) { if (new && handle & 0x8000) - goto errout; + return -EINVAL; to = nla_get_u32(tb[TCA_ROUTE4_TO]); if (to > 0xFF) - goto errout; + return -EINVAL; nhandle = to; } if (tb[TCA_ROUTE4_FROM]) { if (tb[TCA_ROUTE4_IIF]) - goto errout; + return -EINVAL; id = nla_get_u32(tb[TCA_ROUTE4_FROM]); if (id > 0xFF) - goto errout; + return -EINVAL; nhandle |= id << 16; } else if (tb[TCA_ROUTE4_IIF]) { id = nla_get_u32(tb[TCA_ROUTE4_IIF]); if (id > 0x7FFF) - goto errout; + return -EINVAL; nhandle |= (id | 0x8000) << 16; } else nhandle |= 0xFFFF << 16; @@ -410,27 +405,25 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, if (handle && new) { nhandle |= handle & 0x7F00; if (nhandle != handle) - goto errout; + return -EINVAL; } h1 = to_hash(nhandle); b = rtnl_dereference(head->table[h1]); if (!b) { - err = -ENOBUFS; b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL); if (b == NULL) - goto errout; + return -ENOBUFS; rcu_assign_pointer(head->table[h1], b); } else { unsigned int h2 = from_hash(nhandle >> 16); - err = -EEXIST; for (fp = rtnl_dereference(b->ht[h2]); fp; fp = rtnl_dereference(fp->next)) if (fp->handle == f->handle) - goto errout; + return -EEXIST; } if (tb[TCA_ROUTE4_TO]) @@ -450,12 +443,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, tcf_bind_filter(tp, &f->res, base); } - tcf_exts_change(tp, &f->exts, &e); - return 0; -errout: - 
tcf_exts_destroy(&e); - return err; } static int route4_change(struct net *net, struct sk_buff *in_skb, -- cgit v1.2.3-55-g7522 From 705c7091262d02b09eb686c24491de61bf42fdb2 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:14 +0200 Subject: net: sched: cls_u32: no need to call tcf_exts_change for newly allocated struct As the n struct was allocated right before u32_set_parms call, no need to use tcf_exts_change to do atomic change, and we can just fill-up the unused exts struct directly by tcf_exts_validate. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_u32.c | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 2d01195153e6..9fd243799fe7 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -723,29 +723,24 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, struct tc_u_knode *n, struct nlattr **tb, struct nlattr *est, bool ovr) { - struct tcf_exts e; int err; - err = tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE); + err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr); if (err < 0) return err; - err = tcf_exts_validate(net, tp, tb, est, &e, ovr); - if (err < 0) - goto errout; - err = -EINVAL; if (tb[TCA_U32_LINK]) { u32 handle = nla_get_u32(tb[TCA_U32_LINK]); struct tc_u_hnode *ht_down = NULL, *ht_old; if (TC_U32_KEY(handle)) - goto errout; + return -EINVAL; if (handle) { ht_down = u32_lookup_ht(ht->tp_c, handle); if (ht_down == NULL) - goto errout; + return -EINVAL; ht_down->refcnt++; } @@ -765,16 +760,11 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, int ret; ret = tcf_change_indev(net, tb[TCA_U32_INDEV]); if (ret < 0) - goto errout; + return -EINVAL; n->ifindex = ret; } #endif - tcf_exts_change(tp, &n->exts, &e); - return 0; -errout: - tcf_exts_destroy(&e); - return err; } static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c, -- cgit v1.2.3-55-g7522 From 9b0d4446b56904b59ae3809913b0ac760fa941a6 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 4 Aug 2017 14:29:15 +0200 Subject: net: sched: avoid atomic swap in tcf_exts_change tcf_exts_change is always called on newly created exts, which are not used on fastpath. Therefore, simple struct copy is enough. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 3 +-- net/sched/cls_api.c | 10 ++-------- net/sched/cls_rsvp.h | 4 ++-- net/sched/cls_tcindex.c | 6 +++--- 4 files changed, 8 insertions(+), 15 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index b8959c9a190d..e0c54f111467 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -201,8 +201,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr); void tcf_exts_destroy(struct tcf_exts *exts); -void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, - struct tcf_exts *src); +void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src); int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts); int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts); int tcf_exts_get_dev(struct net_device *dev, struct tcf_exts *exts, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 735d556a5283..e655221c654e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -883,18 +883,12 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb, } EXPORT_SYMBOL(tcf_exts_validate); -void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst, - struct tcf_exts *src) +void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src) { #ifdef CONFIG_NET_CLS_ACT struct tcf_exts old = *dst; - tcf_tree_lock(tp); - dst->nr_actions = src->nr_actions; - dst->actions = src->actions; - dst->type = src->type; - tcf_tree_unlock(tp); - + *dst = *src; tcf_exts_destroy(&old); #endif } diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 0d9d07798699..4adb67a73491 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -518,7 +518,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, tcf_bind_filter(tp, &n->res, base); } - tcf_exts_change(tp, &n->exts, &e); + tcf_exts_change(&n->exts, &e); rsvp_replace(tp, n, handle); return 0; } @@ -591,7 +591,7 @@ insert: if (f->tunnelhdr == 0) tcf_bind_filter(tp, &f->res, base); - tcf_exts_change(tp, &f->exts, &e); + tcf_exts_change(&f->exts, &e); fp = &s->ht[h2]; for (nfp = rtnl_dereference(*fp); nfp; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 66924d147e97..d69f828f3fed 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -419,9 +419,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, } if (old_r) - tcf_exts_change(tp, &r->exts, &e); + tcf_exts_change(&r->exts, &e); else - tcf_exts_change(tp, &cr.exts, &e); + tcf_exts_change(&cr.exts, &e); if (old_r && old_r != r) { err = tcindex_filter_result_init(old_r); @@ -439,7 +439,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, struct tcindex_filter *nfp; struct tcindex_filter __rcu **fp; - tcf_exts_change(tp, &f->result.exts, &r->exts); + tcf_exts_change(&f->result.exts, &r->exts); fp = cp->h + (handle % cp->hash); for (nfp = rtnl_dereference(*fp); -- cgit v1.2.3-55-g7522 From 56ce097c1caede1f9c191a7c9699b950e7c36ad9 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Fri, 4 Aug 2017 08:24:05 -0700 Subject: net: comment fixes against BPF devmap helper calls Update BPF comments to accurately reflect XDP usage. Fixes: 97f91a7cf04ff ("bpf: add bpf_redirect_map helper routine") Reported-by: Alexei Starovoitov Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Signed-off-by: David S. 
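The comment fix below concerns the devmap redirect helper; a minimal XDP program using bpf_redirect_map() might look like this sketch, where the map name, the section names and the use of samples/bpf's bpf_helpers.h are assumptions, not part of the patch:

/* Illustrative sketch only. */
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"	/* assumed: provides SEC() and helper prototypes */

struct bpf_map_def SEC("maps") tx_port = {
	.type        = BPF_MAP_TYPE_DEVMAP,
	.key_size    = sizeof(int),
	.value_size  = sizeof(int),
	.max_entries = 64,
};

SEC("xdp_redirect_map")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
	/* redirect every frame to the netdev stored at map index 0;
	 * returns XDP_REDIRECT on success per the updated comment */
	return bpf_redirect_map(&tx_port, 0, 0);
}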
Miller --- include/uapi/linux/bpf.h | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 1106a8c4cd36..1d06be1569b1 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -345,14 +345,20 @@ union bpf_attr { * int bpf_redirect(ifindex, flags) * redirect to another netdev * @ifindex: ifindex of the net device - * @flags: bit 0 - if set, redirect to ingress instead of egress - * other bits - reserved - * Return: TC_ACT_REDIRECT - * int bpf_redirect_map(key, map, flags) + * @flags: + * cls_bpf: + * bit 0 - if set, redirect to ingress instead of egress + * other bits - reserved + * xdp_bpf: + * all bits - reserved + * Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error + * xdp_bfp: XDP_REDIRECT on success or XDP_ABORT on error + * int bpf_redirect_map(map, key, flags) * redirect to endpoint in map + * @map: pointer to dev map * @key: index in map to lookup - * @map: fd of map to do lookup in * @flags: -- + * Return: XDP_REDIRECT on success or XDP_ABORT on error * * u32 bpf_get_route_realm(skb) * retrieve a dst's tclassid -- cgit v1.2.3-55-g7522 From a54df682e559da9cf09b41779ee62bc9f11d3804 Mon Sep 17 00:00:00 2001 From: Pavel Belous Date: Thu, 3 Aug 2017 18:15:32 +0300 Subject: aquantia: Switch to use napi_gro_receive Add support for GRO (generic receive offload) for aQuantia Atlantic driver. This results in a perfomance improvement when GRO is enabled. Signed-off-by: Pavel Belous Signed-off-by: David S. Miller --- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 7 +++++-- drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 5 ++++- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 1 + 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 9a0817938eca..4b445750b93e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -134,7 +134,10 @@ static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, } #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) +int aq_ring_rx_clean(struct aq_ring_s *self, + struct napi_struct *napi, + int *work_done, + int budget) { struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); int err = 0; @@ -240,7 +243,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget) skb_record_rx_queue(skb, self->idx); - netif_receive_skb(skb); + napi_gro_receive(napi, skb); ++self->stats.rx.packets; self->stats.rx.bytes += skb->len; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index eecd6d1c4d73..782176c5f4f8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h @@ -148,7 +148,10 @@ int aq_ring_init(struct aq_ring_s *self); void aq_ring_rx_deinit(struct aq_ring_s *self); void aq_ring_free(struct aq_ring_s *self); void aq_ring_tx_clean(struct aq_ring_s *self); -int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget); +int aq_ring_rx_clean(struct aq_ring_s *self, + struct napi_struct *napi, + int *work_done, + int budget); int aq_ring_rx_fill(struct aq_ring_s *self); #endif /* AQ_RING_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 
ad5b4d4dac7f..ec390c5eed35 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c @@ -78,6 +78,7 @@ __acquires(&self->lock) if (ring[AQ_VEC_RX_ID].sw_head != ring[AQ_VEC_RX_ID].hw_head) { err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID], + napi, &work_done, budget - work_done); if (err < 0) -- cgit v1.2.3-55-g7522 From a1e155ece1a5b68c4f845788e03a567574f606aa Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 3 Aug 2017 18:07:05 +0200 Subject: IP: do not modify ingress packet IP option in ip_options_echo() While computing the response option set for LSRR, ip_options_echo() also changes the ingress packet LSRR addresses list, setting the last one to the dst specific address for the ingress packet - via memset(start[ ... The only visible effect of such change - beyond possibly damaging shared/cloned skbs - is modifying the data carried by ICMP replies changing the header information for reported the ingress packet, which violates RFC1122 3.2.2.6. All the others call sites just ignore the ingress packet IP options after calling ip_options_echo() Note that the last element in the LSRR option address list for the reply packet will be properly set later in the ip output path via ip_options_build(). This buggy memset() predates git history and apparently was present into the initial ip_options_echo() implementation in linux 1.3.30 but still looks wrong. The removal of the fib_compute_spec_dst() call will help completely dropping the skb->dst usage by __ip_options_echo() with a later patch. Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/ipv4/ip_options.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 93157f2f4758..fdda97308c0b 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -174,9 +174,6 @@ int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb, doffset -= 4; } if (doffset > 3) { - __be32 daddr = fib_compute_spec_dst(skb); - - memcpy(&start[doffset-1], &daddr, 4); dopt->faddr = faddr; dptr[0] = start[0]; dptr[1] = doffset+3; -- cgit v1.2.3-55-g7522 From 91ed1e666a4ea2e260452a7d7d311ac5ae852cba Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 3 Aug 2017 18:07:06 +0200 Subject: ip/options: explicitly provide net ns to __ip_options_echo() __ip_options_echo() uses the current network namespace, and currently retrives it via skb->dst->dev. This commit adds an explicit 'net' argument to __ip_options_echo() and update all the call sites to provide it, usually via a simpler sock_net(). After this change, __ip_options_echo() no more needs to access skb->dst and we can drop a couple of hack to preserve such info in the rx path. Signed-off-by: Paolo Abeni Signed-off-by: David S. 
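The diff below updates every caller; as a sketch, the resulting pattern passes the namespace in explicitly (typically via sock_net() or dev_net()) instead of deriving it from skb->dst inside the helper. The buffer handling follows ip_cmsg_recv_retopts(), and 'sk' is assumed to be in scope:

/* Illustrative sketch only. */
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options *opt = (struct ip_options *)optbuf;

if (ip_options_echo(sock_net(sk), opt, skb))
	return;		/* options could not be echoed */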
Miller --- include/net/ip.h | 9 +++++---- include/net/tcp.h | 5 +++-- net/ipv4/icmp.c | 4 ++-- net/ipv4/ip_options.c | 6 +++--- net/ipv4/ip_output.c | 2 +- net/ipv4/ip_sockglue.c | 7 ++++--- net/ipv4/syncookies.c | 2 +- net/ipv4/tcp_ipv4.c | 2 +- 8 files changed, 20 insertions(+), 17 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 821cedcc8e73..9e59dcf1787a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -567,11 +567,12 @@ int ip_forward(struct sk_buff *skb); void ip_options_build(struct sk_buff *skb, struct ip_options *opt, __be32 daddr, struct rtable *rt, int is_frag); -int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb, - const struct ip_options *sopt); -static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb) +int __ip_options_echo(struct net *net, struct ip_options *dopt, + struct sk_buff *skb, const struct ip_options *sopt); +static inline int ip_options_echo(struct net *net, struct ip_options *dopt, + struct sk_buff *skb) { - return __ip_options_echo(dopt, skb, &IPCB(skb)->opt); + return __ip_options_echo(net, dopt, skb, &IPCB(skb)->opt); } void ip_options_fragment(struct sk_buff *skb); diff --git a/include/net/tcp.h b/include/net/tcp.h index bb1881b4ce48..5173fecde495 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1885,7 +1885,8 @@ extern void tcp_rack_reo_timeout(struct sock *sk); /* * Save and compile IPv4 options, return a pointer to it */ -static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb) +static inline struct ip_options_rcu *tcp_v4_save_options(struct net *net, + struct sk_buff *skb) { const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt; struct ip_options_rcu *dopt = NULL; @@ -1894,7 +1895,7 @@ static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb) int opt_size = sizeof(*dopt) + opt->optlen; dopt = kmalloc(opt_size, GFP_ATOMIC); - if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) { + if (dopt && __ip_options_echo(net, &dopt->opt, skb, opt)) { kfree(dopt); dopt = NULL; } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index c2be26b98b5f..681e33998e03 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -412,7 +412,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) int type = icmp_param->data.icmph.type; int code = icmp_param->data.icmph.code; - if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb)) + if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb)) return; /* Needed by both icmp_global_allow and icmp_xmit_lock */ @@ -694,7 +694,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) iph->tos; mark = IP4_REPLY_MARK(net, skb_in->mark); - if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in)) + if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in)) goto out_unlock; diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index fdda97308c0b..525ae88d1e58 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -86,8 +86,8 @@ void ip_options_build(struct sk_buff *skb, struct ip_options *opt, * NOTE: dopt cannot point to skb. 
*/ -int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb, - const struct ip_options *sopt) +int __ip_options_echo(struct net *net, struct ip_options *dopt, + struct sk_buff *skb, const struct ip_options *sopt) { unsigned char *sptr, *dptr; int soffset, doffset; @@ -140,7 +140,7 @@ int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb, __be32 addr; memcpy(&addr, dptr+soffset-1, 4); - if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_UNICAST) { + if (inet_addr_type(net, addr) != RTN_UNICAST) { dopt->ts_needtime = 1; soffset += 8; } diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index b631ec685d77..73b0b15245b6 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1525,7 +1525,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, int err; int oif; - if (__ip_options_echo(&replyopts.opt.opt, skb, sopt)) + if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt)) return; ipc.addr = daddr; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ecc4b4a2413e..1c3354d028a4 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -80,7 +80,8 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb) } -static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) +static void ip_cmsg_recv_retopts(struct net *net, struct msghdr *msg, + struct sk_buff *skb) { unsigned char optbuf[sizeof(struct ip_options) + 40]; struct ip_options *opt = (struct ip_options *)optbuf; @@ -88,7 +89,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) if (IPCB(skb)->opt.optlen == 0) return; - if (ip_options_echo(opt, skb)) { + if (ip_options_echo(net, opt, skb)) { msg->msg_flags |= MSG_CTRUNC; return; } @@ -204,7 +205,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk, } if (flags & IP_CMSG_RETOPTS) { - ip_cmsg_recv_retopts(msg, skb); + ip_cmsg_recv_retopts(sock_net(sk), msg, skb); flags &= ~IP_CMSG_RETOPTS; if (!flags) diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 03ad8778c395..b1bb1b3a1082 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) */ - ireq->opt = tcp_v4_save_options(skb); + ireq->opt = tcp_v4_save_options(sock_net(sk), skb); if (security_inet_conn_request(sk, skb, req)) { reqsk_free(req); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 9b51663cd5a4..5f708c85110e 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1267,7 +1267,7 @@ static void tcp_v4_init_req(struct request_sock *req, sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); - ireq->opt = tcp_v4_save_options(skb); + ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb); } static struct dst_entry *tcp_v4_route_req(const struct sock *sk, -- cgit v1.2.3-55-g7522 From 61a1030bad628f7264cd5e5d0f4d71b5488eb4a4 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 3 Aug 2017 18:07:07 +0200 Subject: Revert "ipv4: keep skb->dst around in presence of IP options" ip_options_echo() does not use anymore the skb->dst and don't need to keep the dst around for options's sake only. This reverts commit 34b2cef20f19c87999fff3da4071e66937db9644. Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- net/ipv4/ip_sockglue.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 1c3354d028a4..dd68a9ed5e40 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1228,14 +1228,7 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) pktinfo->ipi_ifindex = 0; pktinfo->ipi_spec_dst.s_addr = 0; } - /* We need to keep the dst for __ip_options_echo() - * We could restrict the test to opt.ts_needtime || opt.srr, - * but the following is good enough as IP options are not often used. - */ - if (unlikely(IPCB(skb)->opt.optlen)) - skb_dst_force(skb); - else - skb_dst_drop(skb); + skb_dst_drop(skb); } int ip_setsockopt(struct sock *sk, int level, -- cgit v1.2.3-55-g7522 From 3bdefdf9d9c2a972085742578b08d99f14c09555 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Thu, 3 Aug 2017 18:07:08 +0200 Subject: udp: no need to preserve skb->dst __ip_options_echo() does not need anymore skb->dst, so we can avoid explicitly preserving it for its own sake. This is almost a revert of commit 0ddf3fb2c43d ("udp: preserve skb->dst if required for IP options processing") plus some lifting to fit later changes. Signed-off-by: Paolo Abeni Signed-off-by: David S. Miller --- net/ipv4/udp.c | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index e6276fa3750b..38bca2c4897d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1176,7 +1176,11 @@ static void udp_set_dev_scratch(struct sk_buff *skb) scratch->csum_unnecessary = !!skb_csum_unnecessary(skb); scratch->is_linear = !skb_is_nonlinear(skb); #endif - if (likely(!skb->_skb_refdst)) + /* all head states execept sp (dst, sk, nf) are always cleared by + * udp_rcv() and we need to preserve secpath, if present, to eventually + * process IP_CMSG_PASSSEC at recvmsg() time + */ + if (likely(!skb_sec_path(skb))) scratch->_tsize_state |= UDP_SKB_IS_STATELESS; } @@ -1782,13 +1786,6 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) sk_mark_napi_id_once(sk, skb); } - /* At recvmsg() time we may access skb->dst or skb->sp depending on - * the IP options and the cmsg flags, elsewhere can we clear all - * pending head states while they are hot in the cache - */ - if (likely(IPCB(skb)->opt.optlen == 0 && !skb_sec_path(skb))) - skb_release_head_state(skb); - rc = __udp_enqueue_schedule_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); -- cgit v1.2.3-55-g7522 From c3ecbe757c973d59994d9496b829052ed688ae1e Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:02:37 +0100 Subject: net: phy: allow settings table to support more than 32 link modes Allow the phy settings table to support more than 32 link modes by switching to the ethtool link mode bit number representation, rather than storing the mask. This will allow phylink and other ethtool code to share the settings table to look up settings. Signed-off-by: Russell King Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 44 ++++++++++++++++++++++++++------------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index b9d4922581de..abae9167b5fc 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -199,7 +199,7 @@ EXPORT_SYMBOL(phy_aneg_done); struct phy_setting { int speed; int duplex; - u32 setting; + int bit; }; /* A mapping of all SUPPORTED settings to speed/duplex. 
This table @@ -209,57 +209,57 @@ static const struct phy_setting settings[] = { { .speed = SPEED_10000, .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10000baseKR_Full, + .bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, }, { .speed = SPEED_10000, .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10000baseKX4_Full, + .bit = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, }, { .speed = SPEED_10000, .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10000baseT_Full, + .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, }, { .speed = SPEED_2500, .duplex = DUPLEX_FULL, - .setting = SUPPORTED_2500baseX_Full, + .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, }, { .speed = SPEED_1000, .duplex = DUPLEX_FULL, - .setting = SUPPORTED_1000baseKX_Full, + .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, }, { .speed = SPEED_1000, .duplex = DUPLEX_FULL, - .setting = SUPPORTED_1000baseT_Full, + .bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, }, { .speed = SPEED_1000, .duplex = DUPLEX_HALF, - .setting = SUPPORTED_1000baseT_Half, + .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, }, { .speed = SPEED_100, .duplex = DUPLEX_FULL, - .setting = SUPPORTED_100baseT_Full, + .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT, }, { .speed = SPEED_100, .duplex = DUPLEX_HALF, - .setting = SUPPORTED_100baseT_Half, + .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT, }, { .speed = SPEED_10, .duplex = DUPLEX_FULL, - .setting = SUPPORTED_10baseT_Full, + .bit = ETHTOOL_LINK_MODE_10baseT_Full_BIT, }, { .speed = SPEED_10, .duplex = DUPLEX_HALF, - .setting = SUPPORTED_10baseT_Half, + .bit = ETHTOOL_LINK_MODE_10baseT_Half_BIT, }, }; @@ -267,7 +267,8 @@ static const struct phy_setting settings[] = { * phy_lookup_setting - lookup a PHY setting * @speed: speed to match * @duplex: duplex to match - * @features: allowed link modes + * @mask: allowed link modes + * @maxbit: bit size of link modes * @exact: an exact match is required * * Search the settings array for a setting that matches the speed and @@ -281,13 +282,14 @@ static const struct phy_setting settings[] = { * they all fail, %NULL will be returned. 
*/ static const struct phy_setting * -phy_lookup_setting(int speed, int duplex, u32 features, bool exact) +phy_lookup_setting(int speed, int duplex, const unsigned long *mask, + size_t maxbit, bool exact) { const struct phy_setting *p, *match = NULL, *last = NULL; int i; for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { - if (p->setting & features) { + if (p->bit < maxbit && test_bit(p->bit, mask)) { last = p; if (p->speed == speed && p->duplex == duplex) { /* Exact match for speed and duplex */ @@ -326,7 +328,9 @@ phy_lookup_setting(int speed, int duplex, u32 features, bool exact) static const struct phy_setting * phy_find_valid(int speed, int duplex, u32 supported) { - return phy_lookup_setting(speed, duplex, supported, false); + unsigned long mask = supported; + + return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false); } /** @@ -343,12 +347,14 @@ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int *speeds, unsigned int size) { + unsigned long supported = phy->supported; unsigned int count = 0; unsigned int idx = 0; for (idx = 0; idx < ARRAY_SIZE(settings) && count < size; idx++) /* Assumes settings are grouped by speed */ - if ((settings[idx].setting & phy->supported) && + if (settings[idx].bit < BITS_PER_LONG && + !test_bit(settings[idx].bit, &supported) && (count == 0 || speeds[count - 1] != settings[idx].speed)) speeds[count++] = settings[idx].speed; @@ -366,7 +372,9 @@ unsigned int phy_supported_speeds(struct phy_device *phy, */ static inline bool phy_check_valid(int speed, int duplex, u32 features) { - return !!phy_lookup_setting(speed, duplex, features, true); + unsigned long mask = features; + + return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true); } /** -- cgit v1.2.3-55-g7522 From da4625ac2637e4e5249dc08a10f8dce7643603d2 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:02:42 +0100 Subject: net: phy: split out PHY speed and duplex string generation Other code would like to make use of this, so make the speed and duplex string generation visible, and place it in a separate file. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/phy-core.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/net/phy/phy.c | 38 +---------------------------------- include/linux/phy.h | 3 +++ 3 files changed, 53 insertions(+), 37 deletions(-) diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 6739b738bbaf..bf01a24f21ce 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -9,6 +9,55 @@ #include #include +const char *phy_speed_to_str(int speed) +{ + switch (speed) { + case SPEED_10: + return "10Mbps"; + case SPEED_100: + return "100Mbps"; + case SPEED_1000: + return "1Gbps"; + case SPEED_2500: + return "2.5Gbps"; + case SPEED_5000: + return "5Gbps"; + case SPEED_10000: + return "10Gbps"; + case SPEED_14000: + return "14Gbps"; + case SPEED_20000: + return "20Gbps"; + case SPEED_25000: + return "25Gbps"; + case SPEED_40000: + return "40Gbps"; + case SPEED_50000: + return "50Gbps"; + case SPEED_56000: + return "56Gbps"; + case SPEED_100000: + return "100Gbps"; + case SPEED_UNKNOWN: + return "Unknown"; + default: + return "Unsupported (update phy-core.c)"; + } +} +EXPORT_SYMBOL_GPL(phy_speed_to_str); + +const char *phy_duplex_to_str(unsigned int duplex) +{ + if (duplex == DUPLEX_HALF) + return "Half"; + if (duplex == DUPLEX_FULL) + return "Full"; + if (duplex == DUPLEX_UNKNOWN) + return "Unknown"; + return "Unsupported (update phy-core.c)"; +} +EXPORT_SYMBOL_GPL(phy_duplex_to_str); + static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, u16 regnum) { diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index abae9167b5fc..7b7fe6beae7e 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -38,42 +38,6 @@ #include -static const char *phy_speed_to_str(int speed) -{ - switch (speed) { - case SPEED_10: - return "10Mbps"; - case SPEED_100: - return "100Mbps"; - case SPEED_1000: - return "1Gbps"; - case SPEED_2500: - return "2.5Gbps"; - case SPEED_5000: - return "5Gbps"; - case SPEED_10000: - return "10Gbps"; - case SPEED_14000: - return "14Gbps"; - case SPEED_20000: - return "20Gbps"; - case SPEED_25000: - return "25Gbps"; - case SPEED_40000: - return "40Gbps"; - case SPEED_50000: - return "50Gbps"; - case SPEED_56000: - return "56Gbps"; - case SPEED_100000: - return "100Gbps"; - case SPEED_UNKNOWN: - return "Unknown"; - default: - return "Unsupported (update phy.c)"; - } -} - #define PHY_STATE_STR(_state) \ case PHY_##_state: \ return __stringify(_state); \ @@ -109,7 +73,7 @@ void phy_print_status(struct phy_device *phydev) netdev_info(phydev->attached_dev, "Link is Up - %s/%s - flow control %s\n", phy_speed_to_str(phydev->speed), - DUPLEX_FULL == phydev->duplex ? "Full" : "Half", + phy_duplex_to_str(phydev->duplex), phydev->pause ? "rx/tx" : "off"); } else { netdev_info(phydev->attached_dev, "Link is Down\n"); diff --git a/include/linux/phy.h b/include/linux/phy.h index 0bb5b212ab42..e8264c78b75b 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -667,6 +667,9 @@ struct phy_fixup { int (*run)(struct phy_device *phydev); }; +const char *phy_speed_to_str(int speed); +const char *phy_duplex_to_str(unsigned int duplex); + /** * phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. 
-- cgit v1.2.3-55-g7522 From 0ccb4fc65d2799a315d5ee8732d75f35a114379c Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:02:47 +0100 Subject: net: phy: move phy_lookup_setting() and guts of phy_supported_speeds() to phy-core phy_lookup_setting() provides useful functionality in ethtool code outside phylib. Move it to phy-core and allow it to be re-used (eg, in phylink) rather than duplicated elsewhere. Note that this supports the larger linkmode space. As we move the phy settings table, we also need to move the guts of phy_supported_speeds() as well. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phy-core.c | 126 +++++++++++++++++++++++++++++++++++++++++++ drivers/net/phy/phy.c | 130 +-------------------------------------------- include/linux/phy.h | 15 ++++++ 3 files changed, 142 insertions(+), 129 deletions(-) diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index bf01a24f21ce..c07845e77004 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -58,6 +58,132 @@ const char *phy_duplex_to_str(unsigned int duplex) } EXPORT_SYMBOL_GPL(phy_duplex_to_str); +/* A mapping of all SUPPORTED settings to speed/duplex. This table + * must be grouped by speed and sorted in descending match priority + * - iow, descending speed. */ +static const struct phy_setting settings[] = { + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + }, + { + .speed = SPEED_2500, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_HALF, + .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + }, + { + .speed = SPEED_100, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT, + }, + { + .speed = SPEED_100, + .duplex = DUPLEX_HALF, + .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT, + }, + { + .speed = SPEED_10, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10baseT_Full_BIT, + }, + { + .speed = SPEED_10, + .duplex = DUPLEX_HALF, + .bit = ETHTOOL_LINK_MODE_10baseT_Half_BIT, + }, +}; + +/** + * phy_lookup_setting - lookup a PHY setting + * @speed: speed to match + * @duplex: duplex to match + * @mask: allowed link modes + * @maxbit: bit size of link modes + * @exact: an exact match is required + * + * Search the settings array for a setting that matches the speed and + * duplex, and which is supported. + * + * If @exact is unset, either an exact match or %NULL for no match will + * be returned. + * + * If @exact is set, an exact match, the fastest supported setting at + * or below the specified speed, the slowest supported setting, or if + * they all fail, %NULL will be returned. 
+ */ +const struct phy_setting * +phy_lookup_setting(int speed, int duplex, const unsigned long *mask, + size_t maxbit, bool exact) +{ + const struct phy_setting *p, *match = NULL, *last = NULL; + int i; + + for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { + if (p->bit < maxbit && test_bit(p->bit, mask)) { + last = p; + if (p->speed == speed && p->duplex == duplex) { + /* Exact match for speed and duplex */ + match = p; + break; + } else if (!exact) { + if (!match && p->speed <= speed) + /* Candidate */ + match = p; + + if (p->speed < speed) + break; + } + } + } + + if (!match && !exact) + match = last; + + return match; +} +EXPORT_SYMBOL_GPL(phy_lookup_setting); + +size_t phy_speeds(unsigned int *speeds, size_t size, + unsigned long *mask, size_t maxbit) +{ + size_t count; + int i; + + for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++) + if (settings[i].bit < maxbit && + test_bit(settings[i].bit, mask) && + (count == 0 || speeds[count - 1] != settings[i].speed)) + speeds[count++] = settings[i].speed; + + return count; +} + static void mmd_phy_indirect(struct mii_bus *bus, int phy_addr, int devad, u16 regnum) { diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7b7fe6beae7e..71c64a774856 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -157,125 +157,6 @@ int phy_aneg_done(struct phy_device *phydev) } EXPORT_SYMBOL(phy_aneg_done); -/* A structure for mapping a particular speed and duplex - * combination to a particular SUPPORTED and ADVERTISED value - */ -struct phy_setting { - int speed; - int duplex; - int bit; -}; - -/* A mapping of all SUPPORTED settings to speed/duplex. This table - * must be grouped by speed and sorted in descending match priority - * - iow, descending speed. */ -static const struct phy_setting settings[] = { - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, - }, - { - .speed = SPEED_10000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, - }, - { - .speed = SPEED_2500, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT, - }, - { - .speed = SPEED_1000, - .duplex = DUPLEX_HALF, - .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, - }, - { - .speed = SPEED_100, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT, - }, - { - .speed = SPEED_100, - .duplex = DUPLEX_HALF, - .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT, - }, - { - .speed = SPEED_10, - .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_10baseT_Full_BIT, - }, - { - .speed = SPEED_10, - .duplex = DUPLEX_HALF, - .bit = ETHTOOL_LINK_MODE_10baseT_Half_BIT, - }, -}; - -/** - * phy_lookup_setting - lookup a PHY setting - * @speed: speed to match - * @duplex: duplex to match - * @mask: allowed link modes - * @maxbit: bit size of link modes - * @exact: an exact match is required - * - * Search the settings array for a setting that matches the speed and - * duplex, and which is supported. - * - * If @exact is unset, either an exact match or %NULL for no match will - * be returned. 
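To show how a caller might use the relocated helpers, here is a minimal sketch based on the signatures above. The wrapping function, the local unsigned long copy of the mask (mirroring phy_supported_speeds()) and the printed messages are assumptions for illustration only.

#include <linux/kernel.h>
#include <linux/phy.h>

static void example_lookup(struct phy_device *phydev)
{
        unsigned long supported = phydev->supported;
        const struct phy_setting *s;
        unsigned int speeds[8];
        size_t n;

        /* Fastest supported setting at or below 1Gbps/full (exact=false) */
        s = phy_lookup_setting(SPEED_1000, DUPLEX_FULL, &supported,
                               BITS_PER_LONG, false);
        if (s)
                pr_info("best match: %s/%s\n",
                        phy_speed_to_str(s->speed),
                        phy_duplex_to_str(s->duplex));

        /* Distinct speeds supported by this PHY, fastest first */
        n = phy_speeds(speeds, ARRAY_SIZE(speeds), &supported, BITS_PER_LONG);
        pr_info("%zu supported speeds\n", n);
}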
- * - * If @exact is set, an exact match, the fastest supported setting at - * or below the specified speed, the slowest supported setting, or if - * they all fail, %NULL will be returned. - */ -static const struct phy_setting * -phy_lookup_setting(int speed, int duplex, const unsigned long *mask, - size_t maxbit, bool exact) -{ - const struct phy_setting *p, *match = NULL, *last = NULL; - int i; - - for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { - if (p->bit < maxbit && test_bit(p->bit, mask)) { - last = p; - if (p->speed == speed && p->duplex == duplex) { - /* Exact match for speed and duplex */ - match = p; - break; - } else if (!exact) { - if (!match && p->speed <= speed) - /* Candidate */ - match = p; - - if (p->speed < speed) - break; - } - } - } - - if (!match && !exact) - match = last; - - return match; -} - /** * phy_find_valid - find a PHY setting that matches the requested parameters * @speed: desired speed @@ -312,17 +193,8 @@ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int size) { unsigned long supported = phy->supported; - unsigned int count = 0; - unsigned int idx = 0; - - for (idx = 0; idx < ARRAY_SIZE(settings) && count < size; idx++) - /* Assumes settings are grouped by speed */ - if (settings[idx].bit < BITS_PER_LONG && - !test_bit(settings[idx].bit, &supported) && - (count == 0 || speeds[count - 1] != settings[idx].speed)) - speeds[count++] = settings[idx].speed; - return count; + return phy_speeds(speeds, size, &supported, BITS_PER_LONG); } /** diff --git a/include/linux/phy.h b/include/linux/phy.h index e8264c78b75b..8a280257778c 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -670,6 +670,21 @@ struct phy_fixup { const char *phy_speed_to_str(int speed); const char *phy_duplex_to_str(unsigned int duplex); +/* A structure for mapping a particular speed and duplex + * combination to a particular SUPPORTED and ADVERTISED value + */ +struct phy_setting { + u32 speed; + u8 duplex; + u8 bit; +}; + +const struct phy_setting * +phy_lookup_setting(int speed, int duplex, const unsigned long *mask, + size_t maxbit, bool exact); +size_t phy_speeds(unsigned int *speeds, size_t size, + unsigned long *mask, size_t maxbit); + /** * phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. -- cgit v1.2.3-55-g7522 From 1f3645bb41f2e2b15fe5799476284bf2cd138722 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:02:52 +0100 Subject: net: phy: add 1000Base-X to phy settings table Add the missing 1000Base-X entry to the phy settings table. This was not included because the original code could not cope with more than 32 bits of link mode mask. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/phy-core.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index c07845e77004..21f75ae244b3 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -87,6 +87,11 @@ static const struct phy_setting settings[] = { .duplex = DUPLEX_FULL, .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, }, + { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + }, { .speed = SPEED_1000, .duplex = DUPLEX_FULL, -- cgit v1.2.3-55-g7522 From a81497bee70eb15039594b3116913133aa9c9b29 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:02:58 +0100 Subject: net: phy: provide a hook for link up/link down events Sometimes, we need to do additional work between the PHY coming up and marking the carrier present - for example, we may need to wait for the PHY to MAC link to finish negotiation. This changes phylib to provide a notification function pointer which avoids the built-in netif_carrier_on() and netif_carrier_off() functions. Standard ->adjust_link functionality is provided by hooking a helper into the new ->phy_link_change method. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phy.c | 42 ++++++++++++++++++++---------------------- drivers/net/phy/phy_device.c | 14 ++++++++++++++ include/linux/phy.h | 1 + 3 files changed, 35 insertions(+), 22 deletions(-) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 71c64a774856..d5f2af2c5ddb 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -865,9 +865,15 @@ void phy_start(struct phy_device *phydev) } EXPORT_SYMBOL(phy_start); -static void phy_adjust_link(struct phy_device *phydev) +static void phy_link_up(struct phy_device *phydev) { - phydev->adjust_link(phydev->attached_dev); + phydev->phy_link_change(phydev, true, true); + phy_led_trigger_change_speed(phydev); +} + +static void phy_link_down(struct phy_device *phydev, bool do_carrier) +{ + phydev->phy_link_change(phydev, false, do_carrier); phy_led_trigger_change_speed(phydev); } @@ -912,8 +918,7 @@ void phy_state_machine(struct work_struct *work) /* If the link is down, give up on negotiation for now */ if (!phydev->link) { phydev->state = PHY_NOLINK; - netif_carrier_off(phydev->attached_dev); - phy_adjust_link(phydev); + phy_link_down(phydev, true); break; } @@ -925,9 +930,7 @@ void phy_state_machine(struct work_struct *work) /* If AN is done, we're running */ if (err > 0) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); - phy_adjust_link(phydev); - + phy_link_up(phydev); } else if (0 == phydev->link_timeout--) needs_aneg = true; break; @@ -952,8 +955,7 @@ void phy_state_machine(struct work_struct *work) } } phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); - phy_adjust_link(phydev); + phy_link_up(phydev); } break; case PHY_FORCING: @@ -963,13 +965,12 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { if (0 == phydev->link_timeout--) needs_aneg = true; + phy_link_down(phydev, false); } - - phy_adjust_link(phydev); break; case PHY_RUNNING: /* Only register a CHANGE if we are polling and link changed @@ -1001,14 +1002,12 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { 
phydev->state = PHY_NOLINK; - netif_carrier_off(phydev->attached_dev); + phy_link_down(phydev, true); } - phy_adjust_link(phydev); - if (phy_interrupt_is_valid(phydev)) err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED); @@ -1016,8 +1015,7 @@ void phy_state_machine(struct work_struct *work) case PHY_HALTED: if (phydev->link) { phydev->link = 0; - netif_carrier_off(phydev->attached_dev); - phy_adjust_link(phydev); + phy_link_down(phydev, true); do_suspend = true; } break; @@ -1037,11 +1035,11 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { phydev->state = PHY_NOLINK; + phy_link_down(phydev, false); } - phy_adjust_link(phydev); } else { phydev->state = PHY_AN; phydev->link_timeout = PHY_AN_TIMEOUT; @@ -1053,11 +1051,11 @@ void phy_state_machine(struct work_struct *work) if (phydev->link) { phydev->state = PHY_RUNNING; - netif_carrier_on(phydev->attached_dev); + phy_link_up(phydev); } else { phydev->state = PHY_NOLINK; + phy_link_down(phydev, false); } - phy_adjust_link(phydev); } break; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1790f7fec125..d536a9a7cd2b 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -688,6 +688,19 @@ struct phy_device *phy_find_first(struct mii_bus *bus) } EXPORT_SYMBOL(phy_find_first); +static void phy_link_change(struct phy_device *phydev, bool up, bool do_carrier) +{ + struct net_device *netdev = phydev->attached_dev; + + if (do_carrier) { + if (up) + netif_carrier_on(netdev); + else + netif_carrier_off(netdev); + } + phydev->adjust_link(netdev); +} + /** * phy_prepare_link - prepares the PHY layer to monitor link status * @phydev: target phy_device struct @@ -951,6 +964,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, goto error; } + phydev->phy_link_change = phy_link_change; phydev->attached_dev = dev; dev->phydev = phydev; diff --git a/include/linux/phy.h b/include/linux/phy.h index 8a280257778c..0a5e8e62c9e0 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -474,6 +474,7 @@ struct phy_device { u8 mdix; u8 mdix_ctrl; + void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier); void (*adjust_link)(struct net_device *dev); }; #define to_phy_device(d) container_of(to_mdio_device(d), \ -- cgit v1.2.3-55-g7522 From 5e5758d9d84256d55da831e940276c3216997f3e Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:03:03 +0100 Subject: net: phy: export phy_start_machine() for phylink phylink will need phy_start_machine exported, so lets export it as a GPL symbol. Documentation/networking/phy.txt indicates that this should be a PHY API function. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. 
Miller --- drivers/net/phy/phy.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index d5f2af2c5ddb..dae13f028c84 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -556,6 +556,7 @@ void phy_start_machine(struct phy_device *phydev) { queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ); } +EXPORT_SYMBOL_GPL(phy_start_machine); /** * phy_trigger_machine - trigger the state machine to run -- cgit v1.2.3-55-g7522 From 453d00defba502a48e3f9a218a519b233ff83d16 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:03:08 +0100 Subject: net: phy: add I2C mdio bus Add an I2C MDIO bus bridge library, to allow phylib to access PHYs which are connected to an I2C bus instead of the more conventional MDIO bus. Such PHYs can be found in SFP adapters and SFF modules. Since PHYs appear at I2C bus address 0x40..0x5f, and 0x50/0x51 are reserved for SFP EEPROMs/diagnostics, we must not allow the MDIO bus to access these I2C addresses. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 10 +++++ drivers/net/phy/Makefile | 1 + drivers/net/phy/mdio-i2c.c | 109 +++++++++++++++++++++++++++++++++++++++++++++ drivers/net/phy/mdio-i2c.h | 19 ++++++++ 4 files changed, 139 insertions(+) create mode 100644 drivers/net/phy/mdio-i2c.c create mode 100644 drivers/net/phy/mdio-i2c.h diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 928fd892f167..a1d6fdba8980 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -106,6 +106,16 @@ config MDIO_HISI_FEMAC This module provides a driver for the MDIO busses found in the Hisilicon SoC that have an Fast Ethernet MAC. +config MDIO_I2C + tristate + depends on I2C + help + Support I2C based PHYs. This provides a MDIO bus bridged + to I2C to allow PHYs connected in I2C mode to be accessed + using the existing infrastructure. + + This is library mode. + config MDIO_MOXART tristate "MOXA ART MDIO interface support" depends on ARCH_MOXART diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 8e9b9f349384..113e8d525c5e 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -30,6 +30,7 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o +obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/phy/mdio-i2c.c new file mode 100644 index 000000000000..6d24fd13ca86 --- /dev/null +++ b/drivers/net/phy/mdio-i2c.c @@ -0,0 +1,109 @@ +/* + * MDIO I2C bridge + * + * Copyright (C) 2015-2016 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Network PHYs can appear on I2C buses when they are part of SFP module. + * This driver exposes these PHYs to the networking PHY code, allowing + * our PHY drivers access to these PHYs, and so allowing configuration + * of their settings. + */ +#include +#include + +#include "mdio-i2c.h" + +/* + * I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is + * specified to be present in SFP modules. These correspond with PHY + * addresses 16 and 17. 
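A rough sketch of how a consumer might put the new bridge to use, for example from a probe path that already holds an i2c_adapter. The wrapper function, the error handling and the way the adapter is obtained are assumptions for illustration, not part of this patch.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/phy.h>

#include "mdio-i2c.h"

static struct mii_bus *example_attach_i2c_mdio(struct device *dev,
                                               struct i2c_adapter *i2c)
{
        struct mii_bus *mii;
        int ret;

        mii = mdio_i2c_alloc(dev, i2c);
        if (IS_ERR(mii))
                return mii;

        ret = mdiobus_register(mii);
        if (ret) {
                mdiobus_free(mii);
                return ERR_PTR(ret);
        }

        return mii;
}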
Disallow access to these "phy" addresses. + */ +static bool i2c_mii_valid_phy_id(int phy_id) +{ + return phy_id != 0x10 && phy_id != 0x11; +} + +static unsigned int i2c_mii_phy_addr(int phy_id) +{ + return phy_id + 0x40; +} + +static int i2c_mii_read(struct mii_bus *bus, int phy_id, int reg) +{ + struct i2c_adapter *i2c = bus->priv; + struct i2c_msg msgs[2]; + u8 data[2], dev_addr = reg; + int bus_addr, ret; + + if (!i2c_mii_valid_phy_id(phy_id)) + return 0xffff; + + bus_addr = i2c_mii_phy_addr(phy_id); + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1; + msgs[0].buf = &dev_addr; + msgs[1].addr = bus_addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = sizeof(data); + msgs[1].buf = data; + + ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs)); + if (ret != ARRAY_SIZE(msgs)) + return 0xffff; + + return data[0] << 8 | data[1]; +} + +static int i2c_mii_write(struct mii_bus *bus, int phy_id, int reg, u16 val) +{ + struct i2c_adapter *i2c = bus->priv; + struct i2c_msg msg; + int ret; + u8 data[3]; + + if (!i2c_mii_valid_phy_id(phy_id)) + return 0; + + data[0] = reg; + data[1] = val >> 8; + data[2] = val; + + msg.addr = i2c_mii_phy_addr(phy_id); + msg.flags = 0; + msg.len = 3; + msg.buf = data; + + ret = i2c_transfer(i2c, &msg, 1); + + return ret < 0 ? ret : 0; +} + +struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c) +{ + struct mii_bus *mii; + + if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) + return ERR_PTR(-EINVAL); + + mii = mdiobus_alloc(); + if (!mii) + return ERR_PTR(-ENOMEM); + + snprintf(mii->id, MII_BUS_ID_SIZE, "i2c:%s", dev_name(parent)); + mii->parent = parent; + mii->read = i2c_mii_read; + mii->write = i2c_mii_write; + mii->priv = i2c; + + return mii; +} +EXPORT_SYMBOL_GPL(mdio_i2c_alloc); + +MODULE_AUTHOR("Russell King"); +MODULE_DESCRIPTION("MDIO I2C bridge library"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/mdio-i2c.h b/drivers/net/phy/mdio-i2c.h new file mode 100644 index 000000000000..889ab57d7f3e --- /dev/null +++ b/drivers/net/phy/mdio-i2c.h @@ -0,0 +1,19 @@ +/* + * MDIO I2C bridge + * + * Copyright (C) 2015 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef MDIO_I2C_H +#define MDIO_I2C_H + +struct device; +struct i2c_adapter; +struct mii_bus; + +struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c); + +#endif -- cgit v1.2.3-55-g7522 From 9525ae83959b60c6061fe2f2caabdc8f69a48bc6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:03:13 +0100 Subject: phylink: add phylink infrastructure The link between the ethernet MAC and its PHY has become more complex as the interface evolves. This is especially true with serdes links, where the part of the PHY is effectively integrated into the MAC. Serdes links can be connected to a variety of devices, including SFF modules soldered down onto the board with the MAC, a SFP cage with a hotpluggable SFP module which may contain a PHY or directly modulate the serdes signals onto optical media with or without a PHY, or even a classical PHY connection. Moreover, the negotiation information on serdes links comes in two varieties - SGMII mode, where the PHY provides its speed/duplex/flow control information to the MAC, and 1000base-X mode where both ends exchange their abilities and each resolve the link capabilities. 
This means we need a more flexible means to support these arrangements, particularly with the hotpluggable nature of SFP, where the PHY can be attached or detached after the network device has been brought up. Ethtool information can come from multiple sources: - we may have a PHY operating in either SGMII or 1000base-X mode, in which case we take ethtool/mii data directly from the PHY. - we may have a optical SFP module without a PHY, with the MAC operating in 1000base-X mode - the ethtool/mii data needs to come from the MAC. - we may have a copper SFP module with a PHY whic can't be accessed, which means we need to take ethtool/mii data from the MAC. Phylink aims to solve this by providing an intermediary between the MAC and PHY, providing a safe way for PHYs to be hotplugged, and allowing a SFP driver to reconfigure the serdes connection. Phylink also takes over support of fixed link connections, where the speed/duplex/flow control are fixed, but link status may be controlled by a GPIO signal. By avoiding the fixed-phy implementation, phylink can provide a faster response to link events: fixed-phy has to wait for phylib to operate its state machine, which can take several seconds. In comparison, phylink takes milliseconds. Signed-off-by: Russell King - remove sync status - rework supported and advertisment handling - add 1000base-x speed for fixed links - use functionality exported from phy-core, reworking __phylink_ethtool_ksettings_set for it Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 10 + drivers/net/phy/Makefile | 1 + drivers/net/phy/phy_device.c | 1 + drivers/net/phy/phylink.c | 1169 ++++++++++++++++++++++++++++++++++++++++++ include/linux/phy.h | 2 + include/linux/phylink.h | 145 ++++++ 6 files changed, 1328 insertions(+) create mode 100644 drivers/net/phy/phylink.c create mode 100644 include/linux/phylink.h diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index a1d6fdba8980..a0a9e03e2f80 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -169,6 +169,16 @@ menuconfig PHYLIB devices. This option provides infrastructure for managing PHY devices. +config PHYLINK + tristate + depends on NETDEVICES + select PHYLIB + select SWPHY + help + PHYlink models the link between the PHY and MAC, allowing fixed + configuration links, PHYs, and Serdes links with MAC level + autonegotiation modes. + if PHYLIB config SWPHY diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 113e8d525c5e..c43e5b99fda4 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -18,6 +18,7 @@ endif libphy-$(CONFIG_SWPHY) += swphy.o libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o +obj-$(CONFIG_PHYLINK) += phylink.o obj-$(CONFIG_PHYLIB) += libphy.o obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index d536a9a7cd2b..9493fb369682 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1084,6 +1084,7 @@ void phy_detach(struct phy_device *phydev) phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; phy_suspend(phydev); + phydev->phylink = NULL; phy_led_triggers_unregister(phydev); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c new file mode 100644 index 000000000000..af61d7d400af --- /dev/null +++ b/drivers/net/phy/phylink.c @@ -0,0 +1,1169 @@ +/* + * phylink models the MAC to optional PHY connection, supporting + * technologies such as SFP cages where the PHY is hot-pluggable. 
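To make the shape of the new API concrete, a MAC driver supplies a phylink_mac_ops and creates a phylink instance roughly as follows. The mymac_* callback names and the choice of SGMII are illustrative assumptions; the real signatures are the ones declared in the linux/phylink.h header added by this patch.

#include <linux/of.h>
#include <linux/phy.h>
#include <linux/phylink.h>

/* Callback bodies are assumed to live elsewhere in the driver. */
void mymac_validate(struct net_device *ndev, unsigned long *supported,
                    struct phylink_link_state *state);
int mymac_link_state(struct net_device *ndev,
                     struct phylink_link_state *state);
void mymac_config(struct net_device *ndev, unsigned int mode,
                  const struct phylink_link_state *state);
void mymac_an_restart(struct net_device *ndev);
void mymac_link_down(struct net_device *ndev, unsigned int mode);
void mymac_link_up(struct net_device *ndev, unsigned int mode,
                   struct phy_device *phydev);

static const struct phylink_mac_ops mymac_phylink_ops = {
        .validate       = mymac_validate,
        .mac_link_state = mymac_link_state,
        .mac_config     = mymac_config,
        .mac_an_restart = mymac_an_restart,
        .mac_link_down  = mymac_link_down,
        .mac_link_up    = mymac_link_up,
};

static struct phylink *mymac_create_phylink(struct net_device *ndev,
                                            struct device_node *np)
{
        /* iface is the MAC-side interface mode; SGMII is just an example */
        return phylink_create(ndev, np, PHY_INTERFACE_MODE_SGMII,
                              &mymac_phylink_ops);
}

At ndo_open time the driver would then call phylink_of_phy_connect() and phylink_start(), and tear down with phylink_stop(), phylink_disconnect_phy() and phylink_destroy().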
+ * + * Copyright (C) 2015 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "swphy.h" + +#define SUPPORTED_INTERFACES \ + (SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE | \ + SUPPORTED_BNC | SUPPORTED_AUI | SUPPORTED_Backplane) +#define ADVERTISED_INTERFACES \ + (ADVERTISED_TP | ADVERTISED_MII | ADVERTISED_FIBRE | \ + ADVERTISED_BNC | ADVERTISED_AUI | ADVERTISED_Backplane) + +enum { + PHYLINK_DISABLE_STOPPED, +}; + +struct phylink { + struct net_device *netdev; + const struct phylink_mac_ops *ops; + + unsigned long phylink_disable_state; /* bitmask of disables */ + struct phy_device *phydev; + phy_interface_t link_interface; /* PHY_INTERFACE_xxx */ + u8 link_an_mode; /* MLO_AN_xxx */ + u8 link_port; /* The current non-phy ethtool port */ + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + + /* The link configuration settings */ + struct phylink_link_state link_config; + struct gpio_desc *link_gpio; + + struct mutex state_mutex; + struct phylink_link_state phy_state; + struct work_struct resolve; + + bool mac_link_dropped; +}; + +static inline void linkmode_zero(unsigned long *dst) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_copy(unsigned long *dst, const unsigned long *src) +{ + bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_and(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline void linkmode_or(unsigned long *dst, const unsigned long *a, + const unsigned long *b) +{ + bitmap_or(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static inline bool linkmode_empty(const unsigned long *src) +{ + return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +void phylink_set_port_modes(unsigned long *mask) +{ + phylink_set(mask, TP); + phylink_set(mask, AUI); + phylink_set(mask, MII); + phylink_set(mask, FIBRE); + phylink_set(mask, BNC); + phylink_set(mask, Backplane); +} +EXPORT_SYMBOL_GPL(phylink_set_port_modes); + +static int phylink_is_empty_linkmode(const unsigned long *linkmode) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, }; + + phylink_set_port_modes(tmp); + phylink_set(tmp, Autoneg); + phylink_set(tmp, Pause); + phylink_set(tmp, Asym_Pause); + + bitmap_andnot(tmp, linkmode, tmp, __ETHTOOL_LINK_MODE_MASK_NBITS); + + return linkmode_empty(tmp); +} + +static const char *phylink_an_mode_str(unsigned int mode) +{ + static const char *modestr[] = { + [MLO_AN_PHY] = "phy", + [MLO_AN_FIXED] = "fixed", + [MLO_AN_SGMII] = "SGMII", + [MLO_AN_8023Z] = "802.3z", + }; + + return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown"; +} + +static int phylink_validate(struct phylink *pl, unsigned long *supported, + struct phylink_link_state *state) +{ + pl->ops->validate(pl->netdev, supported, state); + + return phylink_is_empty_linkmode(supported) ? 
-EINVAL : 0; +} + +static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np) +{ + struct device_node *fixed_node; + const struct phy_setting *s; + struct gpio_desc *desc; + const __be32 *fixed_prop; + u32 speed; + int ret, len; + + fixed_node = of_get_child_by_name(np, "fixed-link"); + if (fixed_node) { + ret = of_property_read_u32(fixed_node, "speed", &speed); + + pl->link_config.speed = speed; + pl->link_config.duplex = DUPLEX_HALF; + + if (of_property_read_bool(fixed_node, "full-duplex")) + pl->link_config.duplex = DUPLEX_FULL; + + /* We treat the "pause" and "asym-pause" terminology as + * defining the link partner's ability. */ + if (of_property_read_bool(fixed_node, "pause")) + pl->link_config.pause |= MLO_PAUSE_SYM; + if (of_property_read_bool(fixed_node, "asym-pause")) + pl->link_config.pause |= MLO_PAUSE_ASYM; + + if (ret == 0) { + desc = fwnode_get_named_gpiod(&fixed_node->fwnode, + "link-gpios", 0, + GPIOD_IN, "?"); + + if (!IS_ERR(desc)) + pl->link_gpio = desc; + else if (desc == ERR_PTR(-EPROBE_DEFER)) + ret = -EPROBE_DEFER; + } + of_node_put(fixed_node); + + if (ret) + return ret; + } else { + fixed_prop = of_get_property(np, "fixed-link", &len); + if (!fixed_prop) { + netdev_err(pl->netdev, "broken fixed-link?\n"); + return -EINVAL; + } + if (len == 5 * sizeof(*fixed_prop)) { + pl->link_config.duplex = be32_to_cpu(fixed_prop[1]) ? + DUPLEX_FULL : DUPLEX_HALF; + pl->link_config.speed = be32_to_cpu(fixed_prop[2]); + if (be32_to_cpu(fixed_prop[3])) + pl->link_config.pause |= MLO_PAUSE_SYM; + if (be32_to_cpu(fixed_prop[4])) + pl->link_config.pause |= MLO_PAUSE_ASYM; + } + } + + if (pl->link_config.speed > SPEED_1000 && + pl->link_config.duplex != DUPLEX_FULL) + netdev_warn(pl->netdev, "fixed link specifies half duplex for %dMbps link?\n", + pl->link_config.speed); + + bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_copy(pl->link_config.advertising, pl->supported); + phylink_validate(pl, pl->supported, &pl->link_config); + + s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex, + pl->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, true); + linkmode_zero(pl->supported); + phylink_set(pl->supported, MII); + if (s) { + __set_bit(s->bit, pl->supported); + } else { + netdev_warn(pl->netdev, "fixed link %s duplex %dMbps not recognised\n", + pl->link_config.duplex == DUPLEX_FULL ? 
"full" : "half", + pl->link_config.speed); + } + + linkmode_and(pl->link_config.advertising, pl->link_config.advertising, + pl->supported); + + pl->link_config.link = 1; + pl->link_config.an_complete = 1; + + return 0; +} + +static int phylink_parse_mode(struct phylink *pl, struct device_node *np) +{ + struct device_node *dn; + const char *managed; + + dn = of_get_child_by_name(np, "fixed-link"); + if (dn || of_find_property(np, "fixed-link", NULL)) + pl->link_an_mode = MLO_AN_FIXED; + of_node_put(dn); + + if (of_property_read_string(np, "managed", &managed) == 0 && + strcmp(managed, "in-band-status") == 0) { + if (pl->link_an_mode == MLO_AN_FIXED) { + netdev_err(pl->netdev, + "can't use both fixed-link and in-band-status\n"); + return -EINVAL; + } + + linkmode_zero(pl->supported); + phylink_set(pl->supported, MII); + phylink_set(pl->supported, Autoneg); + phylink_set(pl->supported, Asym_Pause); + phylink_set(pl->supported, Pause); + pl->link_config.an_enabled = true; + + switch (pl->link_config.interface) { + case PHY_INTERFACE_MODE_SGMII: + phylink_set(pl->supported, 10baseT_Half); + phylink_set(pl->supported, 10baseT_Full); + phylink_set(pl->supported, 100baseT_Half); + phylink_set(pl->supported, 100baseT_Full); + phylink_set(pl->supported, 1000baseT_Half); + phylink_set(pl->supported, 1000baseT_Full); + pl->link_an_mode = MLO_AN_SGMII; + break; + + case PHY_INTERFACE_MODE_1000BASEX: + phylink_set(pl->supported, 1000baseX_Full); + pl->link_an_mode = MLO_AN_8023Z; + break; + + case PHY_INTERFACE_MODE_2500BASEX: + phylink_set(pl->supported, 2500baseX_Full); + pl->link_an_mode = MLO_AN_8023Z; + break; + + default: + netdev_err(pl->netdev, + "incorrect link mode %s for in-band status\n", + phy_modes(pl->link_config.interface)); + return -EINVAL; + } + + linkmode_copy(pl->link_config.advertising, pl->supported); + + if (phylink_validate(pl, pl->supported, &pl->link_config)) { + netdev_err(pl->netdev, + "failed to validate link configuration for in-band status\n"); + return -EINVAL; + } + } + + return 0; +} + +static void phylink_mac_config(struct phylink *pl, + const struct phylink_link_state *state) +{ + netdev_dbg(pl->netdev, + "%s: mode=%s/%s/%s/%s adv=%*pb pause=%02x link=%u an=%u\n", + __func__, phylink_an_mode_str(pl->link_an_mode), + phy_modes(state->interface), + phy_speed_to_str(state->speed), + phy_duplex_to_str(state->duplex), + __ETHTOOL_LINK_MODE_MASK_NBITS, state->advertising, + state->pause, state->link, state->an_enabled); + + pl->ops->mac_config(pl->netdev, pl->link_an_mode, state); +} + +static void phylink_mac_an_restart(struct phylink *pl) +{ + if (pl->link_config.an_enabled && + (pl->link_config.interface == PHY_INTERFACE_MODE_1000BASEX || + pl->link_config.interface == PHY_INTERFACE_MODE_2500BASEX)) + pl->ops->mac_an_restart(pl->netdev); +} + +static int phylink_get_mac_state(struct phylink *pl, struct phylink_link_state *state) +{ + struct net_device *ndev = pl->netdev; + + linkmode_copy(state->advertising, pl->link_config.advertising); + linkmode_zero(state->lp_advertising); + state->interface = pl->link_config.interface; + state->an_enabled = pl->link_config.an_enabled; + state->link = 1; + + return pl->ops->mac_link_state(ndev, state); +} + +/* The fixed state is... fixed except for the link state, + * which may be determined by a GPIO. 
+ */ +static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_state *state) +{ + *state = pl->link_config; + if (pl->link_gpio) + state->link = !!gpiod_get_value(pl->link_gpio); +} + +/* Flow control is resolved according to our and the link partners + * advertisments using the following drawn from the 802.3 specs: + * Local device Link partner + * Pause AsymDir Pause AsymDir Result + * 1 X 1 X TX+RX + * 0 1 1 1 RX + * 1 1 0 1 TX + */ +static void phylink_resolve_flow(struct phylink *pl, + struct phylink_link_state *state) +{ + int new_pause = 0; + + if (pl->link_config.pause & MLO_PAUSE_AN) { + int pause = 0; + + if (phylink_test(pl->link_config.advertising, Pause)) + pause |= MLO_PAUSE_SYM; + if (phylink_test(pl->link_config.advertising, Asym_Pause)) + pause |= MLO_PAUSE_ASYM; + + pause &= state->pause; + + if (pause & MLO_PAUSE_SYM) + new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX; + else if (pause & MLO_PAUSE_ASYM) + new_pause = state->pause & MLO_PAUSE_SYM ? + MLO_PAUSE_RX : MLO_PAUSE_TX; + } else { + new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK; + } + + state->pause &= ~MLO_PAUSE_TXRX_MASK; + state->pause |= new_pause; +} + +static const char *phylink_pause_to_str(int pause) +{ + switch (pause & MLO_PAUSE_TXRX_MASK) { + case MLO_PAUSE_TX | MLO_PAUSE_RX: + return "rx/tx"; + case MLO_PAUSE_TX: + return "tx"; + case MLO_PAUSE_RX: + return "rx"; + default: + return "off"; + } +} + +static void phylink_resolve(struct work_struct *w) +{ + struct phylink *pl = container_of(w, struct phylink, resolve); + struct phylink_link_state link_state; + struct net_device *ndev = pl->netdev; + + mutex_lock(&pl->state_mutex); + if (pl->phylink_disable_state) { + pl->mac_link_dropped = false; + link_state.link = false; + } else if (pl->mac_link_dropped) { + link_state.link = false; + } else { + switch (pl->link_an_mode) { + case MLO_AN_PHY: + link_state = pl->phy_state; + phylink_resolve_flow(pl, &link_state); + phylink_mac_config(pl, &link_state); + break; + + case MLO_AN_FIXED: + phylink_get_fixed_state(pl, &link_state); + phylink_mac_config(pl, &link_state); + break; + + case MLO_AN_SGMII: + phylink_get_mac_state(pl, &link_state); + if (pl->phydev) { + bool changed = false; + + link_state.link = link_state.link && + pl->phy_state.link; + + if (pl->phy_state.interface != + link_state.interface) { + link_state.interface = pl->phy_state.interface; + changed = true; + } + + /* Propagate the flow control from the PHY + * to the MAC. Also propagate the interface + * if changed. 
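For the in-band modes resolved above, the MAC driver's mac_link_state callback is what feeds phylink_get_mac_state(). A minimal sketch is shown below; the MYMAC_PCS_* register layout and the private structure are invented for illustration and stand in for real hardware access.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phylink.h>

struct mymac_priv {                     /* illustrative private data */
        void __iomem *pcs_base;
};

/* Hypothetical PCS status register layout */
#define MYMAC_PCS_STATUS        0x08
#define MYMAC_PCS_LINK          BIT(0)
#define MYMAC_PCS_FDX           BIT(1)
#define MYMAC_PCS_1G            BIT(2)
#define MYMAC_PCS_AN_DONE       BIT(3)

static int mymac_link_state(struct net_device *ndev,
                            struct phylink_link_state *state)
{
        struct mymac_priv *priv = netdev_priv(ndev);
        u32 status = readl(priv->pcs_base + MYMAC_PCS_STATUS);

        state->link = !!(status & MYMAC_PCS_LINK);
        state->an_complete = !!(status & MYMAC_PCS_AN_DONE);
        state->duplex = (status & MYMAC_PCS_FDX) ? DUPLEX_FULL : DUPLEX_HALF;
        state->speed = (status & MYMAC_PCS_1G) ? SPEED_1000 : SPEED_100;
        state->pause = MLO_PAUSE_NONE;

        return 0;
}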
+ */ + if (pl->phy_state.link || changed) { + link_state.pause |= pl->phy_state.pause; + phylink_resolve_flow(pl, &link_state); + + phylink_mac_config(pl, &link_state); + } + } + break; + + case MLO_AN_8023Z: + phylink_get_mac_state(pl, &link_state); + break; + } + } + + if (link_state.link != netif_carrier_ok(ndev)) { + if (!link_state.link) { + netif_carrier_off(ndev); + pl->ops->mac_link_down(ndev, pl->link_an_mode); + netdev_info(ndev, "Link is Down\n"); + } else { + pl->ops->mac_link_up(ndev, pl->link_an_mode, + pl->phydev); + + netif_carrier_on(ndev); + + netdev_info(ndev, + "Link is Up - %s/%s - flow control %s\n", + phy_speed_to_str(link_state.speed), + phy_duplex_to_str(link_state.duplex), + phylink_pause_to_str(link_state.pause)); + } + } + if (!link_state.link && pl->mac_link_dropped) { + pl->mac_link_dropped = false; + queue_work(system_power_efficient_wq, &pl->resolve); + } + mutex_unlock(&pl->state_mutex); +} + +static void phylink_run_resolve(struct phylink *pl) +{ + if (!pl->phylink_disable_state) + queue_work(system_power_efficient_wq, &pl->resolve); +} + +struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, + phy_interface_t iface, const struct phylink_mac_ops *ops) +{ + struct phylink *pl; + int ret; + + pl = kzalloc(sizeof(*pl), GFP_KERNEL); + if (!pl) + return ERR_PTR(-ENOMEM); + + mutex_init(&pl->state_mutex); + INIT_WORK(&pl->resolve, phylink_resolve); + pl->netdev = ndev; + pl->phy_state.interface = iface; + pl->link_interface = iface; + pl->link_port = PORT_MII; + pl->link_config.interface = iface; + pl->link_config.pause = MLO_PAUSE_AN; + pl->link_config.speed = SPEED_UNKNOWN; + pl->link_config.duplex = DUPLEX_UNKNOWN; + pl->ops = ops; + __set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + + bitmap_fill(pl->supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + linkmode_copy(pl->link_config.advertising, pl->supported); + phylink_validate(pl, pl->supported, &pl->link_config); + + ret = phylink_parse_mode(pl, np); + if (ret < 0) { + kfree(pl); + return ERR_PTR(ret); + } + + if (pl->link_an_mode == MLO_AN_FIXED) { + ret = phylink_parse_fixedlink(pl, np); + if (ret < 0) { + kfree(pl); + return ERR_PTR(ret); + } + } + + return pl; +} +EXPORT_SYMBOL_GPL(phylink_create); + +void phylink_destroy(struct phylink *pl) +{ + cancel_work_sync(&pl->resolve); + kfree(pl); +} +EXPORT_SYMBOL_GPL(phylink_destroy); + +void phylink_phy_change(struct phy_device *phydev, bool up, bool do_carrier) +{ + struct phylink *pl = phydev->phylink; + + mutex_lock(&pl->state_mutex); + pl->phy_state.speed = phydev->speed; + pl->phy_state.duplex = phydev->duplex; + pl->phy_state.pause = MLO_PAUSE_NONE; + if (phydev->pause) + pl->phy_state.pause |= MLO_PAUSE_SYM; + if (phydev->asym_pause) + pl->phy_state.pause |= MLO_PAUSE_ASYM; + pl->phy_state.interface = phydev->interface; + pl->phy_state.link = up; + mutex_unlock(&pl->state_mutex); + + phylink_run_resolve(pl); + + netdev_dbg(pl->netdev, "phy link %s %s/%s/%s\n", up ? 
"up" : "down", + phy_modes(phydev->interface), + phy_speed_to_str(phydev->speed), + phy_duplex_to_str(phydev->duplex)); +} + +static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) +{ + struct phylink_link_state config; + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + u32 advertising; + int ret; + + memset(&config, 0, sizeof(config)); + ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported); + ethtool_convert_legacy_u32_to_link_mode(config.advertising, + phy->advertising); + config.interface = pl->link_config.interface; + + /* + * This is the new way of dealing with flow control for PHYs, + * as described by Timur Tabi in commit 529ed1275263 ("net: phy: + * phy drivers should not set SUPPORTED_[Asym_]Pause") except + * using our validate call to the MAC, we rely upon the MAC + * clearing the bits from both supported and advertising fields. + */ + if (phylink_test(supported, Pause)) + phylink_set(config.advertising, Pause); + if (phylink_test(supported, Asym_Pause)) + phylink_set(config.advertising, Asym_Pause); + + ret = phylink_validate(pl, supported, &config); + if (ret) + return ret; + + phy->phylink = pl; + phy->phy_link_change = phylink_phy_change; + + netdev_info(pl->netdev, + "PHY [%s] driver [%s]\n", dev_name(&phy->mdio.dev), + phy->drv->name); + + mutex_lock(&phy->lock); + mutex_lock(&pl->state_mutex); + pl->netdev->phydev = phy; + pl->phydev = phy; + linkmode_copy(pl->supported, supported); + linkmode_copy(pl->link_config.advertising, config.advertising); + + /* Restrict the phy advertisment according to the MAC support. */ + ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising); + phy->advertising = advertising; + mutex_unlock(&pl->state_mutex); + mutex_unlock(&phy->lock); + + netdev_dbg(pl->netdev, + "phy: setting supported %*pb advertising 0x%08x\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported, + phy->advertising); + + phy_start_machine(phy); + if (phy->irq > 0) + phy_start_interrupts(phy); + + return 0; +} + +int phylink_connect_phy(struct phylink *pl, struct phy_device *phy) +{ + int ret; + + ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface); + if (ret) + return ret; + + ret = phylink_bringup_phy(pl, phy); + if (ret) + phy_detach(phy); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_connect_phy); + +int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn) +{ + struct device_node *phy_node; + struct phy_device *phy_dev; + int ret; + + /* Fixed links are handled without needing a PHY */ + if (pl->link_an_mode == MLO_AN_FIXED) + return 0; + + phy_node = of_parse_phandle(dn, "phy-handle", 0); + if (!phy_node) + phy_node = of_parse_phandle(dn, "phy", 0); + if (!phy_node) + phy_node = of_parse_phandle(dn, "phy-device", 0); + + if (!phy_node) { + if (pl->link_an_mode == MLO_AN_PHY) { + netdev_err(pl->netdev, "unable to find PHY node\n"); + return -ENODEV; + } + return 0; + } + + phy_dev = of_phy_attach(pl->netdev, phy_node, 0, pl->link_interface); + /* We're done with the phy_node handle */ + of_node_put(phy_node); + + if (!phy_dev) + return -ENODEV; + + ret = phylink_bringup_phy(pl, phy_dev); + if (ret) + phy_detach(phy_dev); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_of_phy_connect); + +void phylink_disconnect_phy(struct phylink *pl) +{ + struct phy_device *phy; + + WARN_ON(!lockdep_rtnl_is_held()); + + phy = pl->phydev; + if (phy) { + mutex_lock(&phy->lock); + mutex_lock(&pl->state_mutex); + pl->netdev->phydev = NULL; + pl->phydev = NULL; + mutex_unlock(&pl->state_mutex); + 
mutex_unlock(&phy->lock); + flush_work(&pl->resolve); + + phy_disconnect(phy); + } +} +EXPORT_SYMBOL_GPL(phylink_disconnect_phy); + +void phylink_mac_change(struct phylink *pl, bool up) +{ + if (!up) + pl->mac_link_dropped = true; + phylink_run_resolve(pl); + netdev_dbg(pl->netdev, "mac link %s\n", up ? "up" : "down"); +} +EXPORT_SYMBOL_GPL(phylink_mac_change); + +void phylink_start(struct phylink *pl) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + netdev_info(pl->netdev, "configuring for %s/%s link mode\n", + phylink_an_mode_str(pl->link_an_mode), + phy_modes(pl->link_config.interface)); + + /* Apply the link configuration to the MAC when starting. This allows + * a fixed-link to start with the correct parameters, and also + * ensures that we set the appropriate advertisment for Serdes links. + */ + phylink_resolve_flow(pl, &pl->link_config); + phylink_mac_config(pl, &pl->link_config); + + clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + phylink_run_resolve(pl); + + if (pl->phydev) + phy_start(pl->phydev); +} +EXPORT_SYMBOL_GPL(phylink_start); + +void phylink_stop(struct phylink *pl) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + phy_stop(pl->phydev); + + set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); + flush_work(&pl->resolve); +} +EXPORT_SYMBOL_GPL(phylink_stop); + +void phylink_ethtool_get_wol(struct phylink *pl, struct ethtool_wolinfo *wol) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + wol->supported = 0; + wol->wolopts = 0; + + if (pl->phydev) + phy_ethtool_get_wol(pl->phydev, wol); +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_wol); + +int phylink_ethtool_set_wol(struct phylink *pl, struct ethtool_wolinfo *wol) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_ethtool_set_wol(pl->phydev, wol); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_set_wol); + +static void phylink_merge_link_mode(unsigned long *dst, const unsigned long *b) +{ + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask); + + linkmode_zero(mask); + phylink_set_port_modes(mask); + + linkmode_and(dst, dst, mask); + linkmode_or(dst, dst, b); +} + +static void phylink_get_ksettings(const struct phylink_link_state *state, + struct ethtool_link_ksettings *kset) +{ + phylink_merge_link_mode(kset->link_modes.advertising, state->advertising); + linkmode_copy(kset->link_modes.lp_advertising, state->lp_advertising); + kset->base.speed = state->speed; + kset->base.duplex = state->duplex; + kset->base.autoneg = state->an_enabled ? AUTONEG_ENABLE : + AUTONEG_DISABLE; +} + +int phylink_ethtool_ksettings_get(struct phylink *pl, + struct ethtool_link_ksettings *kset) +{ + struct phylink_link_state link_state; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) { + phy_ethtool_ksettings_get(pl->phydev, kset); + } else { + kset->base.port = pl->link_port; + } + + linkmode_copy(kset->link_modes.supported, pl->supported); + + switch (pl->link_an_mode) { + case MLO_AN_FIXED: + /* We are using fixed settings. Report these as the + * current link settings - and note that these also + * represent the supported speeds/duplex/pause modes. + */ + phylink_get_fixed_state(pl, &link_state); + phylink_get_ksettings(&link_state, kset); + break; + + case MLO_AN_SGMII: + /* If there is a phy attached, then use the reported + * settings from the phy with no modification. + */ + if (pl->phydev) + break; + + case MLO_AN_8023Z: + phylink_get_mac_state(pl, &link_state); + + /* The MAC is reporting the link results from its own PCS + * layer via in-band status. 
Report these as the current + * link settings. + */ + phylink_get_ksettings(&link_state, kset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get); + +int phylink_ethtool_ksettings_set(struct phylink *pl, + const struct ethtool_link_ksettings *kset) +{ + struct ethtool_link_ksettings our_kset; + struct phylink_link_state config; + int ret; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (kset->base.autoneg != AUTONEG_DISABLE && + kset->base.autoneg != AUTONEG_ENABLE) + return -EINVAL; + + config = pl->link_config; + + /* Mask out unsupported advertisments */ + linkmode_and(config.advertising, kset->link_modes.advertising, + pl->supported); + + /* FIXME: should we reject autoneg if phy/mac does not support it? */ + if (kset->base.autoneg == AUTONEG_DISABLE) { + const struct phy_setting *s; + + /* Autonegotiation disabled, select a suitable speed and + * duplex. + */ + s = phy_lookup_setting(kset->base.speed, kset->base.duplex, + pl->supported, + __ETHTOOL_LINK_MODE_MASK_NBITS, false); + if (!s) + return -EINVAL; + + /* If we have a fixed link (as specified by firmware), refuse + * to change link parameters. + */ + if (pl->link_an_mode == MLO_AN_FIXED && + (s->speed != pl->link_config.speed || + s->duplex != pl->link_config.duplex)) + return -EINVAL; + + config.speed = s->speed; + config.duplex = s->duplex; + config.an_enabled = false; + + __clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising); + } else { + /* If we have a fixed link, refuse to enable autonegotiation */ + if (pl->link_an_mode == MLO_AN_FIXED) + return -EINVAL; + + config.speed = SPEED_UNKNOWN; + config.duplex = DUPLEX_UNKNOWN; + config.an_enabled = true; + + __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising); + } + + if (phylink_validate(pl, pl->supported, &config)) + return -EINVAL; + + /* If autonegotiation is enabled, we must have an advertisment */ + if (config.an_enabled && phylink_is_empty_linkmode(config.advertising)) + return -EINVAL; + + our_kset = *kset; + linkmode_copy(our_kset.link_modes.advertising, config.advertising); + our_kset.base.speed = config.speed; + our_kset.base.duplex = config.duplex; + + /* If we have a PHY, configure the phy */ + if (pl->phydev) { + ret = phy_ethtool_ksettings_set(pl->phydev, &our_kset); + if (ret) + return ret; + } + + mutex_lock(&pl->state_mutex); + /* Configure the MAC to match the new settings */ + linkmode_copy(pl->link_config.advertising, our_kset.link_modes.advertising); + pl->link_config.speed = our_kset.base.speed; + pl->link_config.duplex = our_kset.base.duplex; + pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE; + + if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { + phylink_mac_config(pl, &pl->link_config); + phylink_mac_an_restart(pl); + } + mutex_unlock(&pl->state_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_set); + +int phylink_ethtool_nway_reset(struct phylink *pl) +{ + int ret = 0; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_restart_aneg(pl->phydev); + phylink_mac_an_restart(pl); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_nway_reset); + +void phylink_ethtool_get_pauseparam(struct phylink *pl, + struct ethtool_pauseparam *pause) +{ + WARN_ON(!lockdep_rtnl_is_held()); + + pause->autoneg = !!(pl->link_config.pause & MLO_PAUSE_AN); + pause->rx_pause = !!(pl->link_config.pause & MLO_PAUSE_RX); + pause->tx_pause = !!(pl->link_config.pause & MLO_PAUSE_TX); +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_pauseparam); + +int 
phylink_ethtool_set_pauseparam(struct phylink *pl, + struct ethtool_pauseparam *pause) +{ + struct phylink_link_state *config = &pl->link_config; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (!phylink_test(pl->supported, Pause) && + !phylink_test(pl->supported, Asym_Pause)) + return -EOPNOTSUPP; + + if (!phylink_test(pl->supported, Asym_Pause) && + !pause->autoneg && pause->rx_pause != pause->tx_pause) + return -EINVAL; + + config->pause &= ~(MLO_PAUSE_AN | MLO_PAUSE_TXRX_MASK); + + if (pause->autoneg) + config->pause |= MLO_PAUSE_AN; + if (pause->rx_pause) + config->pause |= MLO_PAUSE_RX; + if (pause->tx_pause) + config->pause |= MLO_PAUSE_TX; + + if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) { + switch (pl->link_an_mode) { + case MLO_AN_PHY: + /* Silently mark the carrier down, and then trigger a resolve */ + netif_carrier_off(pl->netdev); + phylink_run_resolve(pl); + break; + + case MLO_AN_FIXED: + /* Should we allow fixed links to change against the config? */ + phylink_resolve_flow(pl, config); + phylink_mac_config(pl, config); + break; + + case MLO_AN_SGMII: + case MLO_AN_8023Z: + phylink_mac_config(pl, config); + phylink_mac_an_restart(pl); + break; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_set_pauseparam); + +int phylink_init_eee(struct phylink *pl, bool clk_stop_enable) +{ + int ret = -EPROTONOSUPPORT; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_init_eee(pl->phydev, clk_stop_enable); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_init_eee); + +int phylink_get_eee_err(struct phylink *pl) +{ + int ret = 0; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_get_eee_err(pl->phydev); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_get_eee_err); + +int phylink_ethtool_get_eee(struct phylink *pl, struct ethtool_eee *eee) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_ethtool_get_eee(pl->phydev, eee); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_eee); + +int phylink_ethtool_set_eee(struct phylink *pl, struct ethtool_eee *eee) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->phydev) + ret = phy_ethtool_set_eee(pl->phydev, eee); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_set_eee); + +/* This emulates MII registers for a fixed-mode phy operating as per the + * passed in state. "aneg" defines if we report negotiation is possible. + * + * FIXME: should deal with negotiation state too. 
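The ethtool entry points above are intended to be called as thin wrappers from a MAC driver's ethtool_ops. A sketch of that wiring follows; the mymac_priv structure (only the field used here is shown) is an assumption for illustration.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phylink.h>

struct mymac_priv {                     /* only the field used here */
        struct phylink *phylink;
};

static int mymac_get_link_ksettings(struct net_device *ndev,
                                    struct ethtool_link_ksettings *kset)
{
        struct mymac_priv *priv = netdev_priv(ndev);

        return phylink_ethtool_ksettings_get(priv->phylink, kset);
}

static int mymac_set_link_ksettings(struct net_device *ndev,
                                    const struct ethtool_link_ksettings *kset)
{
        struct mymac_priv *priv = netdev_priv(ndev);

        return phylink_ethtool_ksettings_set(priv->phylink, kset);
}

static const struct ethtool_ops mymac_ethtool_ops = {
        .get_link_ksettings     = mymac_get_link_ksettings,
        .set_link_ksettings     = mymac_set_link_ksettings,
};

The pause, nway_reset, WoL and EEE hooks follow the same pattern, each forwarding to the corresponding phylink_ethtool_*() helper.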
+ */ +static int phylink_mii_emul_read(struct net_device *ndev, unsigned int reg, + struct phylink_link_state *state, bool aneg) +{ + struct fixed_phy_status fs; + int val; + + fs.link = state->link; + fs.speed = state->speed; + fs.duplex = state->duplex; + fs.pause = state->pause & MLO_PAUSE_SYM; + fs.asym_pause = state->pause & MLO_PAUSE_ASYM; + + val = swphy_read_reg(reg, &fs); + if (reg == MII_BMSR) { + if (!state->an_complete) + val &= ~BMSR_ANEGCOMPLETE; + if (!aneg) + val &= ~BMSR_ANEGCAPABLE; + } + return val; +} + +static int phylink_mii_read(struct phylink *pl, unsigned int phy_id, + unsigned int reg) +{ + struct phylink_link_state state; + int val = 0xffff; + + /* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */ + if (pl->phydev) + return mdiobus_read(pl->phydev->mdio.bus, phy_id, reg); + + switch (pl->link_an_mode) { + case MLO_AN_FIXED: + if (phy_id == 0) { + phylink_get_fixed_state(pl, &state); + val = phylink_mii_emul_read(pl->netdev, reg, &state, + true); + } + break; + + case MLO_AN_PHY: + return -EOPNOTSUPP; + + case MLO_AN_SGMII: + /* No phy, fall through to 8023z method */ + case MLO_AN_8023Z: + if (phy_id == 0) { + val = phylink_get_mac_state(pl, &state); + if (val < 0) + return val; + + val = phylink_mii_emul_read(pl->netdev, reg, &state, + true); + } + break; + } + + return val & 0xffff; +} + +static int phylink_mii_write(struct phylink *pl, unsigned int phy_id, + unsigned int reg, unsigned int val) +{ + /* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */ + if (pl->phydev) { + mdiobus_write(pl->phydev->mdio.bus, phy_id, reg, val); + return 0; + } + + switch (pl->link_an_mode) { + case MLO_AN_FIXED: + break; + + case MLO_AN_PHY: + return -EOPNOTSUPP; + + case MLO_AN_SGMII: + /* No phy, fall through to 8023z method */ + case MLO_AN_8023Z: + break; + } + + return 0; +} + +int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *mii_data = if_mii(ifr); + int val, ret; + + WARN_ON(!lockdep_rtnl_is_held()); + + switch (cmd) { + case SIOCGMIIPHY: + mii_data->phy_id = pl->phydev ? 
pl->phydev->mdio.addr : 0; + /* fallthrough */ + + case SIOCGMIIREG: + val = phylink_mii_read(pl, mii_data->phy_id, mii_data->reg_num); + if (val < 0) { + ret = val; + } else { + mii_data->val_out = val; + ret = 0; + } + break; + + case SIOCSMIIREG: + ret = phylink_mii_write(pl, mii_data->phy_id, mii_data->reg_num, + mii_data->val_in); + break; + + default: + ret = -EOPNOTSUPP; + if (pl->phydev) + ret = phy_mii_ioctl(pl->phydev, ifr, cmd); + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_mii_ioctl); + +MODULE_LICENSE("GPL"); diff --git a/include/linux/phy.h b/include/linux/phy.h index 0a5e8e62c9e0..d78cd01ea513 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -182,6 +182,7 @@ static inline const char *phy_modes(phy_interface_t interface) #define MII_ADDR_C45 (1<<30) struct device; +struct phylink; struct sk_buff; /* @@ -469,6 +470,7 @@ struct phy_device { struct mutex lock; + struct phylink *phylink; struct net_device *attached_dev; u8 mdix; diff --git a/include/linux/phylink.h b/include/linux/phylink.h new file mode 100644 index 000000000000..76f054f39684 --- /dev/null +++ b/include/linux/phylink.h @@ -0,0 +1,145 @@ +#ifndef NETDEV_PCS_H +#define NETDEV_PCS_H + +#include +#include +#include + +struct device_node; +struct ethtool_cmd; +struct net_device; + +enum { + MLO_PAUSE_NONE, + MLO_PAUSE_ASYM = BIT(0), + MLO_PAUSE_SYM = BIT(1), + MLO_PAUSE_RX = BIT(2), + MLO_PAUSE_TX = BIT(3), + MLO_PAUSE_TXRX_MASK = MLO_PAUSE_TX | MLO_PAUSE_RX, + MLO_PAUSE_AN = BIT(4), + + MLO_AN_PHY = 0, /* Conventional PHY */ + MLO_AN_FIXED, /* Fixed-link mode */ + MLO_AN_SGMII, /* Cisco SGMII protocol */ + MLO_AN_8023Z, /* 1000base-X protocol */ +}; + +static inline bool phylink_autoneg_inband(unsigned int mode) +{ + return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z; +} + +struct phylink_link_state { + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); + phy_interface_t interface; /* PHY_INTERFACE_xxx */ + int speed; + int duplex; + int pause; + unsigned int link:1; + unsigned int an_enabled:1; + unsigned int an_complete:1; +}; + +struct phylink_mac_ops { + /** + * validate: validate and update the link configuration + * @ndev: net_device structure associated with MAC + * @config: configuration to validate + * + * Update the %config->supported and %config->advertised masks + * clearing bits that can not be supported. + * + * Note: the PHY may be able to transform from one connection + * technology to another, so, eg, don't clear 1000BaseX just + * because the MAC is unable to support it. This is more about + * clearing unsupported speeds and duplex settings. + * + * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX + * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode + * based on %config->advertised and/or %config->speed. + */ + void (*validate)(struct net_device *ndev, unsigned long *supported, + struct phylink_link_state *state); + + /* Read the current link state from the hardware */ + int (*mac_link_state)(struct net_device *, struct phylink_link_state *); + + /* Configure the MAC */ + /** + * mac_config: configure the MAC for the selected mode and state + * @ndev: net_device structure for the MAC + * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII + * @state: state structure + * + * The action performed depends on the currently selected mode: + * + * %MLO_AN_FIXED, %MLO_AN_PHY: + * set the specified speed, duplex, pause mode, and phy interface + * mode in the provided @state. 
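Since the mac_config contract above differs per negotiation mode, an implementation typically switches on @mode. The outline below is a sketch only; the mymac_* helpers are invented stand-ins for the driver's own register programming.

#include <linux/phylink.h>

void mymac_set_speed(struct net_device *ndev, int speed, int duplex);
void mymac_set_pause(struct net_device *ndev, int pause);
void mymac_enable_inband_an(struct net_device *ndev, phy_interface_t iface);

static void mymac_config(struct net_device *ndev, unsigned int mode,
                         const struct phylink_link_state *state)
{
        switch (mode) {
        case MLO_AN_FIXED:
        case MLO_AN_PHY:
                /* Program exactly what @state specifies */
                mymac_set_speed(ndev, state->speed, state->duplex);
                mymac_set_pause(ndev, state->pause);
                break;
        case MLO_AN_SGMII:
                /* Speed/duplex arrive in-band; pause is still programmed */
                mymac_enable_inband_an(ndev, PHY_INTERFACE_MODE_SGMII);
                mymac_set_pause(ndev, state->pause);
                break;
        case MLO_AN_8023Z:
                /* Advertise state->advertising over the 1000base-X link */
                mymac_enable_inband_an(ndev, state->interface);
                break;
        }
}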
+ * %MLO_AN_8023Z: + * place the link in 1000base-X mode, advertising the parameters + * given in advertising in @state. + * %MLO_AN_SGMII: + * place the link in Cisco SGMII mode - there is no advertisment + * to make as the PHY communicates the speed and duplex to the + * MAC over the in-band control word. Configuration of the pause + * mode is as per MLO_AN_PHY since this is not included. + */ + void (*mac_config)(struct net_device *ndev, unsigned int mode, + const struct phylink_link_state *state); + + /** + * mac_an_restart: restart 802.3z BaseX autonegotiation + * @ndev: net_device structure for the MAC + */ + void (*mac_an_restart)(struct net_device *ndev); + + void (*mac_link_down)(struct net_device *, unsigned int mode); + void (*mac_link_up)(struct net_device *, unsigned int mode, + struct phy_device *); +}; + +struct phylink *phylink_create(struct net_device *, struct device_node *, + phy_interface_t iface, const struct phylink_mac_ops *ops); +void phylink_destroy(struct phylink *); + +int phylink_connect_phy(struct phylink *, struct phy_device *); +int phylink_of_phy_connect(struct phylink *, struct device_node *); +void phylink_disconnect_phy(struct phylink *); + +void phylink_mac_change(struct phylink *, bool up); + +void phylink_start(struct phylink *); +void phylink_stop(struct phylink *); + +void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *); +int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *); + +int phylink_ethtool_ksettings_get(struct phylink *, + struct ethtool_link_ksettings *); +int phylink_ethtool_ksettings_set(struct phylink *, + const struct ethtool_link_ksettings *); +int phylink_ethtool_nway_reset(struct phylink *); +void phylink_ethtool_get_pauseparam(struct phylink *, + struct ethtool_pauseparam *); +int phylink_ethtool_set_pauseparam(struct phylink *, + struct ethtool_pauseparam *); +int phylink_init_eee(struct phylink *, bool); +int phylink_get_eee_err(struct phylink *); +int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); +int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); +int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); + +#define phylink_zero(bm) \ + bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS) +#define __phylink_do_bit(op, bm, mode) \ + op(ETHTOOL_LINK_MODE_ ## mode ## _BIT, bm) + +#define phylink_set(bm, mode) __phylink_do_bit(__set_bit, bm, mode) +#define phylink_clear(bm, mode) __phylink_do_bit(__clear_bit, bm, mode) +#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode) + +void phylink_set_port_modes(unsigned long *bits); + +#endif -- cgit v1.2.3-55-g7522 From ce0aa27ff3f68ed4ea1631d33797e573b3508bfa Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:03:18 +0100 Subject: sfp: add sfp-bus to bridge between network devices and sfp cages Signed-off-by: Russell King Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/Makefile | 3 + drivers/net/phy/phylink.c | 157 +++++++++++++++ drivers/net/phy/sfp-bus.c | 475 ++++++++++++++++++++++++++++++++++++++++++++++ drivers/net/phy/sfp.h | 28 +++ include/linux/sfp.h | 434 ++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 1097 insertions(+) create mode 100644 drivers/net/phy/sfp-bus.c create mode 100644 drivers/net/phy/sfp.h create mode 100644 include/linux/sfp.h diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index c43e5b99fda4..4c16a10f420e 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -38,6 +38,9 @@ obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o +sfp-obj-$(CONFIG_SFP) += sfp-bus.o +obj-y += $(sfp-obj-y) $(sfp-obj-m) + obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o obj-$(CONFIG_AT803X_PHY) += at803x.o diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index af61d7d400af..02082f4a8a95 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -21,6 +21,7 @@ #include #include +#include "sfp.h" #include "swphy.h" #define SUPPORTED_INTERFACES \ @@ -32,6 +33,7 @@ enum { PHYLINK_DISABLE_STOPPED, + PHYLINK_DISABLE_LINK, }; struct phylink { @@ -54,6 +56,8 @@ struct phylink { struct work_struct resolve; bool mac_link_dropped; + + struct sfp_bus *sfp_bus; }; static inline void linkmode_zero(unsigned long *dst) @@ -466,6 +470,24 @@ static void phylink_run_resolve(struct phylink *pl) queue_work(system_power_efficient_wq, &pl->resolve); } +static const struct sfp_upstream_ops sfp_phylink_ops; + +static int phylink_register_sfp(struct phylink *pl, struct device_node *np) +{ + struct device_node *sfp_np; + + sfp_np = of_parse_phandle(np, "sfp", 0); + if (!sfp_np) + return 0; + + pl->sfp_bus = sfp_register_upstream(sfp_np, pl->netdev, pl, + &sfp_phylink_ops); + if (!pl->sfp_bus) + return -ENOMEM; + + return 0; +} + struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, phy_interface_t iface, const struct phylink_mac_ops *ops) { @@ -507,12 +529,21 @@ struct phylink *phylink_create(struct net_device *ndev, struct device_node *np, } } + ret = phylink_register_sfp(pl, np); + if (ret < 0) { + kfree(pl); + return ERR_PTR(ret); + } + return pl; } EXPORT_SYMBOL_GPL(phylink_create); void phylink_destroy(struct phylink *pl) { + if (pl->sfp_bus) + sfp_unregister_upstream(pl->sfp_bus); + cancel_work_sync(&pl->resolve); kfree(pl); } @@ -706,6 +737,8 @@ void phylink_start(struct phylink *pl) clear_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); phylink_run_resolve(pl); + if (pl->sfp_bus) + sfp_upstream_start(pl->sfp_bus); if (pl->phydev) phy_start(pl->phydev); } @@ -717,6 +750,8 @@ void phylink_stop(struct phylink *pl) if (pl->phydev) phy_stop(pl->phydev); + if (pl->sfp_bus) + sfp_upstream_stop(pl->sfp_bus); set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); flush_work(&pl->resolve); @@ -1166,4 +1201,126 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) } EXPORT_SYMBOL_GPL(phylink_mii_ioctl); + + +static int phylink_sfp_module_insert(void *upstream, + const struct sfp_eeprom_id *id) +{ + struct phylink *pl = upstream; + __ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, }; + struct phylink_link_state config; + phy_interface_t iface; + int mode, ret = 0; + bool changed; + u8 port; + + sfp_parse_support(pl->sfp_bus, id, support); + port = sfp_parse_port(pl->sfp_bus, id, support); + iface = 
sfp_parse_interface(pl->sfp_bus, id); + + WARN_ON(!lockdep_rtnl_is_held()); + + switch (iface) { + case PHY_INTERFACE_MODE_SGMII: + mode = MLO_AN_SGMII; + break; + case PHY_INTERFACE_MODE_1000BASEX: + mode = MLO_AN_8023Z; + break; + default: + return -EINVAL; + } + + memset(&config, 0, sizeof(config)); + linkmode_copy(config.advertising, support); + config.interface = iface; + config.speed = SPEED_UNKNOWN; + config.duplex = DUPLEX_UNKNOWN; + config.pause = MLO_PAUSE_AN; + config.an_enabled = pl->link_config.an_enabled; + + /* Ignore errors if we're expecting a PHY to attach later */ + ret = phylink_validate(pl, support, &config); + if (ret) { + netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n", + phylink_an_mode_str(mode), phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret); + return ret; + } + + netdev_dbg(pl->netdev, "requesting link mode %s/%s with support %*pb\n", + phylink_an_mode_str(mode), phy_modes(config.interface), + __ETHTOOL_LINK_MODE_MASK_NBITS, support); + + if (mode == MLO_AN_8023Z && pl->phydev) + return -EINVAL; + + changed = !bitmap_equal(pl->supported, support, + __ETHTOOL_LINK_MODE_MASK_NBITS); + if (changed) { + linkmode_copy(pl->supported, support); + linkmode_copy(pl->link_config.advertising, config.advertising); + } + + if (pl->link_an_mode != mode || + pl->link_config.interface != config.interface) { + pl->link_config.interface = config.interface; + pl->link_an_mode = mode; + + changed = true; + + netdev_info(pl->netdev, "switched to %s/%s link mode\n", + phylink_an_mode_str(mode), + phy_modes(config.interface)); + } + + pl->link_port = port; + + if (changed && !test_bit(PHYLINK_DISABLE_STOPPED, + &pl->phylink_disable_state)) + phylink_mac_config(pl, &pl->link_config); + + return ret; +} + +static void phylink_sfp_link_down(void *upstream) +{ + struct phylink *pl = upstream; + + WARN_ON(!lockdep_rtnl_is_held()); + + set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); + flush_work(&pl->resolve); + + netif_carrier_off(pl->netdev); +} + +static void phylink_sfp_link_up(void *upstream) +{ + struct phylink *pl = upstream; + + WARN_ON(!lockdep_rtnl_is_held()); + + clear_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); + phylink_run_resolve(pl); +} + +static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy) +{ + return phylink_connect_phy(upstream, phy); +} + +static void phylink_sfp_disconnect_phy(void *upstream) +{ + phylink_disconnect_phy(upstream); +} + +static const struct sfp_upstream_ops sfp_phylink_ops = { + .module_insert = phylink_sfp_module_insert, + .link_up = phylink_sfp_link_up, + .link_down = phylink_sfp_link_down, + .connect_phy = phylink_sfp_connect_phy, + .disconnect_phy = phylink_sfp_disconnect_phy, +}; + MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c new file mode 100644 index 000000000000..5cb5384697ea --- /dev/null +++ b/drivers/net/phy/sfp-bus.c @@ -0,0 +1,475 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "sfp.h" + +struct sfp_bus { + struct kref kref; + struct list_head node; + struct device_node *device_node; + + const struct sfp_socket_ops *socket_ops; + struct device *sfp_dev; + struct sfp *sfp; + + const struct sfp_upstream_ops *upstream_ops; + void *upstream; + struct net_device *netdev; + struct phy_device *phydev; + + bool registered; + bool started; +}; + + +int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, + unsigned long *support) +{ + int port; + + /* 
port is the physical connector, set this from the connector field. */ + switch (id->base.connector) { + case SFP_CONNECTOR_SC: + case SFP_CONNECTOR_FIBERJACK: + case SFP_CONNECTOR_LC: + case SFP_CONNECTOR_MT_RJ: + case SFP_CONNECTOR_MU: + case SFP_CONNECTOR_OPTICAL_PIGTAIL: + if (support) + phylink_set(support, FIBRE); + port = PORT_FIBRE; + break; + + case SFP_CONNECTOR_RJ45: + if (support) + phylink_set(support, TP); + port = PORT_TP; + break; + + case SFP_CONNECTOR_UNSPEC: + if (id->base.e1000_base_t) { + if (support) + phylink_set(support, TP); + port = PORT_TP; + break; + } + /* fallthrough */ + case SFP_CONNECTOR_SG: /* guess */ + case SFP_CONNECTOR_MPO_1X12: + case SFP_CONNECTOR_MPO_2X16: + case SFP_CONNECTOR_HSSDC_II: + case SFP_CONNECTOR_COPPER_PIGTAIL: + case SFP_CONNECTOR_NOSEPARATE: + case SFP_CONNECTOR_MXC_2X16: + port = PORT_OTHER; + break; + default: + dev_warn(bus->sfp_dev, "SFP: unknown connector id 0x%02x\n", + id->base.connector); + port = PORT_OTHER; + break; + } + + return port; +} +EXPORT_SYMBOL_GPL(sfp_parse_port); + +phy_interface_t sfp_parse_interface(struct sfp_bus *bus, + const struct sfp_eeprom_id *id) +{ + phy_interface_t iface; + + /* Setting the serdes link mode is guesswork: there's no field in + * the EEPROM which indicates what mode should be used. + * + * If the module wants 64b66b, then it must be >= 10G. + * + * If it's a gigabit-only fiber module, it probably does not have + * a PHY, so switch to 802.3z negotiation mode. Otherwise, switch + * to SGMII mode (which is required to support non-gigabit speeds). + */ + switch (id->base.encoding) { + case SFP_ENCODING_8472_64B66B: + iface = PHY_INTERFACE_MODE_10GKR; + break; + + case SFP_ENCODING_8B10B: + if (!id->base.e1000_base_t && + !id->base.e100_base_lx && + !id->base.e100_base_fx) + iface = PHY_INTERFACE_MODE_1000BASEX; + else + iface = PHY_INTERFACE_MODE_SGMII; + break; + + default: + iface = PHY_INTERFACE_MODE_NA; + dev_err(bus->sfp_dev, + "SFP module encoding does not support 8b10b nor 64b66b\n"); + break; + } + + return iface; +} +EXPORT_SYMBOL_GPL(sfp_parse_interface); + +void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, + unsigned long *support) +{ + phylink_set(support, Autoneg); + phylink_set(support, Pause); + phylink_set(support, Asym_Pause); + + /* Set ethtool support from the compliance fields. 
*/ + if (id->base.e10g_base_sr) + phylink_set(support, 10000baseSR_Full); + if (id->base.e10g_base_lr) + phylink_set(support, 10000baseLR_Full); + if (id->base.e10g_base_lrm) + phylink_set(support, 10000baseLRM_Full); + if (id->base.e10g_base_er) + phylink_set(support, 10000baseER_Full); + if (id->base.e1000_base_sx || + id->base.e1000_base_lx || + id->base.e1000_base_cx) + phylink_set(support, 1000baseX_Full); + if (id->base.e1000_base_t) { + phylink_set(support, 1000baseT_Half); + phylink_set(support, 1000baseT_Full); + } + + switch (id->base.extended_cc) { + case 0x00: /* Unspecified */ + break; + case 0x02: /* 100Gbase-SR4 or 25Gbase-SR */ + phylink_set(support, 100000baseSR4_Full); + phylink_set(support, 25000baseSR_Full); + break; + case 0x03: /* 100Gbase-LR4 or 25Gbase-LR */ + case 0x04: /* 100Gbase-ER4 or 25Gbase-ER */ + phylink_set(support, 100000baseLR4_ER4_Full); + break; + case 0x0b: /* 100Gbase-CR4 or 25Gbase-CR CA-L */ + case 0x0c: /* 25Gbase-CR CA-S */ + case 0x0d: /* 25Gbase-CR CA-N */ + phylink_set(support, 100000baseCR4_Full); + phylink_set(support, 25000baseCR_Full); + break; + default: + dev_warn(bus->sfp_dev, + "Unknown/unsupported extended compliance code: 0x%02x\n", + id->base.extended_cc); + break; + } + + /* For fibre channel SFP, derive possible BaseX modes */ + if (id->base.fc_speed_100 || + id->base.fc_speed_200 || + id->base.fc_speed_400) { + if (id->base.br_nominal >= 31) + phylink_set(support, 2500baseX_Full); + if (id->base.br_nominal >= 12) + phylink_set(support, 1000baseX_Full); + } + + switch (id->base.connector) { + case SFP_CONNECTOR_SC: + case SFP_CONNECTOR_FIBERJACK: + case SFP_CONNECTOR_LC: + case SFP_CONNECTOR_MT_RJ: + case SFP_CONNECTOR_MU: + case SFP_CONNECTOR_OPTICAL_PIGTAIL: + break; + + case SFP_CONNECTOR_UNSPEC: + if (id->base.e1000_base_t) + break; + + case SFP_CONNECTOR_SG: /* guess */ + case SFP_CONNECTOR_MPO_1X12: + case SFP_CONNECTOR_MPO_2X16: + case SFP_CONNECTOR_HSSDC_II: + case SFP_CONNECTOR_COPPER_PIGTAIL: + case SFP_CONNECTOR_NOSEPARATE: + case SFP_CONNECTOR_MXC_2X16: + default: + /* a guess at the supported link modes */ + dev_warn(bus->sfp_dev, + "Guessing link modes, please report...\n"); + phylink_set(support, 1000baseT_Half); + phylink_set(support, 1000baseT_Full); + break; + } +} +EXPORT_SYMBOL_GPL(sfp_parse_support); + + +static LIST_HEAD(sfp_buses); +static DEFINE_MUTEX(sfp_mutex); + +static const struct sfp_upstream_ops *sfp_get_upstream_ops(struct sfp_bus *bus) +{ + return bus->registered ? 
bus->upstream_ops : NULL; +} + +static struct sfp_bus *sfp_bus_get(struct device_node *np) +{ + struct sfp_bus *sfp, *new, *found = NULL; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + + mutex_lock(&sfp_mutex); + + list_for_each_entry(sfp, &sfp_buses, node) { + if (sfp->device_node == np) { + kref_get(&sfp->kref); + found = sfp; + break; + } + } + + if (!found && new) { + kref_init(&new->kref); + new->device_node = np; + list_add(&new->node, &sfp_buses); + found = new; + new = NULL; + } + + mutex_unlock(&sfp_mutex); + + kfree(new); + + return found; +} + +static void sfp_bus_release(struct kref *kref) __releases(sfp_mutex) +{ + struct sfp_bus *bus = container_of(kref, struct sfp_bus, kref); + + list_del(&bus->node); + mutex_unlock(&sfp_mutex); + kfree(bus); +} + +static void sfp_bus_put(struct sfp_bus *bus) +{ + kref_put_mutex(&bus->kref, sfp_bus_release, &sfp_mutex); +} + +static int sfp_register_bus(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = bus->upstream_ops; + int ret; + + if (ops) { + if (ops->link_down) + ops->link_down(bus->upstream); + if (ops->connect_phy && bus->phydev) { + ret = ops->connect_phy(bus->upstream, bus->phydev); + if (ret) + return ret; + } + } + if (bus->started) + bus->socket_ops->start(bus->sfp); + bus->registered = true; + return 0; +} + +static void sfp_unregister_bus(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = bus->upstream_ops; + + if (bus->registered) { + if (bus->started) + bus->socket_ops->stop(bus->sfp); + if (bus->phydev && ops && ops->disconnect_phy) + ops->disconnect_phy(bus->upstream); + } + bus->registered = false; +} + + +int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo) +{ + if (!bus->registered) + return -ENOIOCTLCMD; + return bus->socket_ops->module_info(bus->sfp, modinfo); +} +EXPORT_SYMBOL_GPL(sfp_get_module_info); + +int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, + u8 *data) +{ + if (!bus->registered) + return -ENOIOCTLCMD; + return bus->socket_ops->module_eeprom(bus->sfp, ee, data); +} +EXPORT_SYMBOL_GPL(sfp_get_module_eeprom); + +void sfp_upstream_start(struct sfp_bus *bus) +{ + if (bus->registered) + bus->socket_ops->start(bus->sfp); + bus->started = true; +} +EXPORT_SYMBOL_GPL(sfp_upstream_start); + +void sfp_upstream_stop(struct sfp_bus *bus) +{ + if (bus->registered) + bus->socket_ops->stop(bus->sfp); + bus->started = false; +} +EXPORT_SYMBOL_GPL(sfp_upstream_stop); + +struct sfp_bus *sfp_register_upstream(struct device_node *np, + struct net_device *ndev, void *upstream, + const struct sfp_upstream_ops *ops) +{ + struct sfp_bus *bus = sfp_bus_get(np); + int ret = 0; + + if (bus) { + rtnl_lock(); + bus->upstream_ops = ops; + bus->upstream = upstream; + bus->netdev = ndev; + + if (bus->sfp) + ret = sfp_register_bus(bus); + rtnl_unlock(); + } + + if (ret) { + sfp_bus_put(bus); + bus = NULL; + } + + return bus; +} +EXPORT_SYMBOL_GPL(sfp_register_upstream); + +void sfp_unregister_upstream(struct sfp_bus *bus) +{ + rtnl_lock(); + sfp_unregister_bus(bus); + bus->upstream = NULL; + bus->netdev = NULL; + rtnl_unlock(); + + sfp_bus_put(bus); +} +EXPORT_SYMBOL_GPL(sfp_unregister_upstream); + + +/* Socket driver entry points */ +int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + int ret = 0; + + if (ops && ops->connect_phy) + ret = ops->connect_phy(bus->upstream, phydev); + + if (ret == 0) + bus->phydev = phydev; + + return ret; +} +EXPORT_SYMBOL_GPL(sfp_add_phy); + +void 
sfp_remove_phy(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->disconnect_phy) + ops->disconnect_phy(bus->upstream); + bus->phydev = NULL; +} +EXPORT_SYMBOL_GPL(sfp_remove_phy); + + +void sfp_link_up(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->link_up) + ops->link_up(bus->upstream); +} +EXPORT_SYMBOL_GPL(sfp_link_up); + +void sfp_link_down(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->link_down) + ops->link_down(bus->upstream); +} +EXPORT_SYMBOL_GPL(sfp_link_down); + +int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + int ret = 0; + + if (ops && ops->module_insert) + ret = ops->module_insert(bus->upstream, id); + + return ret; +} +EXPORT_SYMBOL_GPL(sfp_module_insert); + +void sfp_module_remove(struct sfp_bus *bus) +{ + const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); + + if (ops && ops->module_remove) + ops->module_remove(bus->upstream); +} +EXPORT_SYMBOL_GPL(sfp_module_remove); + +struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, + const struct sfp_socket_ops *ops) +{ + struct sfp_bus *bus = sfp_bus_get(dev->of_node); + int ret = 0; + + if (bus) { + rtnl_lock(); + bus->sfp_dev = dev; + bus->sfp = sfp; + bus->socket_ops = ops; + + if (bus->netdev) + ret = sfp_register_bus(bus); + rtnl_unlock(); + } + + if (ret) { + sfp_bus_put(bus); + bus = NULL; + } + + return bus; +} +EXPORT_SYMBOL_GPL(sfp_register_socket); + +void sfp_unregister_socket(struct sfp_bus *bus) +{ + rtnl_lock(); + sfp_unregister_bus(bus); + bus->sfp_dev = NULL; + bus->sfp = NULL; + bus->socket_ops = NULL; + rtnl_unlock(); + + sfp_bus_put(bus); +} +EXPORT_SYMBOL_GPL(sfp_unregister_socket); diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h new file mode 100644 index 000000000000..31b0acf337e2 --- /dev/null +++ b/drivers/net/phy/sfp.h @@ -0,0 +1,28 @@ +#ifndef SFP_H +#define SFP_H + +#include +#include + +struct sfp; + +struct sfp_socket_ops { + void (*start)(struct sfp *sfp); + void (*stop)(struct sfp *sfp); + int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); + int (*module_eeprom)(struct sfp *sfp, struct ethtool_eeprom *ee, + u8 *data); +}; + +int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev); +void sfp_remove_phy(struct sfp_bus *bus); +void sfp_link_up(struct sfp_bus *bus); +void sfp_link_down(struct sfp_bus *bus); +int sfp_module_insert(struct sfp_bus *bus, const struct sfp_eeprom_id *id); +void sfp_module_remove(struct sfp_bus *bus); +int sfp_link_configure(struct sfp_bus *bus, const struct sfp_eeprom_id *id); +struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, + const struct sfp_socket_ops *ops); +void sfp_unregister_socket(struct sfp_bus *bus); + +#endif diff --git a/include/linux/sfp.h b/include/linux/sfp.h new file mode 100644 index 000000000000..4a906f560817 --- /dev/null +++ b/include/linux/sfp.h @@ -0,0 +1,434 @@ +#ifndef LINUX_SFP_H +#define LINUX_SFP_H + +#include + +struct __packed sfp_eeprom_base { + u8 phys_id; + u8 phys_ext_id; + u8 connector; +#if defined __BIG_ENDIAN_BITFIELD + u8 e10g_base_er:1; + u8 e10g_base_lrm:1; + u8 e10g_base_lr:1; + u8 e10g_base_sr:1; + u8 if_1x_sx:1; + u8 if_1x_lx:1; + u8 if_1x_copper_active:1; + u8 if_1x_copper_passive:1; + + u8 escon_mmf_1310_led:1; + u8 escon_smf_1310_laser:1; + u8 
sonet_oc192_short_reach:1; + u8 sonet_reach_bit1:1; + u8 sonet_reach_bit2:1; + u8 sonet_oc48_long_reach:1; + u8 sonet_oc48_intermediate_reach:1; + u8 sonet_oc48_short_reach:1; + + u8 unallocated_5_7:1; + u8 sonet_oc12_smf_long_reach:1; + u8 sonet_oc12_smf_intermediate_reach:1; + u8 sonet_oc12_short_reach:1; + u8 unallocated_5_3:1; + u8 sonet_oc3_smf_long_reach:1; + u8 sonet_oc3_smf_intermediate_reach:1; + u8 sonet_oc3_short_reach:1; + + u8 e_base_px:1; + u8 e_base_bx10:1; + u8 e100_base_fx:1; + u8 e100_base_lx:1; + u8 e1000_base_t:1; + u8 e1000_base_cx:1; + u8 e1000_base_lx:1; + u8 e1000_base_sx:1; + + u8 fc_ll_v:1; + u8 fc_ll_s:1; + u8 fc_ll_i:1; + u8 fc_ll_l:1; + u8 fc_ll_m:1; + u8 fc_tech_sa:1; + u8 fc_tech_lc:1; + u8 fc_tech_electrical_inter_enclosure:1; + + u8 fc_tech_electrical_intra_enclosure:1; + u8 fc_tech_sn:1; + u8 fc_tech_sl:1; + u8 fc_tech_ll:1; + u8 sfp_ct_active:1; + u8 sfp_ct_passive:1; + u8 unallocated_8_1:1; + u8 unallocated_8_0:1; + + u8 fc_media_tw:1; + u8 fc_media_tp:1; + u8 fc_media_mi:1; + u8 fc_media_tv:1; + u8 fc_media_m6:1; + u8 fc_media_m5:1; + u8 unallocated_9_1:1; + u8 fc_media_sm:1; + + u8 fc_speed_1200:1; + u8 fc_speed_800:1; + u8 fc_speed_1600:1; + u8 fc_speed_400:1; + u8 fc_speed_3200:1; + u8 fc_speed_200:1; + u8 unallocated_10_1:1; + u8 fc_speed_100:1; +#elif defined __LITTLE_ENDIAN_BITFIELD + u8 if_1x_copper_passive:1; + u8 if_1x_copper_active:1; + u8 if_1x_lx:1; + u8 if_1x_sx:1; + u8 e10g_base_sr:1; + u8 e10g_base_lr:1; + u8 e10g_base_lrm:1; + u8 e10g_base_er:1; + + u8 sonet_oc3_short_reach:1; + u8 sonet_oc3_smf_intermediate_reach:1; + u8 sonet_oc3_smf_long_reach:1; + u8 unallocated_5_3:1; + u8 sonet_oc12_short_reach:1; + u8 sonet_oc12_smf_intermediate_reach:1; + u8 sonet_oc12_smf_long_reach:1; + u8 unallocated_5_7:1; + + u8 sonet_oc48_short_reach:1; + u8 sonet_oc48_intermediate_reach:1; + u8 sonet_oc48_long_reach:1; + u8 sonet_reach_bit2:1; + u8 sonet_reach_bit1:1; + u8 sonet_oc192_short_reach:1; + u8 escon_smf_1310_laser:1; + u8 escon_mmf_1310_led:1; + + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e_base_bx10:1; + u8 e_base_px:1; + + u8 fc_tech_electrical_inter_enclosure:1; + u8 fc_tech_lc:1; + u8 fc_tech_sa:1; + u8 fc_ll_m:1; + u8 fc_ll_l:1; + u8 fc_ll_i:1; + u8 fc_ll_s:1; + u8 fc_ll_v:1; + + u8 unallocated_8_0:1; + u8 unallocated_8_1:1; + u8 sfp_ct_passive:1; + u8 sfp_ct_active:1; + u8 fc_tech_ll:1; + u8 fc_tech_sl:1; + u8 fc_tech_sn:1; + u8 fc_tech_electrical_intra_enclosure:1; + + u8 fc_media_sm:1; + u8 unallocated_9_1:1; + u8 fc_media_m5:1; + u8 fc_media_m6:1; + u8 fc_media_tv:1; + u8 fc_media_mi:1; + u8 fc_media_tp:1; + u8 fc_media_tw:1; + + u8 fc_speed_100:1; + u8 unallocated_10_1:1; + u8 fc_speed_200:1; + u8 fc_speed_3200:1; + u8 fc_speed_400:1; + u8 fc_speed_1600:1; + u8 fc_speed_800:1; + u8 fc_speed_1200:1; +#else +#error Unknown Endian +#endif + u8 encoding; + u8 br_nominal; + u8 rate_id; + u8 link_len[6]; + char vendor_name[16]; + u8 extended_cc; + char vendor_oui[3]; + char vendor_pn[16]; + char vendor_rev[4]; + union { + __be16 optical_wavelength; + u8 cable_spec; + }; + u8 reserved62; + u8 cc_base; +}; + +struct __packed sfp_eeprom_ext { + __be16 options; + u8 br_max; + u8 br_min; + char vendor_sn[16]; + char datecode[8]; + u8 diagmon; + u8 enhopts; + u8 sff8472_compliance; + u8 cc_ext; +}; + +struct __packed sfp_eeprom_id { + struct sfp_eeprom_base base; + struct sfp_eeprom_ext ext; +}; + +/* SFP EEPROM registers */ +enum { + SFP_PHYS_ID = 0x00, + 
SFP_PHYS_EXT_ID = 0x01, + SFP_CONNECTOR = 0x02, + SFP_COMPLIANCE = 0x03, + SFP_ENCODING = 0x0b, + SFP_BR_NOMINAL = 0x0c, + SFP_RATE_ID = 0x0d, + SFP_LINK_LEN_SM_KM = 0x0e, + SFP_LINK_LEN_SM_100M = 0x0f, + SFP_LINK_LEN_50UM_OM2_10M = 0x10, + SFP_LINK_LEN_62_5UM_OM1_10M = 0x11, + SFP_LINK_LEN_COPPER_1M = 0x12, + SFP_LINK_LEN_50UM_OM4_10M = 0x12, + SFP_LINK_LEN_50UM_OM3_10M = 0x13, + SFP_VENDOR_NAME = 0x14, + SFP_VENDOR_OUI = 0x25, + SFP_VENDOR_PN = 0x28, + SFP_VENDOR_REV = 0x38, + SFP_OPTICAL_WAVELENGTH_MSB = 0x3c, + SFP_OPTICAL_WAVELENGTH_LSB = 0x3d, + SFP_CABLE_SPEC = 0x3c, + SFP_CC_BASE = 0x3f, + SFP_OPTIONS = 0x40, /* 2 bytes, MSB, LSB */ + SFP_BR_MAX = 0x42, + SFP_BR_MIN = 0x43, + SFP_VENDOR_SN = 0x44, + SFP_DATECODE = 0x54, + SFP_DIAGMON = 0x5c, + SFP_ENHOPTS = 0x5d, + SFP_SFF8472_COMPLIANCE = 0x5e, + SFP_CC_EXT = 0x5f, + + SFP_PHYS_ID_SFP = 0x03, + SFP_PHYS_EXT_ID_SFP = 0x04, + SFP_CONNECTOR_UNSPEC = 0x00, + /* codes 01-05 not supportable on SFP, but some modules have single SC */ + SFP_CONNECTOR_SC = 0x01, + SFP_CONNECTOR_FIBERJACK = 0x06, + SFP_CONNECTOR_LC = 0x07, + SFP_CONNECTOR_MT_RJ = 0x08, + SFP_CONNECTOR_MU = 0x09, + SFP_CONNECTOR_SG = 0x0a, + SFP_CONNECTOR_OPTICAL_PIGTAIL = 0x0b, + SFP_CONNECTOR_MPO_1X12 = 0x0c, + SFP_CONNECTOR_MPO_2X16 = 0x0d, + SFP_CONNECTOR_HSSDC_II = 0x20, + SFP_CONNECTOR_COPPER_PIGTAIL = 0x21, + SFP_CONNECTOR_RJ45 = 0x22, + SFP_CONNECTOR_NOSEPARATE = 0x23, + SFP_CONNECTOR_MXC_2X16 = 0x24, + SFP_ENCODING_UNSPEC = 0x00, + SFP_ENCODING_8B10B = 0x01, + SFP_ENCODING_4B5B = 0x02, + SFP_ENCODING_NRZ = 0x03, + SFP_ENCODING_8472_MANCHESTER = 0x04, + SFP_ENCODING_8472_SONET = 0x05, + SFP_ENCODING_8472_64B66B = 0x06, + SFP_ENCODING_256B257B = 0x07, + SFP_ENCODING_PAM4 = 0x08, + SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13), + SFP_OPTIONS_PAGING_A2 = BIT(12), + SFP_OPTIONS_RETIMER = BIT(11), + SFP_OPTIONS_COOLED_XCVR = BIT(10), + SFP_OPTIONS_POWER_DECL = BIT(9), + SFP_OPTIONS_RX_LINEAR_OUT = BIT(8), + SFP_OPTIONS_RX_DECISION_THRESH = BIT(7), + SFP_OPTIONS_TUNABLE_TX = BIT(6), + SFP_OPTIONS_RATE_SELECT = BIT(5), + SFP_OPTIONS_TX_DISABLE = BIT(4), + SFP_OPTIONS_TX_FAULT = BIT(3), + SFP_OPTIONS_LOS_INVERTED = BIT(2), + SFP_OPTIONS_LOS_NORMAL = BIT(1), + SFP_DIAGMON_DDM = BIT(6), + SFP_DIAGMON_INT_CAL = BIT(5), + SFP_DIAGMON_EXT_CAL = BIT(4), + SFP_DIAGMON_RXPWR_AVG = BIT(3), + SFP_DIAGMON_ADDRMODE = BIT(2), + SFP_ENHOPTS_ALARMWARN = BIT(7), + SFP_ENHOPTS_SOFT_TX_DISABLE = BIT(6), + SFP_ENHOPTS_SOFT_TX_FAULT = BIT(5), + SFP_ENHOPTS_SOFT_RX_LOS = BIT(4), + SFP_ENHOPTS_SOFT_RATE_SELECT = BIT(3), + SFP_ENHOPTS_APP_SELECT_SFF8079 = BIT(2), + SFP_ENHOPTS_SOFT_RATE_SFF8431 = BIT(1), + SFP_SFF8472_COMPLIANCE_NONE = 0x00, + SFP_SFF8472_COMPLIANCE_REV9_3 = 0x01, + SFP_SFF8472_COMPLIANCE_REV9_5 = 0x02, + SFP_SFF8472_COMPLIANCE_REV10_2 = 0x03, + SFP_SFF8472_COMPLIANCE_REV10_4 = 0x04, + SFP_SFF8472_COMPLIANCE_REV11_0 = 0x05, + SFP_SFF8472_COMPLIANCE_REV11_3 = 0x06, + SFP_SFF8472_COMPLIANCE_REV11_4 = 0x07, + SFP_SFF8472_COMPLIANCE_REV12_0 = 0x08, +}; + +/* SFP Diagnostics */ +enum { + /* Alarm and warnings stored MSB at lower address then LSB */ + SFP_TEMP_HIGH_ALARM = 0x00, + SFP_TEMP_LOW_ALARM = 0x02, + SFP_TEMP_HIGH_WARN = 0x04, + SFP_TEMP_LOW_WARN = 0x06, + SFP_VOLT_HIGH_ALARM = 0x08, + SFP_VOLT_LOW_ALARM = 0x0a, + SFP_VOLT_HIGH_WARN = 0x0c, + SFP_VOLT_LOW_WARN = 0x0e, + SFP_BIAS_HIGH_ALARM = 0x10, + SFP_BIAS_LOW_ALARM = 0x12, + SFP_BIAS_HIGH_WARN = 0x14, + SFP_BIAS_LOW_WARN = 0x16, + SFP_TXPWR_HIGH_ALARM = 0x18, + SFP_TXPWR_LOW_ALARM = 0x1a, + SFP_TXPWR_HIGH_WARN = 0x1c, + 
SFP_TXPWR_LOW_WARN = 0x1e, + SFP_RXPWR_HIGH_ALARM = 0x20, + SFP_RXPWR_LOW_ALARM = 0x22, + SFP_RXPWR_HIGH_WARN = 0x24, + SFP_RXPWR_LOW_WARN = 0x26, + SFP_LASER_TEMP_HIGH_ALARM = 0x28, + SFP_LASER_TEMP_LOW_ALARM = 0x2a, + SFP_LASER_TEMP_HIGH_WARN = 0x2c, + SFP_LASER_TEMP_LOW_WARN = 0x2e, + SFP_TEC_CUR_HIGH_ALARM = 0x30, + SFP_TEC_CUR_LOW_ALARM = 0x32, + SFP_TEC_CUR_HIGH_WARN = 0x34, + SFP_TEC_CUR_LOW_WARN = 0x36, + SFP_CAL_RXPWR4 = 0x38, + SFP_CAL_RXPWR3 = 0x3c, + SFP_CAL_RXPWR2 = 0x40, + SFP_CAL_RXPWR1 = 0x44, + SFP_CAL_RXPWR0 = 0x48, + SFP_CAL_TXI_SLOPE = 0x4c, + SFP_CAL_TXI_OFFSET = 0x4e, + SFP_CAL_TXPWR_SLOPE = 0x50, + SFP_CAL_TXPWR_OFFSET = 0x52, + SFP_CAL_T_SLOPE = 0x54, + SFP_CAL_T_OFFSET = 0x56, + SFP_CAL_V_SLOPE = 0x58, + SFP_CAL_V_OFFSET = 0x5a, + SFP_CHKSUM = 0x5f, + + SFP_TEMP = 0x60, + SFP_VCC = 0x62, + SFP_TX_BIAS = 0x64, + SFP_TX_POWER = 0x66, + SFP_RX_POWER = 0x68, + SFP_LASER_TEMP = 0x6a, + SFP_TEC_CUR = 0x6c, + + SFP_STATUS = 0x6e, + SFP_ALARM = 0x70, + + SFP_EXT_STATUS = 0x76, + SFP_VSL = 0x78, + SFP_PAGE = 0x7f, +}; + +struct device_node; +struct ethtool_eeprom; +struct ethtool_modinfo; +struct net_device; +struct sfp_bus; + +struct sfp_upstream_ops { + int (*module_insert)(void *, const struct sfp_eeprom_id *id); + void (*module_remove)(void *); + void (*link_down)(void *); + void (*link_up)(void *); + int (*connect_phy)(void *, struct phy_device *); + void (*disconnect_phy)(void *); +}; + +#if IS_ENABLED(CONFIG_SFP) +int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, + unsigned long *support); +phy_interface_t sfp_parse_interface(struct sfp_bus *bus, + const struct sfp_eeprom_id *id); +void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, + unsigned long *support); + +int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo); +int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, + u8 *data); +void sfp_upstream_start(struct sfp_bus *bus); +void sfp_upstream_stop(struct sfp_bus *bus); +struct sfp_bus *sfp_register_upstream(struct device_node *np, + struct net_device *ndev, void *upstream, + const struct sfp_upstream_ops *ops); +void sfp_unregister_upstream(struct sfp_bus *bus); +#else +static inline int sfp_parse_port(struct sfp_bus *bus, + const struct sfp_eeprom_id *id, + unsigned long *support) +{ + return PORT_OTHER; +} + +static inline phy_interface_t sfp_parse_interface(struct sfp_bus *bus, + const struct sfp_eeprom_id *id) +{ + return PHY_INTERFACE_MODE_NA; +} + +static inline void sfp_parse_support(struct sfp_bus *bus, + const struct sfp_eeprom_id *id, + unsigned long *support) +{ +} + +static inline int sfp_get_module_info(struct sfp_bus *bus, + struct ethtool_modinfo *modinfo) +{ + return -EOPNOTSUPP; +} + +static inline int sfp_get_module_eeprom(struct sfp_bus *bus, + struct ethtool_eeprom *ee, u8 *data) +{ + return -EOPNOTSUPP; +} + +static inline void sfp_upstream_start(struct sfp_bus *bus) +{ +} + +static inline void sfp_upstream_stop(struct sfp_bus *bus) +{ +} + +static inline struct sfp_bus *sfp_register_upstream(struct device_node *np, + struct net_device *ndev, void *upstream, + const struct sfp_upstream_ops *ops) +{ + return (struct sfp_bus *)-1; +} + +static inline void sfp_unregister_upstream(struct sfp_bus *bus) +{ +} +#endif + +#endif -- cgit v1.2.3-55-g7522 From 770a1ad55763a8a783cb71078e0b33a6b91ad92b Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:03:23 +0100 Subject: phylink: add module EEPROM support Add support for reading module EEPROMs 
through phylink. Reviewed-by: Florian Fainelli Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 28 ++++++++++++++++++++++++++++ include/linux/phylink.h | 3 +++ 2 files changed, 31 insertions(+) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 02082f4a8a95..026060c95b82 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1020,6 +1020,34 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl, } EXPORT_SYMBOL_GPL(phylink_ethtool_set_pauseparam); +int phylink_ethtool_get_module_info(struct phylink *pl, + struct ethtool_modinfo *modinfo) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->sfp_bus) + ret = sfp_get_module_info(pl->sfp_bus, modinfo); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_info); + +int phylink_ethtool_get_module_eeprom(struct phylink *pl, + struct ethtool_eeprom *ee, u8 *buf) +{ + int ret = -EOPNOTSUPP; + + WARN_ON(!lockdep_rtnl_is_held()); + + if (pl->sfp_bus) + ret = sfp_get_module_eeprom(pl->sfp_bus, ee, buf); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_ethtool_get_module_eeprom); + int phylink_init_eee(struct phylink *pl, bool clk_stop_enable) { int ret = -EPROTONOSUPPORT; diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 76f054f39684..af67edd4ae38 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -125,6 +125,9 @@ void phylink_ethtool_get_pauseparam(struct phylink *, struct ethtool_pauseparam *); int phylink_ethtool_set_pauseparam(struct phylink *, struct ethtool_pauseparam *); +int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *); +int phylink_ethtool_get_module_eeprom(struct phylink *, + struct ethtool_eeprom *, u8 *); int phylink_init_eee(struct phylink *, bool); int phylink_get_eee_err(struct phylink *); int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); -- cgit v1.2.3-55-g7522 From ecbd87b8430419199cc9dd91598d5552a180f558 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:03:28 +0100 Subject: phylink: add support for MII ioctl access to Clause 45 PHYs Add support for reading and writing the clause 45 MII registers. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/phylink.c | 157 ++++++++++++++++++++++++++++++++++++---------- 1 file changed, 124 insertions(+), 33 deletions(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 026060c95b82..dc0f4d7b7dd2 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1127,16 +1127,93 @@ static int phylink_mii_emul_read(struct net_device *ndev, unsigned int reg, return val; } +static int phylink_phy_read(struct phylink *pl, unsigned int phy_id, + unsigned int reg) +{ + struct phy_device *phydev = pl->phydev; + int prtad, devad; + + if (mdio_phy_id_is_c45(phy_id)) { + prtad = mdio_phy_id_prtad(phy_id); + devad = mdio_phy_id_devad(phy_id); + devad = MII_ADDR_C45 | devad << 16 | reg; + } else if (phydev->is_c45) { + switch (reg) { + case MII_BMCR: + case MII_BMSR: + case MII_PHYSID1: + case MII_PHYSID2: + devad = __ffs(phydev->c45_ids.devices_in_package); + break; + case MII_ADVERTISE: + case MII_LPA: + if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + return -EINVAL; + devad = MDIO_MMD_AN; + if (reg == MII_ADVERTISE) + reg = MDIO_AN_ADVERTISE; + else + reg = MDIO_AN_LPA; + break; + default: + return -EINVAL; + } + prtad = phy_id; + devad = MII_ADDR_C45 | devad << 16 | reg; + } else { + prtad = phy_id; + devad = reg; + } + return mdiobus_read(pl->phydev->mdio.bus, prtad, devad); +} + +static int phylink_phy_write(struct phylink *pl, unsigned int phy_id, + unsigned int reg, unsigned int val) +{ + struct phy_device *phydev = pl->phydev; + int prtad, devad; + + if (mdio_phy_id_is_c45(phy_id)) { + prtad = mdio_phy_id_prtad(phy_id); + devad = mdio_phy_id_devad(phy_id); + devad = MII_ADDR_C45 | devad << 16 | reg; + } else if (phydev->is_c45) { + switch (reg) { + case MII_BMCR: + case MII_BMSR: + case MII_PHYSID1: + case MII_PHYSID2: + devad = __ffs(phydev->c45_ids.devices_in_package); + break; + case MII_ADVERTISE: + case MII_LPA: + if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN)) + return -EINVAL; + devad = MDIO_MMD_AN; + if (reg == MII_ADVERTISE) + reg = MDIO_AN_ADVERTISE; + else + reg = MDIO_AN_LPA; + break; + default: + return -EINVAL; + } + prtad = phy_id; + devad = MII_ADDR_C45 | devad << 16 | reg; + } else { + prtad = phy_id; + devad = reg; + } + + return mdiobus_write(phydev->mdio.bus, prtad, devad, val); +} + static int phylink_mii_read(struct phylink *pl, unsigned int phy_id, unsigned int reg) { struct phylink_link_state state; int val = 0xffff; - /* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */ - if (pl->phydev) - return mdiobus_read(pl->phydev->mdio.bus, phy_id, reg); - switch (pl->link_an_mode) { case MLO_AN_FIXED: if (phy_id == 0) { @@ -1169,12 +1246,6 @@ static int phylink_mii_read(struct phylink *pl, unsigned int phy_id, static int phylink_mii_write(struct phylink *pl, unsigned int phy_id, unsigned int reg, unsigned int val) { - /* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */ - if (pl->phydev) { - mdiobus_write(pl->phydev->mdio.bus, phy_id, reg, val); - return 0; - } - switch (pl->link_an_mode) { case MLO_AN_FIXED: break; @@ -1193,36 +1264,56 @@ static int phylink_mii_write(struct phylink *pl, unsigned int phy_id, int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd) { - struct mii_ioctl_data *mii_data = if_mii(ifr); - int val, ret; + struct mii_ioctl_data *mii = if_mii(ifr); + int ret; WARN_ON(!lockdep_rtnl_is_held()); - switch (cmd) { - case SIOCGMIIPHY: - mii_data->phy_id = pl->phydev ? 
pl->phydev->mdio.addr : 0; - /* fallthrough */ + if (pl->phydev) { + /* PHYs only exist for MLO_AN_PHY and MLO_AN_SGMII */ + switch (cmd) { + case SIOCGMIIPHY: + mii->phy_id = pl->phydev->mdio.addr; + + case SIOCGMIIREG: + ret = phylink_phy_read(pl, mii->phy_id, mii->reg_num); + if (ret >= 0) { + mii->val_out = ret; + ret = 0; + } + break; - case SIOCGMIIREG: - val = phylink_mii_read(pl, mii_data->phy_id, mii_data->reg_num); - if (val < 0) { - ret = val; - } else { - mii_data->val_out = val; - ret = 0; + case SIOCSMIIREG: + ret = phylink_phy_write(pl, mii->phy_id, mii->reg_num, + mii->val_in); + break; + + default: + ret = phy_mii_ioctl(pl->phydev, ifr, cmd); + break; } - break; + } else { + switch (cmd) { + case SIOCGMIIPHY: + mii->phy_id = 0; + + case SIOCGMIIREG: + ret = phylink_mii_read(pl, mii->phy_id, mii->reg_num); + if (ret >= 0) { + mii->val_out = ret; + ret = 0; + } + break; - case SIOCSMIIREG: - ret = phylink_mii_write(pl, mii_data->phy_id, mii_data->reg_num, - mii_data->val_in); - break; + case SIOCSMIIREG: + ret = phylink_mii_write(pl, mii->phy_id, mii->reg_num, + mii->val_in); + break; - default: - ret = -EOPNOTSUPP; - if (pl->phydev) - ret = phy_mii_ioctl(pl->phydev, ifr, cmd); - break; + default: + ret = -EOPNOTSUPP; + break; + } } return ret; -- cgit v1.2.3-55-g7522 From da7c1862f05842236490a8e65cc6f57d8160c05d Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:03:34 +0100 Subject: phylink: add in-band autonegotiation support for 10GBase-KR mode. Add in-band autonegotation support for 10GBase-KR mode. Signed-off-by: Russell King Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index dc0f4d7b7dd2..32917bdd1432 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -266,6 +266,23 @@ static int phylink_parse_mode(struct phylink *pl, struct device_node *np) pl->link_an_mode = MLO_AN_8023Z; break; + case PHY_INTERFACE_MODE_10GKR: + phylink_set(pl->supported, 10baseT_Half); + phylink_set(pl->supported, 10baseT_Full); + phylink_set(pl->supported, 100baseT_Half); + phylink_set(pl->supported, 100baseT_Full); + phylink_set(pl->supported, 1000baseT_Half); + phylink_set(pl->supported, 1000baseT_Full); + phylink_set(pl->supported, 1000baseX_Full); + phylink_set(pl->supported, 10000baseKR_Full); + phylink_set(pl->supported, 10000baseCR_Full); + phylink_set(pl->supported, 10000baseSR_Full); + phylink_set(pl->supported, 10000baseLR_Full); + phylink_set(pl->supported, 10000baseLRM_Full); + phylink_set(pl->supported, 10000baseER_Full); + pl->link_an_mode = MLO_AN_SGMII; + break; + default: netdev_err(pl->netdev, "incorrect link mode %s for in-band status\n", -- cgit v1.2.3-55-g7522 From 73970055450eebc6fc36fd170e56cc45889d0093 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 25 Jul 2017 15:03:39 +0100 Subject: sfp: add SFP module support Add support for SFP hotpluggable modules via sfp-bus and phylink. This supports both copper and optical SFP modules, which require different Serdes modes in order to properly negotiate the link. Optical SFP modules typically require the Serdes link to be talking 1000BaseX mode - this is the gigabit ethernet mode defined by the 802.3 standard. 
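The two serdes configurations described here (the commit message continues with the copper case just below) are what a MAC driver ultimately has to program in its phylink mac_config callback. A minimal sketch of that mapping follows; the foo_* names, the FOO_PCS_* constants and the stub helpers are invented for illustration, and only the MLO_AN_* modes and the callback signature come from the phylink API introduced earlier in this series.

#include <linux/netdevice.h>
#include <linux/phylink.h>

struct foo_priv {			/* placeholder private data */
	void __iomem *pcs_regs;
};

/* FOO_PCS_* and foo_set_pcs_mode() stand in for whatever PCS/serdes
 * register interface the real MAC exposes.
 */
enum { FOO_PCS_1000BASEX, FOO_PCS_SGMII };
static void foo_set_pcs_mode(struct foo_priv *priv, int pcs_mode) { }

static void foo_mac_config(struct net_device *ndev, unsigned int mode,
			   const struct phylink_link_state *state)
{
	struct foo_priv *priv = netdev_priv(ndev);

	switch (mode) {
	case MLO_AN_8023Z:
		/* Optical module: run the serdes in 1000base-X and let
		 * 802.3z in-band autonegotiation resolve pause settings.
		 */
		foo_set_pcs_mode(priv, FOO_PCS_1000BASEX);
		break;
	case MLO_AN_SGMII:
		/* Copper module with an on-board PHY: the SGMII control
		 * word carries the PHY-resolved speed and duplex.
		 */
		foo_set_pcs_mode(priv, FOO_PCS_SGMII);
		break;
	default:
		/* MLO_AN_FIXED / MLO_AN_PHY: program state->speed,
		 * state->duplex and state->pause directly.
		 */
		break;
	}
}

A real driver would typically also write the 1000base-X advertisement from state->advertising in the MLO_AN_8023Z case before restarting in-band autonegotiation.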
Copper SFP modules typically integrate a PHY in the module to convert from Serdes to copper, and the PHY will be configured by the vendor to either present a 1000BaseX Serdes link (for fixed 1000BaseT) or a SGMII Serdes link. However, this is vendor defined, so we instead detect the PHY, switch the link to SGMII mode, and use traditional PHY based negotiation. Signed-off-by: Russell King Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 5 + drivers/net/phy/Makefile | 1 + drivers/net/phy/sfp.c | 915 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 921 insertions(+) create mode 100644 drivers/net/phy/sfp.c diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index a0a9e03e2f80..bf73969a9d2b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -200,6 +200,11 @@ config LED_TRIGGER_PHY comment "MII PHY device drivers" +config SFP + tristate "SFP cage support" + depends on I2C && PHYLINK + select MDIO_I2C + config AMD_PHY tristate "AMD PHYs" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 4c16a10f420e..7237255bad68 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o +obj-$(CONFIG_SFP) += sfp.o sfp-obj-$(CONFIG_SFP) += sfp-bus.o obj-y += $(sfp-obj-y) $(sfp-obj-m) diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c new file mode 100644 index 000000000000..fb2cf4342f48 --- /dev/null +++ b/drivers/net/phy/sfp.c @@ -0,0 +1,915 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mdio-i2c.h" +#include "sfp.h" +#include "swphy.h" + +enum { + GPIO_MODDEF0, + GPIO_LOS, + GPIO_TX_FAULT, + GPIO_TX_DISABLE, + GPIO_RATE_SELECT, + GPIO_MAX, + + SFP_F_PRESENT = BIT(GPIO_MODDEF0), + SFP_F_LOS = BIT(GPIO_LOS), + SFP_F_TX_FAULT = BIT(GPIO_TX_FAULT), + SFP_F_TX_DISABLE = BIT(GPIO_TX_DISABLE), + SFP_F_RATE_SELECT = BIT(GPIO_RATE_SELECT), + + SFP_E_INSERT = 0, + SFP_E_REMOVE, + SFP_E_DEV_DOWN, + SFP_E_DEV_UP, + SFP_E_TX_FAULT, + SFP_E_TX_CLEAR, + SFP_E_LOS_HIGH, + SFP_E_LOS_LOW, + SFP_E_TIMEOUT, + + SFP_MOD_EMPTY = 0, + SFP_MOD_PROBE, + SFP_MOD_PRESENT, + SFP_MOD_ERROR, + + SFP_DEV_DOWN = 0, + SFP_DEV_UP, + + SFP_S_DOWN = 0, + SFP_S_INIT, + SFP_S_WAIT_LOS, + SFP_S_LINK_UP, + SFP_S_TX_FAULT, + SFP_S_REINIT, + SFP_S_TX_DISABLE, +}; + +static const char *gpio_of_names[] = { + "moddef0", + "los", + "tx-fault", + "tx-disable", + "rate-select", +}; + +static const enum gpiod_flags gpio_flags[] = { + GPIOD_IN, + GPIOD_IN, + GPIOD_IN, + GPIOD_ASIS, + GPIOD_ASIS, +}; + +#define T_INIT_JIFFIES msecs_to_jiffies(300) +#define T_RESET_US 10 +#define T_FAULT_RECOVER msecs_to_jiffies(1000) + +/* SFP module presence detection is poor: the three MOD DEF signals are + * the same length on the PCB, which means it's possible for MOD DEF 0 to + * connect before the I2C bus on MOD DEF 1/2. + * + * The SFP MSA specifies 300ms as t_init (the time taken for TX_FAULT to + * be deasserted) but makes no mention of the earliest time before we can + * access the I2C EEPROM. However, Avago modules require 300ms. + */ +#define T_PROBE_INIT msecs_to_jiffies(300) +#define T_PROBE_RETRY msecs_to_jiffies(100) + +/* + * SFP modules appear to always have their PHY configured for bus address + * 0x56 (which with mdio-i2c, translates to a PHY address of 22). 
+ */ +#define SFP_PHY_ADDR 22 + +/* + * Give this long for the PHY to reset. + */ +#define T_PHY_RESET_MS 50 + +static DEFINE_MUTEX(sfp_mutex); + +struct sfp { + struct device *dev; + struct i2c_adapter *i2c; + struct mii_bus *i2c_mii; + struct sfp_bus *sfp_bus; + struct phy_device *mod_phy; + + unsigned int (*get_state)(struct sfp *); + void (*set_state)(struct sfp *, unsigned int); + int (*read)(struct sfp *, bool, u8, void *, size_t); + + struct gpio_desc *gpio[GPIO_MAX]; + + unsigned int state; + struct delayed_work poll; + struct delayed_work timeout; + struct mutex sm_mutex; + unsigned char sm_mod_state; + unsigned char sm_dev_state; + unsigned short sm_state; + unsigned int sm_retries; + + struct sfp_eeprom_id id; +}; + +static unsigned long poll_jiffies; + +static unsigned int sfp_gpio_get_state(struct sfp *sfp) +{ + unsigned int i, state, v; + + for (i = state = 0; i < GPIO_MAX; i++) { + if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) + continue; + + v = gpiod_get_value_cansleep(sfp->gpio[i]); + if (v) + state |= BIT(i); + } + + return state; +} + +static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state) +{ + if (state & SFP_F_PRESENT) { + /* If the module is present, drive the signals */ + if (sfp->gpio[GPIO_TX_DISABLE]) + gpiod_direction_output(sfp->gpio[GPIO_TX_DISABLE], + state & SFP_F_TX_DISABLE); + if (state & SFP_F_RATE_SELECT) + gpiod_direction_output(sfp->gpio[GPIO_RATE_SELECT], + state & SFP_F_RATE_SELECT); + } else { + /* Otherwise, let them float to the pull-ups */ + if (sfp->gpio[GPIO_TX_DISABLE]) + gpiod_direction_input(sfp->gpio[GPIO_TX_DISABLE]); + if (state & SFP_F_RATE_SELECT) + gpiod_direction_input(sfp->gpio[GPIO_RATE_SELECT]); + } +} + +static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr, + void *buf, size_t len) +{ + struct i2c_msg msgs[2]; + int ret; + + msgs[0].addr = bus_addr; + msgs[0].flags = 0; + msgs[0].len = 1; + msgs[0].buf = &dev_addr; + msgs[1].addr = bus_addr; + msgs[1].flags = I2C_M_RD; + msgs[1].len = len; + msgs[1].buf = buf; + + ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + return ret; + + return ret == ARRAY_SIZE(msgs) ? len : 0; +} + +static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 addr, void *buf, + size_t len) +{ + return sfp__i2c_read(sfp->i2c, a2 ? 0x51 : 0x50, addr, buf, len); +} + +static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c) +{ + struct mii_bus *i2c_mii; + int ret; + + if (!i2c_check_functionality(i2c, I2C_FUNC_I2C)) + return -EINVAL; + + sfp->i2c = i2c; + sfp->read = sfp_i2c_read; + + i2c_mii = mdio_i2c_alloc(sfp->dev, i2c); + if (IS_ERR(i2c_mii)) + return PTR_ERR(i2c_mii); + + i2c_mii->name = "SFP I2C Bus"; + i2c_mii->phy_mask = ~0; + + ret = mdiobus_register(i2c_mii); + if (ret < 0) { + mdiobus_free(i2c_mii); + return ret; + } + + sfp->i2c_mii = i2c_mii; + + return 0; +} + + +/* Interface */ +static unsigned int sfp_get_state(struct sfp *sfp) +{ + return sfp->get_state(sfp); +} + +static void sfp_set_state(struct sfp *sfp, unsigned int state) +{ + sfp->set_state(sfp, state); +} + +static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len) +{ + return sfp->read(sfp, a2, addr, buf, len); +} + +static unsigned int sfp_check(void *buf, size_t len) +{ + u8 *p, check; + + for (p = buf, check = 0; len; p++, len--) + check += *p; + + return check; +} + +/* Helpers */ +static void sfp_module_tx_disable(struct sfp *sfp) +{ + dev_dbg(sfp->dev, "tx disable %u -> %u\n", + sfp->state & SFP_F_TX_DISABLE ? 
1 : 0, 1); + sfp->state |= SFP_F_TX_DISABLE; + sfp_set_state(sfp, sfp->state); +} + +static void sfp_module_tx_enable(struct sfp *sfp) +{ + dev_dbg(sfp->dev, "tx disable %u -> %u\n", + sfp->state & SFP_F_TX_DISABLE ? 1 : 0, 0); + sfp->state &= ~SFP_F_TX_DISABLE; + sfp_set_state(sfp, sfp->state); +} + +static void sfp_module_tx_fault_reset(struct sfp *sfp) +{ + unsigned int state = sfp->state; + + if (state & SFP_F_TX_DISABLE) + return; + + sfp_set_state(sfp, state | SFP_F_TX_DISABLE); + + udelay(T_RESET_US); + + sfp_set_state(sfp, state); +} + +/* SFP state machine */ +static void sfp_sm_set_timer(struct sfp *sfp, unsigned int timeout) +{ + if (timeout) + mod_delayed_work(system_power_efficient_wq, &sfp->timeout, + timeout); + else + cancel_delayed_work(&sfp->timeout); +} + +static void sfp_sm_next(struct sfp *sfp, unsigned int state, + unsigned int timeout) +{ + sfp->sm_state = state; + sfp_sm_set_timer(sfp, timeout); +} + +static void sfp_sm_ins_next(struct sfp *sfp, unsigned int state, unsigned int timeout) +{ + sfp->sm_mod_state = state; + sfp_sm_set_timer(sfp, timeout); +} + +static void sfp_sm_phy_detach(struct sfp *sfp) +{ + phy_stop(sfp->mod_phy); + sfp_remove_phy(sfp->sfp_bus); + phy_device_remove(sfp->mod_phy); + phy_device_free(sfp->mod_phy); + sfp->mod_phy = NULL; +} + +static void sfp_sm_probe_phy(struct sfp *sfp) +{ + struct phy_device *phy; + int err; + + msleep(T_PHY_RESET_MS); + + phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR); + if (IS_ERR(phy)) { + dev_err(sfp->dev, "mdiobus scan returned %ld\n", PTR_ERR(phy)); + return; + } + if (!phy) { + dev_info(sfp->dev, "no PHY detected\n"); + return; + } + + err = sfp_add_phy(sfp->sfp_bus, phy); + if (err) { + phy_device_remove(phy); + phy_device_free(phy); + dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err); + return; + } + + sfp->mod_phy = phy; + phy_start(phy); +} + +static void sfp_sm_link_up(struct sfp *sfp) +{ + sfp_link_up(sfp->sfp_bus); + sfp_sm_next(sfp, SFP_S_LINK_UP, 0); +} + +static void sfp_sm_link_down(struct sfp *sfp) +{ + sfp_link_down(sfp->sfp_bus); +} + +static void sfp_sm_link_check_los(struct sfp *sfp) +{ + unsigned int los = sfp->state & SFP_F_LOS; + + /* FIXME: what if neither SFP_OPTIONS_LOS_INVERTED nor + * SFP_OPTIONS_LOS_NORMAL are set? For now, we assume + * the same as SFP_OPTIONS_LOS_NORMAL set. + */ + if (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED) + los ^= SFP_F_LOS; + + if (los) + sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); + else + sfp_sm_link_up(sfp); +} + +static void sfp_sm_fault(struct sfp *sfp, bool warn) +{ + if (sfp->sm_retries && !--sfp->sm_retries) { + dev_err(sfp->dev, "module persistently indicates fault, disabling\n"); + sfp_sm_next(sfp, SFP_S_TX_DISABLE, 0); + } else { + if (warn) + dev_err(sfp->dev, "module transmit fault indicated\n"); + + sfp_sm_next(sfp, SFP_S_TX_FAULT, T_FAULT_RECOVER); + } +} + +static void sfp_sm_mod_init(struct sfp *sfp) +{ + sfp_module_tx_enable(sfp); + + /* Wait t_init before indicating that the link is up, provided the + * current state indicates no TX_FAULT. If TX_FAULT clears before + * this time, that's fine too. + */ + sfp_sm_next(sfp, SFP_S_INIT, T_INIT_JIFFIES); + sfp->sm_retries = 5; + + /* Setting the serdes link mode is guesswork: there's no + * field in the EEPROM which indicates what mode should + * be used. + * + * If it's a gigabit-only fiber module, it probably does + * not have a PHY, so switch to 802.3z negotiation mode. + * Otherwise, switch to SGMII mode (which is required to + * support non-gigabit speeds) and probe for a PHY. 
+ */ + if (sfp->id.base.e1000_base_t || + sfp->id.base.e100_base_lx || + sfp->id.base.e100_base_fx) + sfp_sm_probe_phy(sfp); +} + +static int sfp_sm_mod_probe(struct sfp *sfp) +{ + /* SFP module inserted - read I2C data */ + struct sfp_eeprom_id id; + char vendor[17]; + char part[17]; + char sn[17]; + char date[9]; + char rev[5]; + u8 check; + int err; + + err = sfp_read(sfp, false, 0, &id, sizeof(id)); + if (err < 0) { + dev_err(sfp->dev, "failed to read EEPROM: %d\n", err); + return -EAGAIN; + } + + if (err != sizeof(id)) { + dev_err(sfp->dev, "EEPROM short read: %d\n", err); + return -EAGAIN; + } + + /* Validate the checksum over the base structure */ + check = sfp_check(&id.base, sizeof(id.base) - 1); + if (check != id.base.cc_base) { + dev_err(sfp->dev, + "EEPROM base structure checksum failure: 0x%02x\n", + check); + print_hex_dump(KERN_ERR, "sfp EE: ", DUMP_PREFIX_OFFSET, + 16, 1, &id, sizeof(id.base) - 1, true); + return -EINVAL; + } + + check = sfp_check(&id.ext, sizeof(id.ext) - 1); + if (check != id.ext.cc_ext) { + dev_err(sfp->dev, + "EEPROM extended structure checksum failure: 0x%02x\n", + check); + memset(&id.ext, 0, sizeof(id.ext)); + } + + sfp->id = id; + + memcpy(vendor, sfp->id.base.vendor_name, 16); + vendor[16] = '\0'; + memcpy(part, sfp->id.base.vendor_pn, 16); + part[16] = '\0'; + memcpy(rev, sfp->id.base.vendor_rev, 4); + rev[4] = '\0'; + memcpy(sn, sfp->id.ext.vendor_sn, 16); + sn[16] = '\0'; + memcpy(date, sfp->id.ext.datecode, 8); + date[8] = '\0'; + + dev_info(sfp->dev, "module %s %s rev %s sn %s dc %s\n", vendor, part, rev, sn, date); + + /* We only support SFP modules, not the legacy GBIC modules. */ + if (sfp->id.base.phys_id != SFP_PHYS_ID_SFP || + sfp->id.base.phys_ext_id != SFP_PHYS_EXT_ID_SFP) { + dev_err(sfp->dev, "module is not SFP - phys id 0x%02x 0x%02x\n", + sfp->id.base.phys_id, sfp->id.base.phys_ext_id); + return -EINVAL; + } + + return sfp_module_insert(sfp->sfp_bus, &sfp->id); +} + +static void sfp_sm_mod_remove(struct sfp *sfp) +{ + sfp_module_remove(sfp->sfp_bus); + + if (sfp->mod_phy) + sfp_sm_phy_detach(sfp); + + sfp_module_tx_disable(sfp); + + memset(&sfp->id, 0, sizeof(sfp->id)); + + dev_info(sfp->dev, "module removed\n"); +} + +static void sfp_sm_event(struct sfp *sfp, unsigned int event) +{ + mutex_lock(&sfp->sm_mutex); + + dev_dbg(sfp->dev, "SM: enter %u:%u:%u event %u\n", + sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state, event); + + /* This state machine tracks the insert/remove state of + * the module, and handles probing the on-board EEPROM. 
+ */ + switch (sfp->sm_mod_state) { + default: + if (event == SFP_E_INSERT) { + sfp_module_tx_disable(sfp); + sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); + } + break; + + case SFP_MOD_PROBE: + if (event == SFP_E_REMOVE) { + sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0); + } else if (event == SFP_E_TIMEOUT) { + int err = sfp_sm_mod_probe(sfp); + + if (err == 0) + sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0); + else if (err == -EAGAIN) + sfp_sm_set_timer(sfp, T_PROBE_RETRY); + else + sfp_sm_ins_next(sfp, SFP_MOD_ERROR, 0); + } + break; + + case SFP_MOD_PRESENT: + case SFP_MOD_ERROR: + if (event == SFP_E_REMOVE) { + sfp_sm_mod_remove(sfp); + sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0); + } + break; + } + + /* This state machine tracks the netdev up/down state */ + switch (sfp->sm_dev_state) { + default: + if (event == SFP_E_DEV_UP) + sfp->sm_dev_state = SFP_DEV_UP; + break; + + case SFP_DEV_UP: + if (event == SFP_E_DEV_DOWN) { + /* If the module has a PHY, avoid raising TX disable + * as this resets the PHY. Otherwise, raise it to + * turn the laser off. + */ + if (!sfp->mod_phy) + sfp_module_tx_disable(sfp); + sfp->sm_dev_state = SFP_DEV_DOWN; + } + break; + } + + /* Some events are global */ + if (sfp->sm_state != SFP_S_DOWN && + (sfp->sm_mod_state != SFP_MOD_PRESENT || + sfp->sm_dev_state != SFP_DEV_UP)) { + if (sfp->sm_state == SFP_S_LINK_UP && + sfp->sm_dev_state == SFP_DEV_UP) + sfp_sm_link_down(sfp); + if (sfp->mod_phy) + sfp_sm_phy_detach(sfp); + sfp_sm_next(sfp, SFP_S_DOWN, 0); + mutex_unlock(&sfp->sm_mutex); + return; + } + + /* The main state machine */ + switch (sfp->sm_state) { + case SFP_S_DOWN: + if (sfp->sm_mod_state == SFP_MOD_PRESENT && + sfp->sm_dev_state == SFP_DEV_UP) + sfp_sm_mod_init(sfp); + break; + + case SFP_S_INIT: + if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) + sfp_sm_fault(sfp, true); + else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) + sfp_sm_link_check_los(sfp); + break; + + case SFP_S_WAIT_LOS: + if (event == SFP_E_TX_FAULT) + sfp_sm_fault(sfp, true); + else if (event == + (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? + SFP_E_LOS_HIGH : SFP_E_LOS_LOW)) + sfp_sm_link_up(sfp); + break; + + case SFP_S_LINK_UP: + if (event == SFP_E_TX_FAULT) { + sfp_sm_link_down(sfp); + sfp_sm_fault(sfp, true); + } else if (event == + (sfp->id.ext.options & SFP_OPTIONS_LOS_INVERTED ? + SFP_E_LOS_LOW : SFP_E_LOS_HIGH)) { + sfp_sm_link_down(sfp); + sfp_sm_next(sfp, SFP_S_WAIT_LOS, 0); + } + break; + + case SFP_S_TX_FAULT: + if (event == SFP_E_TIMEOUT) { + sfp_module_tx_fault_reset(sfp); + sfp_sm_next(sfp, SFP_S_REINIT, T_INIT_JIFFIES); + } + break; + + case SFP_S_REINIT: + if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) { + sfp_sm_fault(sfp, false); + } else if (event == SFP_E_TIMEOUT || event == SFP_E_TX_CLEAR) { + dev_info(sfp->dev, "module transmit fault recovered\n"); + sfp_sm_link_check_los(sfp); + } + break; + + case SFP_S_TX_DISABLE: + break; + } + + dev_dbg(sfp->dev, "SM: exit %u:%u:%u\n", + sfp->sm_mod_state, sfp->sm_dev_state, sfp->sm_state); + + mutex_unlock(&sfp->sm_mutex); +} + +static void sfp_start(struct sfp *sfp) +{ + sfp_sm_event(sfp, SFP_E_DEV_UP); +} + +static void sfp_stop(struct sfp *sfp) +{ + sfp_sm_event(sfp, SFP_E_DEV_DOWN); +} + +static int sfp_module_info(struct sfp *sfp, struct ethtool_modinfo *modinfo) +{ + /* locking... 
and check module is present */ + + if (sfp->id.ext.sff8472_compliance) { + modinfo->type = ETH_MODULE_SFF_8472; + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; + } else { + modinfo->type = ETH_MODULE_SFF_8079; + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; + } + return 0; +} + +static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, + u8 *data) +{ + unsigned int first, last, len; + int ret; + + if (ee->len == 0) + return -EINVAL; + + first = ee->offset; + last = ee->offset + ee->len; + if (first < ETH_MODULE_SFF_8079_LEN) { + len = min_t(unsigned int, last, ETH_MODULE_SFF_8079_LEN); + len -= first; + + ret = sfp->read(sfp, false, first, data, len); + if (ret < 0) + return ret; + + first += len; + data += len; + } + if (first >= ETH_MODULE_SFF_8079_LEN && + first < ETH_MODULE_SFF_8472_LEN) { + len = min_t(unsigned int, last, ETH_MODULE_SFF_8472_LEN); + len -= first; + first -= ETH_MODULE_SFF_8079_LEN; + + ret = sfp->read(sfp, true, first, data, len); + if (ret < 0) + return ret; + } + return 0; +} + +static const struct sfp_socket_ops sfp_module_ops = { + .start = sfp_start, + .stop = sfp_stop, + .module_info = sfp_module_info, + .module_eeprom = sfp_module_eeprom, +}; + +static void sfp_timeout(struct work_struct *work) +{ + struct sfp *sfp = container_of(work, struct sfp, timeout.work); + + rtnl_lock(); + sfp_sm_event(sfp, SFP_E_TIMEOUT); + rtnl_unlock(); +} + +static void sfp_check_state(struct sfp *sfp) +{ + unsigned int state, i, changed; + + state = sfp_get_state(sfp); + changed = state ^ sfp->state; + changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT; + + for (i = 0; i < GPIO_MAX; i++) + if (changed & BIT(i)) + dev_dbg(sfp->dev, "%s %u -> %u\n", gpio_of_names[i], + !!(sfp->state & BIT(i)), !!(state & BIT(i))); + + state |= sfp->state & (SFP_F_TX_DISABLE | SFP_F_RATE_SELECT); + sfp->state = state; + + rtnl_lock(); + if (changed & SFP_F_PRESENT) + sfp_sm_event(sfp, state & SFP_F_PRESENT ? + SFP_E_INSERT : SFP_E_REMOVE); + + if (changed & SFP_F_TX_FAULT) + sfp_sm_event(sfp, state & SFP_F_TX_FAULT ? + SFP_E_TX_FAULT : SFP_E_TX_CLEAR); + + if (changed & SFP_F_LOS) + sfp_sm_event(sfp, state & SFP_F_LOS ? 
+ SFP_E_LOS_HIGH : SFP_E_LOS_LOW); + rtnl_unlock(); +} + +static irqreturn_t sfp_irq(int irq, void *data) +{ + struct sfp *sfp = data; + + sfp_check_state(sfp); + + return IRQ_HANDLED; +} + +static void sfp_poll(struct work_struct *work) +{ + struct sfp *sfp = container_of(work, struct sfp, poll.work); + + sfp_check_state(sfp); + mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); +} + +static struct sfp *sfp_alloc(struct device *dev) +{ + struct sfp *sfp; + + sfp = kzalloc(sizeof(*sfp), GFP_KERNEL); + if (!sfp) + return ERR_PTR(-ENOMEM); + + sfp->dev = dev; + + mutex_init(&sfp->sm_mutex); + INIT_DELAYED_WORK(&sfp->poll, sfp_poll); + INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout); + + return sfp; +} + +static void sfp_cleanup(void *data) +{ + struct sfp *sfp = data; + + cancel_delayed_work_sync(&sfp->poll); + cancel_delayed_work_sync(&sfp->timeout); + if (sfp->i2c_mii) { + mdiobus_unregister(sfp->i2c_mii); + mdiobus_free(sfp->i2c_mii); + } + if (sfp->i2c) + i2c_put_adapter(sfp->i2c); + kfree(sfp); +} + +static int sfp_probe(struct platform_device *pdev) +{ + struct sfp *sfp; + bool poll = false; + int irq, err, i; + + sfp = sfp_alloc(&pdev->dev); + if (IS_ERR(sfp)) + return PTR_ERR(sfp); + + platform_set_drvdata(pdev, sfp); + + err = devm_add_action(sfp->dev, sfp_cleanup, sfp); + if (err < 0) + return err; + + if (pdev->dev.of_node) { + struct device_node *node = pdev->dev.of_node; + struct device_node *np; + + np = of_parse_phandle(node, "i2c-bus", 0); + if (np) { + struct i2c_adapter *i2c; + + i2c = of_find_i2c_adapter_by_node(np); + of_node_put(np); + if (!i2c) + return -EPROBE_DEFER; + + err = sfp_i2c_configure(sfp, i2c); + if (err < 0) { + i2c_put_adapter(i2c); + return err; + } + } + + for (i = 0; i < GPIO_MAX; i++) { + sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev, + gpio_of_names[i], gpio_flags[i]); + if (IS_ERR(sfp->gpio[i])) + return PTR_ERR(sfp->gpio[i]); + } + + sfp->get_state = sfp_gpio_get_state; + sfp->set_state = sfp_gpio_set_state; + } + + sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); + if (!sfp->sfp_bus) + return -ENOMEM; + + /* Get the initial state, and always signal TX disable, + * since the network interface will not be up. 
+ */ + sfp->state = sfp_get_state(sfp) | SFP_F_TX_DISABLE; + + if (sfp->gpio[GPIO_RATE_SELECT] && + gpiod_get_value_cansleep(sfp->gpio[GPIO_RATE_SELECT])) + sfp->state |= SFP_F_RATE_SELECT; + sfp_set_state(sfp, sfp->state); + sfp_module_tx_disable(sfp); + rtnl_lock(); + if (sfp->state & SFP_F_PRESENT) + sfp_sm_event(sfp, SFP_E_INSERT); + rtnl_unlock(); + + for (i = 0; i < GPIO_MAX; i++) { + if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) + continue; + + irq = gpiod_to_irq(sfp->gpio[i]); + if (!irq) { + poll = true; + continue; + } + + err = devm_request_threaded_irq(sfp->dev, irq, NULL, sfp_irq, + IRQF_ONESHOT | + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + dev_name(sfp->dev), sfp); + if (err) + poll = true; + } + + if (poll) + mod_delayed_work(system_wq, &sfp->poll, poll_jiffies); + + return 0; +} + +static int sfp_remove(struct platform_device *pdev) +{ + struct sfp *sfp = platform_get_drvdata(pdev); + + sfp_unregister_socket(sfp->sfp_bus); + + return 0; +} + +static const struct of_device_id sfp_of_match[] = { + { .compatible = "sff,sfp", }, + { }, +}; +MODULE_DEVICE_TABLE(of, sfp_of_match); + +static struct platform_driver sfp_driver = { + .probe = sfp_probe, + .remove = sfp_remove, + .driver = { + .name = "sfp", + .of_match_table = sfp_of_match, + }, +}; + +static int sfp_init(void) +{ + poll_jiffies = msecs_to_jiffies(100); + + return platform_driver_register(&sfp_driver); +} +module_init(sfp_init); + +static void sfp_exit(void) +{ + platform_driver_unregister(&sfp_driver); +} +module_exit(sfp_exit); + +MODULE_ALIAS("platform:sfp"); +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-55-g7522 From 9060e6bae61a253f83a39145419f23fc67b401cf Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Thu, 3 Aug 2017 13:08:24 -0700 Subject: liquidio: add missing strings in oct_dev_state_str array There's supposed to be a one-to-one correspondence between the 18 macros that #define the OCT_DEV states (in octeon_device.h) and the strings in the oct_dev_state_str array, but there are only 14 strings in the array. Add the missing strings (so they become 18 in total), and also revise some incorrect/outdated text of existing strings. Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index f10014f7ae88..495cc8880646 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -528,9 +528,10 @@ static struct octeon_config_ptr { }; static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = { - "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", + "BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE", - "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", + "DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE", + "INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", "INVALID" }; -- cgit v1.2.3-55-g7522 From 2470f3a2946083fc7b0b9fcbdc6aaee9646733e3 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Thu, 3 Aug 2017 15:10:17 -0700 Subject: liquidio: moved console_bitmask module param to lio_main.c Moving PF module param console_bitmask to lio_main.c for consistency. 
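The change relies on the standard bitmask-module-parameter idiom: a writable integer parameter whose individual bits gate per-console debug output. A minimal, generic sketch of that idiom (placeholder names, not the liquidio symbols) looks like this:

#include <linux/types.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Placeholder parameter; the driver itself calls its parameter console_bitmask. */
static u32 example_console_bitmask;
module_param(example_console_bitmask, uint, 0644);
MODULE_PARM_DESC(example_console_bitmask,
                 "Bitmask selecting which consoles have debug output enabled");

/* True when bit 'console' of the mask is set. */
static bool example_console_debug_enabled(u32 console)
{
        return (example_console_bitmask >> console) & 0x1;
}

Loading a module built this way with example_console_bitmask=0x3 enables consoles 0 and 1, and because the parameter mode is 0644 the mask can also be changed at runtime through /sys/module/<module>/parameters/.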
Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 15 +++++++++++++++ drivers/net/ethernet/cavium/liquidio/octeon_console.c | 14 -------------- drivers/net/ethernet/cavium/liquidio/octeon_device.h | 2 ++ 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 39a8dca35ffa..8c2cd8011bae 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -59,6 +59,21 @@ static char fw_type[LIO_MAX_FW_TYPE_LEN]; module_param_string(fw_type, fw_type, sizeof(fw_type), 0000); MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\""); +static u32 console_bitmask; +module_param(console_bitmask, int, 0644); +MODULE_PARM_DESC(console_bitmask, + "Bitmask indicating which consoles have debug output redirected to syslog."); + +/** + * \brief determines if a given console has debug enabled. + * @param console console to check + * @returns 1 = enabled. 0 otherwise + */ +int octeon_console_debug_enabled(u32 console) +{ + return (console_bitmask >> (console)) & 0x1; +} + static int ptp_enable = 1; /* Polling interval for determining when NIC application is alive */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index 15ad1ab2c0c7..dd0efc9b4286 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -37,10 +37,6 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, u32 flags); static int octeon_console_read(struct octeon_device *oct, u32 console_num, char *buffer, u32 buf_size); -static u32 console_bitmask; -module_param(console_bitmask, int, 0644); -MODULE_PARM_DESC(console_bitmask, - "Bitmask indicating which consoles have debug output redirected to syslog."); #define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008 #define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004 @@ -135,16 +131,6 @@ struct octeon_pci_console_desc { /* Implicit storage for console_addr_array */ }; -/** - * \brief determines if a given console has debug enabled. - * @param console console to check - * @returns 1 = enabled. 0 otherwise - */ -static int octeon_console_debug_enabled(u32 console) -{ - return (console_bitmask >> (console)) & 0x1; -} - /** * This function is the implementation of the get macros defined * for individual structure members. The argument are generated diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index ad464788c923..31efdef02a24 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -739,6 +739,8 @@ int octeon_wait_for_bootloader(struct octeon_device *oct, */ int octeon_init_consoles(struct octeon_device *oct); +int octeon_console_debug_enabled(u32 console); + /** * Adds access to a console to the device. 
* -- cgit v1.2.3-55-g7522 From 10377ba7673d19e28d92846955d51f49acd25c35 Mon Sep 17 00:00:00 2001 From: kiki good Date: Fri, 4 Aug 2017 00:07:45 +0100 Subject: net: systemport: Support 64bit statistics When using Broadcom Systemport device in 32bit Platform, ifconfig can only report up to 4G tx,rx status, which will be wrapped to 0 when the number of incoming or outgoing packets exceeds 4G, only taking around 2 hours in busy network environment (such as streaming). Therefore, it makes hard for network diagnostic tool to get reliable statistical result, so the patch is used to add 64bit support for Broadcom Systemport device in 32bit Platform. This patch provides 64bit statistics capability on both ethtool and ifconfig. Signed-off-by: Jianming.qiao Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bcmsysport.c | 78 +++++++++++++++++++++++------- drivers/net/ethernet/broadcom/bcmsysport.h | 21 ++++++++ 2 files changed, 82 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 5333601f855f..bf9ca3c79d1a 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -201,10 +201,10 @@ static int bcm_sysport_set_features(struct net_device *dev, */ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { /* general stats */ - STAT_NETDEV(rx_packets), - STAT_NETDEV(tx_packets), - STAT_NETDEV(rx_bytes), - STAT_NETDEV(tx_bytes), + STAT_NETDEV64(rx_packets), + STAT_NETDEV64(tx_packets), + STAT_NETDEV64(rx_bytes), + STAT_NETDEV64(tx_bytes), STAT_NETDEV(rx_errors), STAT_NETDEV(tx_errors), STAT_NETDEV(rx_dropped), @@ -316,6 +316,7 @@ static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type) { switch (type) { case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_NETDEV64: case BCM_SYSPORT_STAT_RXCHK: case BCM_SYSPORT_STAT_RBUF: case BCM_SYSPORT_STAT_SOFT: @@ -398,6 +399,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) s = &bcm_sysport_gstrings_stats[i]; switch (s->type) { case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_NETDEV64: case BCM_SYSPORT_STAT_SOFT: continue; case BCM_SYSPORT_STAT_MIB_RX: @@ -434,7 +436,10 @@ static void bcm_sysport_get_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct bcm_sysport_priv *priv = netdev_priv(dev); + struct bcm_sysport_stats64 *stats64 = &priv->stats64; + struct u64_stats_sync *syncp = &priv->syncp; struct bcm_sysport_tx_ring *ring; + unsigned int start; int i, j; if (netif_running(dev)) @@ -447,10 +452,20 @@ static void bcm_sysport_get_stats(struct net_device *dev, s = &bcm_sysport_gstrings_stats[i]; if (s->type == BCM_SYSPORT_STAT_NETDEV) p = (char *)&dev->stats; + else if (s->type == BCM_SYSPORT_STAT_NETDEV64) + p = (char *)stats64; else p = (char *)priv; + p += s->stat_offset; - data[j] = *(unsigned long *)p; + + if (s->stat_sizeof == sizeof(u64)) + do { + start = u64_stats_fetch_begin_irq(syncp); + data[i] = *(u64 *)p; + } while (u64_stats_fetch_retry_irq(syncp, start)); + else + data[i] = *(u32 *)p; j++; } @@ -662,6 +677,7 @@ static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv) static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv, unsigned int budget) { + struct bcm_sysport_stats64 *stats64 = &priv->stats64; struct net_device *ndev = priv->netdev; unsigned int processed = 0, to_process; struct bcm_sysport_cb *cb; @@ -765,6 +781,10 @@ static unsigned int bcm_sysport_desc_rx(struct 
bcm_sysport_priv *priv, skb->protocol = eth_type_trans(skb, ndev); ndev->stats.rx_packets++; ndev->stats.rx_bytes += len; + u64_stats_update_begin(&priv->syncp); + stats64->rx_packets++; + stats64->rx_bytes += len; + u64_stats_update_end(&priv->syncp); napi_gro_receive(&priv->napi, skb); next: @@ -787,17 +807,15 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, struct device *kdev = &priv->pdev->dev; if (cb->skb) { - ring->bytes += cb->skb->len; *bytes_compl += cb->skb->len; dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); - ring->packets++; (*pkts_compl)++; bcm_sysport_free_cb(cb); /* SKB fragment */ } else if (dma_unmap_addr(cb, dma_addr)) { - ring->bytes += dma_unmap_len(cb, dma_len); + *bytes_compl += dma_unmap_len(cb, dma_len); dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr), dma_unmap_len(cb, dma_len), DMA_TO_DEVICE); dma_unmap_addr_set(cb, dma_addr, 0); @@ -808,9 +826,9 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring, static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { - struct net_device *ndev = priv->netdev; unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; + struct net_device *ndev = priv->netdev; struct bcm_sysport_cb *cb; u32 hw_ind; @@ -849,6 +867,11 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, last_c_index &= (num_tx_cbs - 1); } + u64_stats_update_begin(&priv->syncp); + ring->packets += pkts_compl; + ring->bytes += bytes_compl; + u64_stats_update_end(&priv->syncp); + ring->c_index = c_index; netif_dbg(priv, tx_done, ndev, @@ -1671,22 +1694,41 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p) return 0; } -static struct net_device_stats *bcm_sysport_get_nstats(struct net_device *dev) +static void bcm_sysport_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats) { struct bcm_sysport_priv *priv = netdev_priv(dev); - unsigned long tx_bytes = 0, tx_packets = 0; + struct bcm_sysport_stats64 *stats64 = &priv->stats64; struct bcm_sysport_tx_ring *ring; + u64 tx_packets = 0, tx_bytes = 0; + unsigned int start; unsigned int q; + netdev_stats_to_stats64(stats, &dev->stats); + for (q = 0; q < dev->num_tx_queues; q++) { ring = &priv->tx_rings[q]; - tx_bytes += ring->bytes; - tx_packets += ring->packets; + do { + start = u64_stats_fetch_begin_irq(&priv->syncp); + tx_bytes = ring->bytes; + tx_packets = ring->packets; + } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); + + stats->tx_bytes += tx_bytes; + stats->tx_packets += tx_packets; } - dev->stats.tx_bytes = tx_bytes; - dev->stats.tx_packets = tx_packets; - return &dev->stats; + /* lockless update tx_bytes and tx_packets */ + u64_stats_update_begin(&priv->syncp); + stats64->tx_bytes = stats->tx_bytes; + stats64->tx_packets = stats->tx_packets; + u64_stats_update_end(&priv->syncp); + + do { + start = u64_stats_fetch_begin_irq(&priv->syncp); + stats->rx_packets = stats64->rx_packets; + stats->rx_bytes = stats64->rx_bytes; + } while (u64_stats_fetch_retry_irq(&priv->syncp, start)); } static void bcm_sysport_netif_start(struct net_device *dev) @@ -1950,7 +1992,7 @@ static const struct net_device_ops bcm_sysport_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bcm_sysport_poll_controller, #endif - .ndo_get_stats = bcm_sysport_get_nstats, + .ndo_get_stats64 = bcm_sysport_get_stats64, }; #define REV_FMT "v%2x.%02x" @@ -2098,6 
+2140,8 @@ static int bcm_sysport_probe(struct platform_device *pdev) /* libphy will adjust the link state accordingly */ netif_carrier_off(dev); + u64_stats_init(&priv->syncp); + ret = register_netdev(dev); if (ret) { dev_err(&pdev->dev, "failed to register net_device\n"); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 77a51c167a69..80b4ffff63b7 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -603,6 +603,7 @@ struct bcm_sysport_mib { /* HW maintains a large list of counters */ enum bcm_sysport_stat_type { BCM_SYSPORT_STAT_NETDEV = -1, + BCM_SYSPORT_STAT_NETDEV64, BCM_SYSPORT_STAT_MIB_RX, BCM_SYSPORT_STAT_MIB_TX, BCM_SYSPORT_STAT_RUNT, @@ -619,6 +620,13 @@ enum bcm_sysport_stat_type { .type = BCM_SYSPORT_STAT_NETDEV, \ } +#define STAT_NETDEV64(m) { \ + .stat_string = __stringify(m), \ + .stat_sizeof = sizeof(((struct bcm_sysport_stats64 *)0)->m), \ + .stat_offset = offsetof(struct bcm_sysport_stats64, m), \ + .type = BCM_SYSPORT_STAT_NETDEV64, \ +} + #define STAT_MIB(str, m, _type) { \ .stat_string = str, \ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \ @@ -659,6 +667,14 @@ struct bcm_sysport_stats { u16 reg_offset; }; +struct bcm_sysport_stats64 { + /* 64bit stats on 32bit/64bit Machine */ + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; +}; + /* Software house keeping helper structure */ struct bcm_sysport_cb { struct sk_buff *skb; /* SKB for RX packets */ @@ -743,5 +759,10 @@ struct bcm_sysport_priv { /* Ethtool */ u32 msg_enable; + + struct bcm_sysport_stats64 stats64; + + /* For atomic update generic 64bit value on 32bit Machine */ + struct u64_stats_sync syncp; }; #endif /* __BCM_SYSPORT_H */ -- cgit v1.2.3-55-g7522 From 4faf783998b8cb88294e9df89032f473f8771b78 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 3 Aug 2017 20:38:51 -0700 Subject: tcp: fix cwnd undo in Reno and HTCP congestion controls Using ssthresh to revert cwnd is less reliable when ssthresh is bounded to 2 packets. This patch uses an existing variable in TCP "prior_cwnd" that snapshots the cwnd right before entering fast recovery and RTO recovery in Reno. This fixes the issue discussed in netdev thread: "A buggy behavior for Linux TCP Reno and HTCP" https://www.spinics.net/lists/netdev/msg444955.html Suggested-by: Neal Cardwell Reported-by: Wei Sun Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Signed-off-by: David S. Miller --- include/linux/tcp.h | 2 +- net/ipv4/tcp_cong.c | 2 +- net/ipv4/tcp_htcp.c | 3 +-- net/ipv4/tcp_input.c | 1 + 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index d7389ea36e10..267164a1d559 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -258,7 +258,7 @@ struct tcp_sock { u32 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */ u32 snd_cwnd_used; u32 snd_cwnd_stamp; - u32 prior_cwnd; /* Congestion window at start of Recovery. */ + u32 prior_cwnd; /* cwnd right before starting loss recovery */ u32 prr_delivered; /* Number of newly delivered packets to * receiver in Recovery. */ u32 prr_out; /* Total number of pkts sent during Recovery. 
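The SYSTEMPORT conversion above (and the DSA patch later in this series) leans on the u64_stats_sync API to make 64-bit counters safe on 32-bit machines. A generic sketch of the writer/reader protocol, with made-up field names rather than code taken from either patch:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct example_stats {
        u64 rx_packets;
        u64 rx_bytes;
        struct u64_stats_sync syncp;    /* guards the counters on 32-bit */
};

/* Writer (e.g. the NAPI poll loop). u64_stats_init(&s->syncp) must have been
 * called once beforehand, as the probe-path hunk above does for priv->syncp.
 */
static void example_rx_account(struct example_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->rx_packets++;
        s->rx_bytes += len;
        u64_stats_update_end(&s->syncp);
}

/* Reader: retry until a consistent snapshot is observed. */
static void example_read(struct example_stats *s, u64 *pkts, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(&s->syncp);
                *pkts = s->rx_packets;
                *bytes = s->rx_bytes;
        } while (u64_stats_fetch_retry_irq(&s->syncp, start));
}

On 64-bit kernels these helpers compile down to plain loads and stores; on 32-bit SMP they wrap a seqcount, which is why the reader loops.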
*/ diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index fde983f6376b..c2b174469645 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -456,7 +456,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); - return max(tp->snd_cwnd, tp->snd_ssthresh << 1); + return max(tp->snd_cwnd, tp->prior_cwnd); } EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd); diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c index 3eb78cde6ff0..082d479462fa 100644 --- a/net/ipv4/tcp_htcp.c +++ b/net/ipv4/tcp_htcp.c @@ -66,7 +66,6 @@ static inline void htcp_reset(struct htcp *ca) static u32 htcp_cwnd_undo(struct sock *sk) { - const struct tcp_sock *tp = tcp_sk(sk); struct htcp *ca = inet_csk_ca(sk); if (ca->undo_last_cong) { @@ -76,7 +75,7 @@ static u32 htcp_cwnd_undo(struct sock *sk) ca->undo_last_cong = 0; } - return max(tp->snd_cwnd, (tp->snd_ssthresh << 7) / ca->beta); + return tcp_reno_undo_cwnd(sk); } static inline void measure_rtt(struct sock *sk, u32 srtt) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 99cdf4ccabb8..842ed75ccb25 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1950,6 +1950,7 @@ void tcp_enter_loss(struct sock *sk) !after(tp->high_seq, tp->snd_una) || (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(sk); + tp->prior_cwnd = tp->snd_cwnd; tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_ca_event(sk, CA_EVENT_LOSS); tcp_init_undo(tp); -- cgit v1.2.3-55-g7522 From f1722a1be19dc38e0a4b282d4e6e6ec5e1b11a67 Mon Sep 17 00:00:00 2001 From: Yuchung Cheng Date: Thu, 3 Aug 2017 20:38:52 -0700 Subject: tcp: consolidate congestion control undo functions Most TCP congestion controls are using identical logic to undo cwnd except BBR. This patch consolidates these similar functions to the one used currently by Reno and others. Suggested-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Neal Cardwell Signed-off-by: David S. 
Miller --- net/ipv4/tcp_bic.c | 14 +------------- net/ipv4/tcp_cdg.c | 12 +----------- net/ipv4/tcp_cubic.c | 13 +------------ net/ipv4/tcp_highspeed.c | 11 +---------- net/ipv4/tcp_illinois.c | 11 +---------- net/ipv4/tcp_nv.c | 13 +------------ net/ipv4/tcp_scalable.c | 16 +--------------- net/ipv4/tcp_veno.c | 11 +---------- net/ipv4/tcp_yeah.c | 11 +---------- 9 files changed, 9 insertions(+), 103 deletions(-) diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index 609965f0e298..fc3614377413 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -49,7 +49,6 @@ MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wma struct bictcp { u32 cnt; /* increase cwnd by 1 after ACKs */ u32 last_max_cwnd; /* last maximum snd_cwnd */ - u32 loss_cwnd; /* congestion window at last loss */ u32 last_cwnd; /* the last snd_cwnd */ u32 last_time; /* time when updated last_cwnd */ u32 epoch_start; /* beginning of an epoch */ @@ -72,7 +71,6 @@ static void bictcp_init(struct sock *sk) struct bictcp *ca = inet_csk_ca(sk); bictcp_reset(ca); - ca->loss_cwnd = 0; if (initial_ssthresh) tcp_sk(sk)->snd_ssthresh = initial_ssthresh; @@ -172,22 +170,12 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk) else ca->last_max_cwnd = tp->snd_cwnd; - ca->loss_cwnd = tp->snd_cwnd; - if (tp->snd_cwnd <= low_window) return max(tp->snd_cwnd >> 1U, 2U); else return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); } -static u32 bictcp_undo_cwnd(struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - const struct bictcp *ca = inet_csk_ca(sk); - - return max(tp->snd_cwnd, ca->loss_cwnd); -} - static void bictcp_state(struct sock *sk, u8 new_state) { if (new_state == TCP_CA_Loss) @@ -214,7 +202,7 @@ static struct tcp_congestion_ops bictcp __read_mostly = { .ssthresh = bictcp_recalc_ssthresh, .cong_avoid = bictcp_cong_avoid, .set_state = bictcp_state, - .undo_cwnd = bictcp_undo_cwnd, + .undo_cwnd = tcp_reno_undo_cwnd, .pkts_acked = bictcp_acked, .owner = THIS_MODULE, .name = "bic", diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c index 50a0f3e51d5b..66ac69f7bd19 100644 --- a/net/ipv4/tcp_cdg.c +++ b/net/ipv4/tcp_cdg.c @@ -85,7 +85,6 @@ struct cdg { u8 state; u8 delack; u32 rtt_seq; - u32 undo_cwnd; u32 shadow_wnd; u16 backoff_cnt; u16 sample_cnt; @@ -330,8 +329,6 @@ static u32 tcp_cdg_ssthresh(struct sock *sk) struct cdg *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); - ca->undo_cwnd = tp->snd_cwnd; - if (ca->state == CDG_BACKOFF) return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10); @@ -344,13 +341,6 @@ static u32 tcp_cdg_ssthresh(struct sock *sk) return max(2U, tp->snd_cwnd >> 1); } -static u32 tcp_cdg_undo_cwnd(struct sock *sk) -{ - struct cdg *ca = inet_csk_ca(sk); - - return max(tcp_sk(sk)->snd_cwnd, ca->undo_cwnd); -} - static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev) { struct cdg *ca = inet_csk_ca(sk); @@ -403,7 +393,7 @@ struct tcp_congestion_ops tcp_cdg __read_mostly = { .cong_avoid = tcp_cdg_cong_avoid, .cwnd_event = tcp_cdg_cwnd_event, .pkts_acked = tcp_cdg_acked, - .undo_cwnd = tcp_cdg_undo_cwnd, + .undo_cwnd = tcp_reno_undo_cwnd, .ssthresh = tcp_cdg_ssthresh, .release = tcp_cdg_release, .init = tcp_cdg_init, diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 57ae5b5ae643..78bfadfcf342 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -83,7 +83,6 @@ MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (mse struct bictcp { u32 cnt; /* increase cwnd by 1 after ACKs */ u32 
last_max_cwnd; /* last maximum snd_cwnd */ - u32 loss_cwnd; /* congestion window at last loss */ u32 last_cwnd; /* the last snd_cwnd */ u32 last_time; /* time when updated last_cwnd */ u32 bic_origin_point;/* origin point of bic function */ @@ -142,7 +141,6 @@ static void bictcp_init(struct sock *sk) struct bictcp *ca = inet_csk_ca(sk); bictcp_reset(ca); - ca->loss_cwnd = 0; if (hystart) bictcp_hystart_reset(sk); @@ -366,18 +364,9 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk) else ca->last_max_cwnd = tp->snd_cwnd; - ca->loss_cwnd = tp->snd_cwnd; - return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); } -static u32 bictcp_undo_cwnd(struct sock *sk) -{ - struct bictcp *ca = inet_csk_ca(sk); - - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); -} - static void bictcp_state(struct sock *sk, u8 new_state) { if (new_state == TCP_CA_Loss) { @@ -470,7 +459,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = { .ssthresh = bictcp_recalc_ssthresh, .cong_avoid = bictcp_cong_avoid, .set_state = bictcp_state, - .undo_cwnd = bictcp_undo_cwnd, + .undo_cwnd = tcp_reno_undo_cwnd, .cwnd_event = bictcp_cwnd_event, .pkts_acked = bictcp_acked, .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c index 6d9879e93648..d1c33c91eadc 100644 --- a/net/ipv4/tcp_highspeed.c +++ b/net/ipv4/tcp_highspeed.c @@ -94,7 +94,6 @@ static const struct hstcp_aimd_val { struct hstcp { u32 ai; - u32 loss_cwnd; }; static void hstcp_init(struct sock *sk) @@ -153,22 +152,14 @@ static u32 hstcp_ssthresh(struct sock *sk) const struct tcp_sock *tp = tcp_sk(sk); struct hstcp *ca = inet_csk_ca(sk); - ca->loss_cwnd = tp->snd_cwnd; /* Do multiplicative decrease */ return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U); } -static u32 hstcp_cwnd_undo(struct sock *sk) -{ - const struct hstcp *ca = inet_csk_ca(sk); - - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); -} - static struct tcp_congestion_ops tcp_highspeed __read_mostly = { .init = hstcp_init, .ssthresh = hstcp_ssthresh, - .undo_cwnd = hstcp_cwnd_undo, + .undo_cwnd = tcp_reno_undo_cwnd, .cong_avoid = hstcp_cong_avoid, .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 60352ff4f5a8..7c843578f233 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -48,7 +48,6 @@ struct illinois { u32 end_seq; /* right edge of current RTT */ u32 alpha; /* Additive increase */ u32 beta; /* Muliplicative decrease */ - u32 loss_cwnd; /* cwnd on loss */ u16 acked; /* # packets acked by current ACK */ u8 rtt_above; /* average rtt has gone above threshold */ u8 rtt_low; /* # of rtts measurements below threshold */ @@ -297,18 +296,10 @@ static u32 tcp_illinois_ssthresh(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); struct illinois *ca = inet_csk_ca(sk); - ca->loss_cwnd = tp->snd_cwnd; /* Multiplicative decrease */ return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); } -static u32 tcp_illinois_cwnd_undo(struct sock *sk) -{ - const struct illinois *ca = inet_csk_ca(sk); - - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); -} - /* Extract info for Tcp socket info provided via netlink. 
*/ static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr, union tcp_cc_info *info) @@ -336,7 +327,7 @@ static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr, static struct tcp_congestion_ops tcp_illinois __read_mostly = { .init = tcp_illinois_init, .ssthresh = tcp_illinois_ssthresh, - .undo_cwnd = tcp_illinois_cwnd_undo, + .undo_cwnd = tcp_reno_undo_cwnd, .cong_avoid = tcp_illinois_cong_avoid, .set_state = tcp_illinois_state, .get_info = tcp_illinois_info, diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c index 6d650ed3cb59..1ff73982e28c 100644 --- a/net/ipv4/tcp_nv.c +++ b/net/ipv4/tcp_nv.c @@ -86,7 +86,6 @@ struct tcpnv { * < 0 => less than 1 packet/RTT */ u8 available8; u16 available16; - u32 loss_cwnd; /* cwnd at last loss */ u8 nv_allow_cwnd_growth:1, /* whether cwnd can grow */ nv_reset:1, /* whether to reset values */ nv_catchup:1; /* whether we are growing because @@ -121,7 +120,6 @@ static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); ca->nv_reset = 0; - ca->loss_cwnd = 0; ca->nv_no_cong_cnt = 0; ca->nv_rtt_cnt = 0; ca->nv_last_rtt = 0; @@ -177,19 +175,10 @@ static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked) static u32 tcpnv_recalc_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); - struct tcpnv *ca = inet_csk_ca(sk); - ca->loss_cwnd = tp->snd_cwnd; return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U); } -static u32 tcpnv_undo_cwnd(struct sock *sk) -{ - struct tcpnv *ca = inet_csk_ca(sk); - - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); -} - static void tcpnv_state(struct sock *sk, u8 new_state) { struct tcpnv *ca = inet_csk_ca(sk); @@ -446,7 +435,7 @@ static struct tcp_congestion_ops tcpnv __read_mostly = { .ssthresh = tcpnv_recalc_ssthresh, .cong_avoid = tcpnv_cong_avoid, .set_state = tcpnv_state, - .undo_cwnd = tcpnv_undo_cwnd, + .undo_cwnd = tcp_reno_undo_cwnd, .pkts_acked = tcpnv_acked, .get_info = tcpnv_get_info, diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c index f2123075ce6e..addc122f8818 100644 --- a/net/ipv4/tcp_scalable.c +++ b/net/ipv4/tcp_scalable.c @@ -15,10 +15,6 @@ #define TCP_SCALABLE_AI_CNT 50U #define TCP_SCALABLE_MD_SCALE 3 -struct scalable { - u32 loss_cwnd; -}; - static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) { struct tcp_sock *tp = tcp_sk(sk); @@ -36,23 +32,13 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) static u32 tcp_scalable_ssthresh(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); - struct scalable *ca = inet_csk_ca(sk); - - ca->loss_cwnd = tp->snd_cwnd; return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); } -static u32 tcp_scalable_cwnd_undo(struct sock *sk) -{ - const struct scalable *ca = inet_csk_ca(sk); - - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); -} - static struct tcp_congestion_ops tcp_scalable __read_mostly = { .ssthresh = tcp_scalable_ssthresh, - .undo_cwnd = tcp_scalable_cwnd_undo, + .undo_cwnd = tcp_reno_undo_cwnd, .cong_avoid = tcp_scalable_cong_avoid, .owner = THIS_MODULE, diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c index 76005d4b8dfc..6fcf482d611b 100644 --- a/net/ipv4/tcp_veno.c +++ b/net/ipv4/tcp_veno.c @@ -30,7 +30,6 @@ struct veno { u32 basertt; /* the min of all Veno rtt measurements seen (in usec) */ u32 inc; /* decide whether to increase cwnd */ u32 diff; /* calculate the diff rate */ - u32 loss_cwnd; /* cwnd when loss occured */ }; /* There are several situations when we must "re-start" Veno: @@ 
-194,7 +193,6 @@ static u32 tcp_veno_ssthresh(struct sock *sk) const struct tcp_sock *tp = tcp_sk(sk); struct veno *veno = inet_csk_ca(sk); - veno->loss_cwnd = tp->snd_cwnd; if (veno->diff < beta) /* in "non-congestive state", cut cwnd by 1/5 */ return max(tp->snd_cwnd * 4 / 5, 2U); @@ -203,17 +201,10 @@ static u32 tcp_veno_ssthresh(struct sock *sk) return max(tp->snd_cwnd >> 1U, 2U); } -static u32 tcp_veno_cwnd_undo(struct sock *sk) -{ - const struct veno *veno = inet_csk_ca(sk); - - return max(tcp_sk(sk)->snd_cwnd, veno->loss_cwnd); -} - static struct tcp_congestion_ops tcp_veno __read_mostly = { .init = tcp_veno_init, .ssthresh = tcp_veno_ssthresh, - .undo_cwnd = tcp_veno_cwnd_undo, + .undo_cwnd = tcp_reno_undo_cwnd, .cong_avoid = tcp_veno_cong_avoid, .pkts_acked = tcp_veno_pkts_acked, .set_state = tcp_veno_state, diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c index e6ff99c4bd3b..96e829b2e2fc 100644 --- a/net/ipv4/tcp_yeah.c +++ b/net/ipv4/tcp_yeah.c @@ -37,7 +37,6 @@ struct yeah { u32 fast_count; u32 pkts_acked; - u32 loss_cwnd; }; static void tcp_yeah_init(struct sock *sk) @@ -220,22 +219,14 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) yeah->fast_count = 0; yeah->reno_count = max(yeah->reno_count>>1, 2U); - yeah->loss_cwnd = tp->snd_cwnd; return max_t(int, tp->snd_cwnd - reduction, 2); } -static u32 tcp_yeah_cwnd_undo(struct sock *sk) -{ - const struct yeah *yeah = inet_csk_ca(sk); - - return max(tcp_sk(sk)->snd_cwnd, yeah->loss_cwnd); -} - static struct tcp_congestion_ops tcp_yeah __read_mostly = { .init = tcp_yeah_init, .ssthresh = tcp_yeah_ssthresh, - .undo_cwnd = tcp_yeah_cwnd_undo, + .undo_cwnd = tcp_reno_undo_cwnd, .cong_avoid = tcp_yeah_cong_avoid, .set_state = tcp_vegas_state, .cwnd_event = tcp_vegas_cwnd_event, -- cgit v1.2.3-55-g7522 From 5f6b4e14cada6ddc662b80cbd670d9cd2922aea1 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 3 Aug 2017 21:33:27 -0700 Subject: net: dsa: User per-cpu 64-bit statistics During testing with a background iperf pushing 1Gbit/sec worth of traffic and having both ifconfig and ethtool collect statistics, we could see quite frequent deadlocks. Convert the often accessed DSA slave network devices statistics to per-cpu 64-bit statistics to remove these deadlocks and provide fast efficient statistics updates. Fixes: f613ed665bb3 ("net: dsa: Add support for 64-bit statistics") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/dsa.c | 10 +++++--- net/dsa/dsa_priv.h | 2 +- net/dsa/slave.c | 72 +++++++++++++++++++++++++++++++++++++++--------------- 3 files changed, 59 insertions(+), 25 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 0ba842c08dd3..a91e520e735f 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -190,6 +190,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, { struct dsa_switch_tree *dst = dev->dsa_ptr; struct sk_buff *nskb = NULL; + struct pcpu_sw_netstats *s; struct dsa_slave_priv *p; if (unlikely(dst == NULL)) { @@ -213,10 +214,11 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); - u64_stats_update_begin(&p->stats64.syncp); - p->stats64.rx_packets++; - p->stats64.rx_bytes += skb->len; - u64_stats_update_end(&p->stats64.syncp); + s = this_cpu_ptr(p->stats64); + u64_stats_update_begin(&s->syncp); + s->rx_packets++; + s->rx_bytes += skb->len; + u64_stats_update_end(&s->syncp); netif_receive_skb(skb); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 7aa0656296c2..306cff229def 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -77,7 +77,7 @@ struct dsa_slave_priv { struct sk_buff * (*xmit)(struct sk_buff *skb, struct net_device *dev); - struct pcpu_sw_netstats stats64; + struct pcpu_sw_netstats *stats64; /* DSA port data, such as switch, port index, etc. */ struct dsa_port *dp; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 83252e8426d7..75c5c5808220 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -352,12 +352,14 @@ static inline netdev_tx_t dsa_netpoll_send_skb(struct dsa_slave_priv *p, static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); + struct pcpu_sw_netstats *s; struct sk_buff *nskb; - u64_stats_update_begin(&p->stats64.syncp); - p->stats64.tx_packets++; - p->stats64.tx_bytes += skb->len; - u64_stats_update_end(&p->stats64.syncp); + s = this_cpu_ptr(p->stats64); + u64_stats_update_begin(&s->syncp); + s->tx_packets++; + s->tx_bytes += skb->len; + u64_stats_update_end(&s->syncp); /* Transmit function may have to reallocate the original SKB, * in which case it must have freed it. Only free it here on error. 
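The conversion in progress here pairs the u64_stats protocol sketched earlier with per-CPU allocation, so concurrent CPUs never contend on the same counters. In outline (generic names again, not the DSA code itself):

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct example_priv {
        struct pcpu_sw_netstats __percpu *stats64;
};

/* Probe path: netdev_alloc_pcpu_stats() allocates one pcpu_sw_netstats per
 * possible CPU and runs u64_stats_init() on each instance.
 */
static int example_alloc_stats(struct example_priv *p)
{
        p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        return p->stats64 ? 0 : -ENOMEM;
}

/* Readers sum a consistent snapshot from every possible CPU; writers update
 * this_cpu_ptr(p->stats64) under the same begin/end pairs shown earlier.
 */
static void example_sum_tx(struct example_priv *p, u64 *pkts, u64 *bytes)
{
        unsigned int start;
        u64 tp, tb;
        int cpu;

        *pkts = 0;
        *bytes = 0;
        for_each_possible_cpu(cpu) {
                struct pcpu_sw_netstats *s = per_cpu_ptr(p->stats64, cpu);

                do {
                        start = u64_stats_fetch_begin_irq(&s->syncp);
                        tp = s->tx_packets;
                        tb = s->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&s->syncp, start));

                *pkts += tp;
                *bytes += tb;
        }
}

Teardown must pair the allocation with free_percpu(), which is what the error paths added to dsa_slave_create() further down take care of.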
@@ -596,15 +598,26 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev, { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->dp->ds; + struct pcpu_sw_netstats *s; unsigned int start; - - do { - start = u64_stats_fetch_begin_irq(&p->stats64.syncp); - data[0] = p->stats64.tx_packets; - data[1] = p->stats64.tx_bytes; - data[2] = p->stats64.rx_packets; - data[3] = p->stats64.rx_bytes; - } while (u64_stats_fetch_retry_irq(&p->stats64.syncp, start)); + int i; + + for_each_possible_cpu(i) { + u64 tx_packets, tx_bytes, rx_packets, rx_bytes; + + s = per_cpu_ptr(p->stats64, i); + do { + start = u64_stats_fetch_begin_irq(&s->syncp); + tx_packets = s->tx_packets; + tx_bytes = s->tx_bytes; + rx_packets = s->rx_packets; + rx_bytes = s->rx_bytes; + } while (u64_stats_fetch_retry_irq(&s->syncp, start)); + data[0] += tx_packets; + data[1] += tx_bytes; + data[2] += rx_packets; + data[3] += rx_bytes; + } if (ds->ops->get_ethtool_stats) ds->ops->get_ethtool_stats(ds, p->dp->index, data + 4); } @@ -879,16 +892,28 @@ static void dsa_slave_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct dsa_slave_priv *p = netdev_priv(dev); + struct pcpu_sw_netstats *s; unsigned int start; + int i; netdev_stats_to_stats64(stats, &dev->stats); - do { - start = u64_stats_fetch_begin_irq(&p->stats64.syncp); - stats->tx_packets = p->stats64.tx_packets; - stats->tx_bytes = p->stats64.tx_bytes; - stats->rx_packets = p->stats64.rx_packets; - stats->rx_bytes = p->stats64.rx_bytes; - } while (u64_stats_fetch_retry_irq(&p->stats64.syncp, start)); + for_each_possible_cpu(i) { + u64 tx_packets, tx_bytes, rx_packets, rx_bytes; + + s = per_cpu_ptr(p->stats64, i); + do { + start = u64_stats_fetch_begin_irq(&s->syncp); + tx_packets = s->tx_packets; + tx_bytes = s->tx_bytes; + rx_packets = s->rx_packets; + rx_bytes = s->rx_bytes; + } while (u64_stats_fetch_retry_irq(&s->syncp, start)); + + stats->tx_packets += tx_packets; + stats->tx_bytes += tx_bytes; + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + } } void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops) @@ -1202,7 +1227,11 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, slave_dev->vlan_features = master->vlan_features; p = netdev_priv(slave_dev); - u64_stats_init(&p->stats64.syncp); + p->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!p->stats64) { + free_netdev(slave_dev); + return -ENOMEM; + } p->dp = &ds->ports[port]; INIT_LIST_HEAD(&p->mall_tc_list); p->xmit = dst->tag_ops->xmit; @@ -1217,6 +1246,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, netdev_err(master, "error %d registering interface %s\n", ret, slave_dev->name); ds->ports[port].netdev = NULL; + free_percpu(p->stats64); free_netdev(slave_dev); return ret; } @@ -1227,6 +1257,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, if (ret) { netdev_err(master, "error %d setting up slave phy\n", ret); unregister_netdev(slave_dev); + free_percpu(p->stats64); free_netdev(slave_dev); return ret; } @@ -1249,6 +1280,7 @@ void dsa_slave_destroy(struct net_device *slave_dev) of_phy_deregister_fixed_link(port_dn); } unregister_netdev(slave_dev); + free_percpu(p->stats64); free_netdev(slave_dev); } -- cgit v1.2.3-55-g7522 From fb84af8a4397ee664a51c2da1dd64fb3d582ee24 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 4 Aug 2017 12:14:00 -0700 Subject: netvsc: fix rtnl deadlock on unregister of vf With new transparent VF support, it is possible to get a deadlock when 
some of the deferred work is running and the unregister_vf is trying to cancel the work element. The solution is to use trylock and reschedule (similar to bonding and team device). Reported-by: Vitaly Kuznetsov Fixes: 0c195567a8f6 ("netvsc: transparent VF management") Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c71728d82049..e75c0f852a63 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1601,7 +1601,11 @@ static void netvsc_vf_setup(struct work_struct *w) struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); struct net_device *vf_netdev; - rtnl_lock(); + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); if (vf_netdev) __netvsc_vf_setup(ndev, vf_netdev); @@ -1655,7 +1659,11 @@ static void netvsc_vf_update(struct work_struct *w) struct net_device *vf_netdev; bool vf_is_up; - rtnl_lock(); + if (!rtnl_trylock()) { + schedule_work(w); + return; + } + vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); if (!vf_netdev) goto unlock; -- cgit v1.2.3-55-g7522 From 2d1611aff3f22a58ae8331ada6c1592e784ccb93 Mon Sep 17 00:00:00 2001 From: Alexandru Gagniuc Date: Fri, 4 Aug 2017 13:08:51 -0700 Subject: net: stmmac: Add Adaptrum Anarion GMAC glue layer Before the GMAC on the Anarion chip can be used, the PHY interface selection must be configured with the DWMAC block in reset. This layer covers a block containing only two registers. Although it is possible to model this as a reset controller and use the "resets" property of stmmac, it's much more intuitive to include this in the glue layer instead. At this time only RGMII is supported, because it is the only mode which has been validated hardware-wise. Signed-off-by: Alexandru Gagniuc Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/Kconfig | 9 ++ drivers/net/ethernet/stmicro/stmmac/Makefile | 1 + .../net/ethernet/stmicro/stmmac/dwmac-anarion.c | 152 +++++++++++++++++++++ 3 files changed, 162 insertions(+) create mode 100644 drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 85c0e41f8021..97035766c291 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -45,6 +45,15 @@ config DWMAC_GENERIC platform specific code to function or is using platform data for setup. +config DWMAC_ANARION + tristate "Adaptrum Anarion GMAC support" + default ARC + depends on OF && (ARC || COMPILE_TEST) + help + Support for Adaptrum Anarion GMAC Ethernet controller. + + This selects the Anarion SoC glue layer support for the stmmac driver. + config DWMAC_IPQ806X tristate "QCA IPQ806x DWMAC support" default ARCH_QCOM diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index fd4937a7fcab..238307fadcdb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -7,6 +7,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ # Ordering matters. Generic driver must be last. 
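The rtnl_trylock()-and-reschedule idiom adopted by the netvsc fix above (and, as its changelog notes, by bonding and team) is worth seeing in isolation. A stripped-down sketch with a hypothetical work handler:

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

/* Hypothetical deferred-work handler that needs the RTNL. */
static void example_deferred_work(struct work_struct *w)
{
        /* A task already holding the RTNL may be waiting in
         * cancel_work_sync() for this handler to finish; blocking on
         * rtnl_lock() here would deadlock. Back off and requeue instead.
         */
        if (!rtnl_trylock()) {
                schedule_work(w);
                return;
        }

        /* ... perform the RTNL-protected update ... */

        rtnl_unlock();
}

The cost is that the work item cycles through the workqueue while the RTNL is contended, which is acceptable for rare reconfiguration events but would be a poor fit for a hot path.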
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o +obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c new file mode 100644 index 000000000000..85ce80c600c7 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c @@ -0,0 +1,152 @@ +/* + * Adaptrum Anarion DWMAC glue layer + * + * Copyright (C) 2017, Adaptrum, Inc. + * (Written by Alexandru Gagniuc for Adaptrum, Inc.) + * Licensed under the GPLv2 or (at your option) any later version. + */ + +#include +#include +#include +#include + +#include "stmmac.h" +#include "stmmac_platform.h" + +#define GMAC_RESET_CONTROL_REG 0 +#define GMAC_SW_CONFIG_REG 4 +#define GMAC_CONFIG_INTF_SEL_MASK (0x7 << 0) +#define GMAC_CONFIG_INTF_RGMII (0x1 << 0) + +struct anarion_gmac { + uintptr_t ctl_block; + uint32_t phy_intf_sel; +}; + +static uint32_t gmac_read_reg(struct anarion_gmac *gmac, uint8_t reg) +{ + return readl((void *)(gmac->ctl_block + reg)); +}; + +static void gmac_write_reg(struct anarion_gmac *gmac, uint8_t reg, uint32_t val) +{ + writel(val, (void *)(gmac->ctl_block + reg)); +} + +static int anarion_gmac_init(struct platform_device *pdev, void *priv) +{ + uint32_t sw_config; + struct anarion_gmac *gmac = priv; + + /* Reset logic, configure interface mode, then release reset. SIMPLE! */ + gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1); + + sw_config = gmac_read_reg(gmac, GMAC_SW_CONFIG_REG); + sw_config &= ~GMAC_CONFIG_INTF_SEL_MASK; + sw_config |= (gmac->phy_intf_sel & GMAC_CONFIG_INTF_SEL_MASK); + gmac_write_reg(gmac, GMAC_SW_CONFIG_REG, sw_config); + + gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 0); + + return 0; +} + +static void anarion_gmac_exit(struct platform_device *pdev, void *priv) +{ + struct anarion_gmac *gmac = priv; + + gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1); +} + +static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev) +{ + int phy_mode; + struct resource *res; + void __iomem *ctl_block; + struct anarion_gmac *gmac; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + ctl_block = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ctl_block)) { + dev_err(&pdev->dev, "Cannot get reset region (%ld)!\n", + PTR_ERR(ctl_block)); + return ctl_block; + } + + gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL); + if (!gmac) + return ERR_PTR(-ENOMEM); + + gmac->ctl_block = (uintptr_t)ctl_block; + + phy_mode = of_get_phy_mode(pdev->dev.of_node); + switch (phy_mode) { + case PHY_INTERFACE_MODE_RGMII: /* Fall through */ + case PHY_INTERFACE_MODE_RGMII_ID /* Fall through */: + case PHY_INTERFACE_MODE_RGMII_RXID: /* Fall through */ + case PHY_INTERFACE_MODE_RGMII_TXID: + gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII; + break; + default: + dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", + phy_mode); + return ERR_PTR(-ENOTSUPP); + } + + return gmac; +} + +static int anarion_dwmac_probe(struct platform_device *pdev) +{ + int ret; + struct anarion_gmac *gmac; + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + gmac = anarion_config_dt(pdev); + if (IS_ERR(gmac)) + return PTR_ERR(gmac); + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + 
plat_dat->init = anarion_gmac_init; + plat_dat->exit = anarion_gmac_exit; + anarion_gmac_init(pdev, gmac); + plat_dat->bsp_priv = gmac; + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) { + stmmac_remove_config_dt(pdev, plat_dat); + return ret; + } + + return 0; +} + +static const struct of_device_id anarion_dwmac_match[] = { + { .compatible = "adaptrum,anarion-gmac" }, + { } +}; +MODULE_DEVICE_TABLE(of, anarion_dwmac_match); + +static struct platform_driver anarion_dwmac_driver = { + .probe = anarion_dwmac_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "anarion-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = anarion_dwmac_match, + }, +}; +module_platform_driver(anarion_dwmac_driver); + +MODULE_DESCRIPTION("Adaptrum Anarion DWMAC specific glue layer"); +MODULE_AUTHOR("Alexandru Gagniuc "); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-55-g7522 From 1126f470843a044250ef8863b358607df3cd10c8 Mon Sep 17 00:00:00 2001 From: Alexandru Gagniuc Date: Fri, 4 Aug 2017 13:08:52 -0700 Subject: dt-bindings: net: Document bindings for anarion-gmac Signed-off-by: Alexandru Gagniuc Signed-off-by: David S. Miller --- .../devicetree/bindings/net/anarion-gmac.txt | 25 ++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/anarion-gmac.txt diff --git a/Documentation/devicetree/bindings/net/anarion-gmac.txt b/Documentation/devicetree/bindings/net/anarion-gmac.txt new file mode 100644 index 000000000000..fe678965ae69 --- /dev/null +++ b/Documentation/devicetree/bindings/net/anarion-gmac.txt @@ -0,0 +1,25 @@ +* Adaptrum Anarion ethernet controller + +This device is a platform glue layer for stmmac. +Please see stmmac.txt for the other unchanged properties. + +Required properties: + - compatible: Should be "adaptrum,anarion-gmac", "snps,dwmac" + - phy-mode: Should be "rgmii". Other modes are not currently supported. + + +Examples: + + gmac1: ethernet@f2014000 { + compatible = "adaptrum,anarion-gmac", "snps,dwmac"; + reg = <0xf2014000 0x4000>, <0xf2018100 8>; + + interrupt-parent = <&core_intc>; + interrupts = <21>; + interrupt-names = "macirq"; + + clocks = <&core_clk>; + clock-names = "stmmaceth"; + + phy-mode = "rgmii"; + }; -- cgit v1.2.3-55-g7522 From 233e7936c84b7d7dd90c10e2ba27abb5ab42956f Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 19:59:51 +0800 Subject: sctp: remove the typedef sctp_lower_cwnd_t This patch is to remove the typedef sctp_lower_cwnd_t, and replace with enum sctp_lower_cwnd in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 4 ++-- include/net/sctp/structs.h | 3 ++- net/sctp/transport.c | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 9b18044c551e..761064ee3ac5 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -378,12 +378,12 @@ typedef enum { } sctp_retransmit_reason_t; /* Reasons to lower cwnd. 
*/ -typedef enum { +enum sctp_lower_cwnd { SCTP_LOWER_CWND_T3_RTX, SCTP_LOWER_CWND_FAST_RTX, SCTP_LOWER_CWND_ECNE, SCTP_LOWER_CWND_INACTIVE, -} sctp_lower_cwnd_t; +}; /* SCTP-AUTH Necessary constants */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 66cd7639b912..53802d8872d7 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -950,7 +950,8 @@ int sctp_transport_hold(struct sctp_transport *); void sctp_transport_put(struct sctp_transport *); void sctp_transport_update_rto(struct sctp_transport *, __u32); void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32); -void sctp_transport_lower_cwnd(struct sctp_transport *, sctp_lower_cwnd_t); +void sctp_transport_lower_cwnd(struct sctp_transport *t, + enum sctp_lower_cwnd reason); void sctp_transport_burst_limited(struct sctp_transport *); void sctp_transport_burst_reset(struct sctp_transport *); unsigned long sctp_transport_timeout(struct sctp_transport *); diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 80a97c8501a7..2d9bd3776bc8 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -490,7 +490,7 @@ void sctp_transport_raise_cwnd(struct sctp_transport *transport, * detected. */ void sctp_transport_lower_cwnd(struct sctp_transport *transport, - sctp_lower_cwnd_t reason) + enum sctp_lower_cwnd reason) { struct sctp_association *asoc = transport->asoc; -- cgit v1.2.3-55-g7522 From 125c29820252bfa5bf8081e75618e4ee7e9487da Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 19:59:52 +0800 Subject: sctp: remove the typedef sctp_retransmit_reason_t This patch is to remove the typedef sctp_retransmit_reason_t, and replace with enum sctp_retransmit_reason in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 4 ++-- include/net/sctp/structs.h | 4 ++-- net/sctp/outqueue.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 761064ee3ac5..922fba5880d6 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -370,12 +370,12 @@ typedef enum { peer */ /* Reasons to retransmit. */ -typedef enum { +enum sctp_retransmit_reason { SCTP_RTXR_T3_RTX, SCTP_RTXR_FAST_RTX, SCTP_RTXR_PMTUD, SCTP_RTXR_T1_RTX, -} sctp_retransmit_reason_t; +}; /* Reasons to lower cwnd. */ enum sctp_lower_cwnd { diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 53802d8872d7..5e872acee6e3 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1054,8 +1054,8 @@ int sctp_outq_sack(struct sctp_outq *, struct sctp_chunk *); int sctp_outq_is_empty(const struct sctp_outq *); void sctp_outq_restart(struct sctp_outq *); -void sctp_retransmit(struct sctp_outq *, struct sctp_transport *, - sctp_retransmit_reason_t); +void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, + enum sctp_retransmit_reason reason); void sctp_retransmit_mark(struct sctp_outq *, struct sctp_transport *, __u8); void sctp_outq_uncork(struct sctp_outq *, gfp_t gfp); void sctp_prsctp_prune(struct sctp_association *asoc, diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index d2a8adfd4a6f..08ee0ed9a0c6 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -534,7 +534,7 @@ void sctp_retransmit_mark(struct sctp_outq *q, * one packet out. 
*/ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, - sctp_retransmit_reason_t reason) + enum sctp_retransmit_reason reason) { struct net *net = sock_net(q->asoc->base.sk); -- cgit v1.2.3-55-g7522 From 701ef3e6c74be771a76be39817941e68e7228644 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 19:59:53 +0800 Subject: sctp: remove the typedef sctp_scope_policy_t This patch is to remove the typedef sctp_scope_policy_t and keep it's members as an anonymous enum. It is also to define SCTP_SCOPE_POLICY_MAX to replace the num 3 in sysctl.c to make codes clear. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 6 ++++-- net/sctp/sysctl.c | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 922fba5880d6..acb03eb64842 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -341,12 +341,14 @@ typedef enum { SCTP_SCOPE_UNUSABLE, /* IPv4 unusable addresses */ } sctp_scope_t; -typedef enum { +enum { SCTP_SCOPE_POLICY_DISABLE, /* Disable IPv4 address scoping */ SCTP_SCOPE_POLICY_ENABLE, /* Enable IPv4 address scoping */ SCTP_SCOPE_POLICY_PRIVATE, /* Follow draft but allow IPv4 private addresses */ SCTP_SCOPE_POLICY_LINK, /* Follow draft but allow IPv4 link local addresses */ -} sctp_scope_policy_t; +}; + +#define SCTP_SCOPE_POLICY_MAX SCTP_SCOPE_POLICY_LINK /* Based on IPv4 scoping , * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24, diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 0e732f68c2bf..ef7ca44d6e6a 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -46,7 +46,7 @@ static int timer_max = 86400000; /* ms in one day */ static int int_max = INT_MAX; static int sack_timer_min = 1; static int sack_timer_max = 500; -static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ +static int addr_scope_max = SCTP_SCOPE_POLICY_MAX; static int rwnd_scale_max = 16; static int rto_alpha_min = 0; static int rto_beta_min = 0; -- cgit v1.2.3-55-g7522 From 1c662018d2d41ecc5550cbd74d29d2d32c164ed3 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 19:59:54 +0800 Subject: sctp: remove the typedef sctp_scope_t This patch is to remove the typedef sctp_scope_t, and replace with enum sctp_scope in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 4 ++-- include/net/sctp/sctp.h | 4 ++-- include/net/sctp/structs.h | 17 +++++++++-------- net/sctp/associola.c | 17 ++++++++--------- net/sctp/bind_addr.c | 20 ++++++++++---------- net/sctp/ipv6.c | 6 +++--- net/sctp/protocol.c | 6 +++--- net/sctp/sm_make_chunk.c | 6 +++--- net/sctp/socket.c | 4 ++-- 9 files changed, 42 insertions(+), 42 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index acb03eb64842..0503bb70e5d9 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -333,13 +333,13 @@ typedef enum { * At this point, the IPv6 scopes will be mapped to these internal scopes * as much as possible. 
*/ -typedef enum { +enum sctp_scope { SCTP_SCOPE_GLOBAL, /* IPv4 global addresses */ SCTP_SCOPE_PRIVATE, /* IPv4 private addresses */ SCTP_SCOPE_LINK, /* IPv4 link local address */ SCTP_SCOPE_LOOPBACK, /* IPv4 loopback address */ SCTP_SCOPE_UNUSABLE, /* IPv4 unusable addresses */ -} sctp_scope_t; +}; enum { SCTP_SCOPE_POLICY_DISABLE, /* Disable IPv4 address scoping */ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 84650fed1e6a..ca66b033ec38 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -94,8 +94,8 @@ /* * sctp/protocol.c */ -int sctp_copy_local_addr_list(struct net *, struct sctp_bind_addr *, - sctp_scope_t, gfp_t gfp, int flags); +int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *addr, + enum sctp_scope, gfp_t gfp, int flags); struct sctp_pf *sctp_get_pf_specific(sa_family_t family); int sctp_register_pf(struct sctp_pf *, sa_family_t); void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int); diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 5e872acee6e3..d771d418481f 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -449,7 +449,7 @@ struct sctp_af { int (*addr_valid) (union sctp_addr *, struct sctp_sock *, const struct sk_buff *); - sctp_scope_t (*scope) (union sctp_addr *); + enum sctp_scope (*scope)(union sctp_addr *); void (*inaddr_any) (union sctp_addr *, __be16); int (*is_any) (const union sctp_addr *); int (*available) (union sctp_addr *, @@ -1111,7 +1111,7 @@ void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port); void sctp_bind_addr_free(struct sctp_bind_addr *); int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest, const struct sctp_bind_addr *src, - sctp_scope_t scope, gfp_t gfp, + enum sctp_scope scope, gfp_t gfp, int flags); int sctp_bind_addr_dup(struct sctp_bind_addr *dest, const struct sctp_bind_addr *src, @@ -1135,8 +1135,9 @@ union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len, __u16 port, gfp_t gfp); -sctp_scope_t sctp_scope(const union sctp_addr *); -int sctp_in_scope(struct net *net, const union sctp_addr *addr, const sctp_scope_t scope); +enum sctp_scope sctp_scope(const union sctp_addr *addr); +int sctp_in_scope(struct net *net, const union sctp_addr *addr, + const enum sctp_scope scope); int sctp_is_any(struct sock *sk, const union sctp_addr *addr); int sctp_is_ep_boundall(struct sock *sk); @@ -1925,8 +1926,8 @@ static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base) struct sctp_association * -sctp_association_new(const struct sctp_endpoint *, const struct sock *, - sctp_scope_t scope, gfp_t gfp); +sctp_association_new(const struct sctp_endpoint *ep, const struct sock *sk, + enum sctp_scope scope, gfp_t gfp); void sctp_association_free(struct sctp_association *); void sctp_association_put(struct sctp_association *); void sctp_association_hold(struct sctp_association *); @@ -1967,8 +1968,8 @@ void sctp_assoc_set_primary(struct sctp_association *, struct sctp_transport *); void sctp_assoc_del_nonprimary_peers(struct sctp_association *, struct sctp_transport *); -int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *, - sctp_scope_t, gfp_t); +int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, + enum sctp_scope scope, gfp_t gfp); int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *, struct sctp_cookie*, gfp_t gfp); diff --git a/net/sctp/associola.c 
b/net/sctp/associola.c index 40ec83679d6e..4c1f1bb2aaad 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -63,11 +63,11 @@ static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc); /* 1st Level Abstractions. */ /* Initialize a new association from provided memory. */ -static struct sctp_association *sctp_association_init(struct sctp_association *asoc, - const struct sctp_endpoint *ep, - const struct sock *sk, - sctp_scope_t scope, - gfp_t gfp) +static struct sctp_association *sctp_association_init( + struct sctp_association *asoc, + const struct sctp_endpoint *ep, + const struct sock *sk, + enum sctp_scope scope, gfp_t gfp) { struct net *net = sock_net(sk); struct sctp_sock *sp; @@ -301,9 +301,8 @@ fail_init: /* Allocate and initialize a new association */ struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, - const struct sock *sk, - sctp_scope_t scope, - gfp_t gfp) + const struct sock *sk, + enum sctp_scope scope, gfp_t gfp) { struct sctp_association *asoc; @@ -1564,7 +1563,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len) * local endpoint and the remote peer. */ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, - sctp_scope_t scope, gfp_t gfp) + enum sctp_scope scope, gfp_t gfp) { int flags; diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index 1ebc184a0e23..7df3704982f5 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -45,9 +45,9 @@ #include /* Forward declarations for internal helpers. */ -static int sctp_copy_one_addr(struct net *, struct sctp_bind_addr *, - union sctp_addr *, sctp_scope_t scope, gfp_t gfp, - int flags); +static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest, + union sctp_addr *addr, enum sctp_scope scope, + gfp_t gfp, int flags); static void sctp_bind_addr_clean(struct sctp_bind_addr *); /* First Level Abstractions. */ @@ -57,7 +57,7 @@ static void sctp_bind_addr_clean(struct sctp_bind_addr *); */ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest, const struct sctp_bind_addr *src, - sctp_scope_t scope, gfp_t gfp, + enum sctp_scope scope, gfp_t gfp, int flags) { struct sctp_sockaddr_entry *addr; @@ -440,9 +440,8 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, /* Copy out addresses from the global local address list. */ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest, - union sctp_addr *addr, - sctp_scope_t scope, gfp_t gfp, - int flags) + union sctp_addr *addr, enum sctp_scope scope, + gfp_t gfp, int flags) { int error = 0; @@ -485,9 +484,10 @@ int sctp_is_any(struct sock *sk, const union sctp_addr *addr) } /* Is 'addr' valid for 'scope'? */ -int sctp_in_scope(struct net *net, const union sctp_addr *addr, sctp_scope_t scope) +int sctp_in_scope(struct net *net, const union sctp_addr *addr, + enum sctp_scope scope) { - sctp_scope_t addr_scope = sctp_scope(addr); + enum sctp_scope addr_scope = sctp_scope(addr); /* The unusable SCTP addresses will not be considered with * any defined scopes. @@ -545,7 +545,7 @@ int sctp_is_ep_boundall(struct sock *sk) ********************************************************************/ /* What is the scope of 'addr'? 
*/ -sctp_scope_t sctp_scope(const union sctp_addr *addr) +enum sctp_scope sctp_scope(const union sctp_addr *addr) { struct sctp_af *af; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 107d7c912922..a2a1c1d08d51 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -243,8 +243,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, union sctp_addr *daddr = &t->ipaddr; union sctp_addr dst_saddr; struct in6_addr *final_p, final; + enum sctp_scope scope; __u8 matchlen = 0; - sctp_scope_t scope; memset(fl6, 0, sizeof(struct flowi6)); fl6->daddr = daddr->v6.sin6_addr; @@ -624,10 +624,10 @@ static int sctp_v6_addr_valid(union sctp_addr *addr, } /* What is the scope of 'addr'? */ -static sctp_scope_t sctp_v6_scope(union sctp_addr *addr) +static enum sctp_scope sctp_v6_scope(union sctp_addr *addr) { + enum sctp_scope retval; int v6scope; - sctp_scope_t retval; /* The IPv6 scope is really a set of bit fields. * See IFA_* in . Map to a generic SCTP scope. diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 852556d67ae3..fcd80feb293f 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -196,7 +196,7 @@ static void sctp_free_local_addr_list(struct net *net) /* Copy the local addresses which are valid for 'scope' into 'bp'. */ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp, - sctp_scope_t scope, gfp_t gfp, int copy_flags) + enum sctp_scope scope, gfp_t gfp, int copy_flags) { struct sctp_sockaddr_entry *addr; union sctp_addr laddr; @@ -400,9 +400,9 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) * IPv4 scoping can be controlled through sysctl option * net.sctp.addr_scope_policy */ -static sctp_scope_t sctp_v4_scope(union sctp_addr *addr) +static enum sctp_scope sctp_v4_scope(union sctp_addr *addr) { - sctp_scope_t retval; + enum sctp_scope retval; /* Check for unusable SCTP addresses. */ if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) { diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index d17e8d1f2ed9..a034d842e335 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1578,8 +1578,8 @@ struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, gfp_t gfp) { struct sctp_association *asoc; + enum sctp_scope scope; struct sk_buff *skb; - sctp_scope_t scope; /* Create the bare association. 
*/ scope = sctp_scope(sctp_source(chunk)); @@ -1701,7 +1701,7 @@ struct sctp_association *sctp_unpack_cookie( int headersize, bodysize, fixed_size; __u8 *digest = ep->digest; unsigned int len; - sctp_scope_t scope; + enum sctp_scope scope; struct sk_buff *skb = chunk->skb; ktime_t kt; @@ -2502,7 +2502,7 @@ static int sctp_process_param(struct sctp_association *asoc, int i; __u16 sat; int retval = 1; - sctp_scope_t scope; + enum sctp_scope scope; u32 stale; struct sctp_af *af; union sctp_addr_param *addr_param; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 1db478e34520..a1e2113806dd 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1055,7 +1055,7 @@ static int __sctp_connect(struct sock *sk, struct sctp_association *asoc2; struct sctp_transport *transport; union sctp_addr to; - sctp_scope_t scope; + enum sctp_scope scope; long timeo; int err = 0; int addrcnt = 0; @@ -1610,7 +1610,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) struct sctp_initmsg *sinit; sctp_assoc_t associd = 0; sctp_cmsgs_t cmsgs = { NULL }; - sctp_scope_t scope; + enum sctp_scope scope; bool fill_sinfo_ttl = false, wait_connect = false; struct sctp_datamsg *datamsg; int msg_flags = msg->msg_flags; -- cgit v1.2.3-55-g7522 From 0ceaeebe28d46e49c865f88a3e3ca75f6cf13e1f Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 19:59:55 +0800 Subject: sctp: remove the typedef sctp_transport_cmd_t This patch is to remove the typedef sctp_transport_cmd_t, and replace with enum sctp_transport_cmd in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 4 ++-- include/net/sctp/structs.h | 7 ++++--- net/sctp/associola.c | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 0503bb70e5d9..8ce6d3263e41 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -320,11 +320,11 @@ typedef enum { } sctp_xmit_t; /* These are the commands for manipulating transports. */ -typedef enum { +enum sctp_transport_cmd { SCTP_TRANSPORT_UP, SCTP_TRANSPORT_DOWN, SCTP_TRANSPORT_PF, -} sctp_transport_cmd_t; +}; /* These are the address scopes defined mainly for IPv4 addresses * based on draft of SCTP IPv4 scoping . 
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index d771d418481f..d098d1c4ed74 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1947,9 +1947,10 @@ void sctp_assoc_del_peer(struct sctp_association *asoc, const union sctp_addr *addr); void sctp_assoc_rm_peer(struct sctp_association *asoc, struct sctp_transport *peer); -void sctp_assoc_control_transport(struct sctp_association *, - struct sctp_transport *, - sctp_transport_cmd_t, sctp_sn_error_t); +void sctp_assoc_control_transport(struct sctp_association *asoc, + struct sctp_transport *transport, + enum sctp_transport_cmd command, + sctp_sn_error_t error); struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *, __u32); struct sctp_transport *sctp_assoc_is_match(struct sctp_association *, struct net *, diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 4c1f1bb2aaad..b53efed8ff71 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -796,7 +796,7 @@ void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc, */ void sctp_assoc_control_transport(struct sctp_association *asoc, struct sctp_transport *transport, - sctp_transport_cmd_t command, + enum sctp_transport_cmd command, sctp_sn_error_t error) { struct sctp_ulpevent *event; -- cgit v1.2.3-55-g7522 From 8496561430df6e2c21b4ed37bd93604d3acaf5d6 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 19:59:56 +0800 Subject: sctp: remove the typedef sctp_sock_state_t This patch is to remove the typedef sctp_sock_state_t, and replace with enum sctp_sock_state in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 6 +++--- include/net/sctp/sctp.h | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 8ce6d3263e41..049868ea7ae9 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -42,7 +42,7 @@ #include #include /* For ipv6hdr. */ -#include /* For TCP states used in sctp_sock_state_t */ +#include /* For TCP states used in enum sctp_sock_state */ /* Value used for stream negotiation. */ enum { SCTP_MAX_STREAM = 0xffff }; @@ -214,13 +214,13 @@ typedef enum { * - A socket in SCTP_SS_ESTABLISHED state indicates that it has a single * association. */ -typedef enum { +enum sctp_sock_state { SCTP_SS_CLOSED = TCP_CLOSE, SCTP_SS_LISTENING = TCP_LISTEN, SCTP_SS_ESTABLISHING = TCP_SYN_SENT, SCTP_SS_ESTABLISHED = TCP_ESTABLISHED, SCTP_SS_CLOSING = TCP_CLOSE_WAIT, -} sctp_sock_state_t; +}; /* These functions map various type to printable names. */ const char *sctp_cname(const sctp_subtype_t); /* chunk types */ diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index ca66b033ec38..0022bc713434 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -565,7 +565,8 @@ static inline int __sctp_state(const struct sctp_association *asoc, /* Is the socket in this state? 
*/ #define sctp_sstate(sk, state) __sctp_sstate((sk), (SCTP_SS_##state)) -static inline int __sctp_sstate(const struct sock *sk, sctp_sock_state_t state) +static inline int __sctp_sstate(const struct sock *sk, + enum sctp_sock_state state) { return sk->sk_state == state; } -- cgit v1.2.3-55-g7522 From 86b36f2a9b9ea58dd2100b0e6f1f45a1f67ee95e Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 19:59:57 +0800 Subject: sctp: remove the typedef sctp_xmit_t This patch is to remove the typedef sctp_xmit_t, and replace with enum sctp_xmit in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 4 +-- include/net/sctp/structs.h | 9 ++++--- net/sctp/output.c | 60 ++++++++++++++++++++++---------------------- net/sctp/outqueue.c | 8 +++--- 4 files changed, 41 insertions(+), 40 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 049868ea7ae9..311b2e3773e8 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -312,12 +312,12 @@ enum { SCTP_MAX_GABS = 16 }; /* These return values describe the success or failure of a number of * routines which form the lower interface to SCTP_outqueue. */ -typedef enum { +enum sctp_xmit { SCTP_XMIT_OK, SCTP_XMIT_PMTU_FULL, SCTP_XMIT_RWND_FULL, SCTP_XMIT_DELAY, -} sctp_xmit_t; +}; /* These are the commands for manipulating transports. */ enum sctp_transport_cmd { diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index d098d1c4ed74..6fab67e7f1e5 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -697,10 +697,11 @@ struct sctp_packet { void sctp_packet_init(struct sctp_packet *, struct sctp_transport *, __u16 sport, __u16 dport); void sctp_packet_config(struct sctp_packet *, __u32 vtag, int); -sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *, - struct sctp_chunk *, int, gfp_t); -sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *, - struct sctp_chunk *); +enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet, + struct sctp_chunk *chunk, + int one_packet, gfp_t gfp); +enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet, + struct sctp_chunk *chunk); int sctp_packet_transmit(struct sctp_packet *, gfp_t); void sctp_packet_free(struct sctp_packet *); diff --git a/net/sctp/output.c b/net/sctp/output.c index 9d8504985744..4a865cd06d76 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -57,15 +57,15 @@ #include /* Forward declarations for private helpers. 
*/ -static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet, - struct sctp_chunk *chunk); -static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, - struct sctp_chunk *chunk); +static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet, + struct sctp_chunk *chunk); +static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet, + struct sctp_chunk *chunk); static void sctp_packet_append_data(struct sctp_packet *packet, - struct sctp_chunk *chunk); -static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet, - struct sctp_chunk *chunk, - u16 chunk_len); + struct sctp_chunk *chunk); +static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet, + struct sctp_chunk *chunk, + u16 chunk_len); static void sctp_packet_reset(struct sctp_packet *packet) { @@ -181,11 +181,11 @@ void sctp_packet_free(struct sctp_packet *packet) * as it can fit in the packet, but any more data that does not fit in this * packet can be sent only after receiving the COOKIE_ACK. */ -sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, - struct sctp_chunk *chunk, - int one_packet, gfp_t gfp) +enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet, + struct sctp_chunk *chunk, + int one_packet, gfp_t gfp) { - sctp_xmit_t retval; + enum sctp_xmit retval; pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__, packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1); @@ -218,12 +218,12 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, } /* Try to bundle an auth chunk into the packet. */ -static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt, - struct sctp_chunk *chunk) +static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt, + struct sctp_chunk *chunk) { struct sctp_association *asoc = pkt->transport->asoc; + enum sctp_xmit retval = SCTP_XMIT_OK; struct sctp_chunk *auth; - sctp_xmit_t retval = SCTP_XMIT_OK; /* if we don't have an association, we can't do authentication */ if (!asoc) @@ -254,10 +254,10 @@ static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt, } /* Try to bundle a SACK with the packet. */ -static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt, - struct sctp_chunk *chunk) +static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt, + struct sctp_chunk *chunk) { - sctp_xmit_t retval = SCTP_XMIT_OK; + enum sctp_xmit retval = SCTP_XMIT_OK; /* If sending DATA and haven't aleady bundled a SACK, try to * bundle one in to the packet. @@ -299,11 +299,11 @@ out: /* Append a chunk to the offered packet reporting back any inability to do * so. */ -static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet, - struct sctp_chunk *chunk) +static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet, + struct sctp_chunk *chunk) { - sctp_xmit_t retval = SCTP_XMIT_OK; __u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length)); + enum sctp_xmit retval = SCTP_XMIT_OK; /* Check to see if this chunk will fit into the packet */ retval = sctp_packet_will_fit(packet, chunk, chunk_len); @@ -353,10 +353,10 @@ finish: /* Append a chunk to the offered packet reporting back any inability to do * so. 
*/ -sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, - struct sctp_chunk *chunk) +enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet, + struct sctp_chunk *chunk) { - sctp_xmit_t retval = SCTP_XMIT_OK; + enum sctp_xmit retval = SCTP_XMIT_OK; pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk); @@ -653,8 +653,8 @@ out: ********************************************************************/ /* This private function check to see if a chunk can be added */ -static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, - struct sctp_chunk *chunk) +static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet, + struct sctp_chunk *chunk) { size_t datasize, rwnd, inflight, flight_size; struct sctp_transport *transport = packet->transport; @@ -762,12 +762,12 @@ static void sctp_packet_append_data(struct sctp_packet *packet, sctp_chunk_assign_ssn(chunk); } -static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet, - struct sctp_chunk *chunk, - u16 chunk_len) +static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet, + struct sctp_chunk *chunk, + u16 chunk_len) { + enum sctp_xmit retval = SCTP_XMIT_OK; size_t psize, pmtu, maxsize; - sctp_xmit_t retval = SCTP_XMIT_OK; psize = packet->size; if (packet->transport->asoc) diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 08ee0ed9a0c6..2966ff400755 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -594,14 +594,14 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, int rtx_timeout, int *start_timer) { - struct list_head *lqueue; struct sctp_transport *transport = pkt->transport; - sctp_xmit_t status; struct sctp_chunk *chunk, *chunk1; - int fast_rtx; + struct list_head *lqueue; + enum sctp_xmit status; int error = 0; int timer = 0; int done = 0; + int fast_rtx; lqueue = &q->retransmit; fast_rtx = q->fast_rtx; @@ -781,7 +781,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) struct sctp_transport *transport = NULL; struct sctp_transport *new_transport; struct sctp_chunk *chunk, *tmp; - sctp_xmit_t status; + enum sctp_xmit status; int error = 0; int start_timer = 0; int one_packet = 0; -- cgit v1.2.3-55-g7522 From 4785c7ae1848244da3435ba5269f6288c15975c7 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 19:59:58 +0800 Subject: sctp: remove the typedef sctp_ierror_t This patch is to remove the typedef sctp_ierror_t, and replace with enum sctp_ierror in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/constants.h | 5 ++--- net/sctp/sm_make_chunk.c | 23 ++++++++++++----------- net/sctp/sm_statefuns.c | 28 +++++++++++++++------------- 3 files changed, 29 insertions(+), 27 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 311b2e3773e8..9a694653b708 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -155,8 +155,7 @@ SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive) - sizeof(struct sctp_data_chunk))) /* Internal error codes */ -typedef enum { - +enum sctp_ierror { SCTP_IERROR_NO_ERROR = 0, SCTP_IERROR_BASE = 1000, SCTP_IERROR_NO_COOKIE, @@ -177,7 +176,7 @@ typedef enum { SCTP_IERROR_PROTO_VIOLATION, SCTP_IERROR_ERROR, SCTP_IERROR_ABORT, -} sctp_ierror_t; +}; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index a034d842e335..3a8fb1dffbc1 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2065,10 +2065,11 @@ static void sctp_process_ext_param(struct sctp_association *asoc, * SCTP_IERROR_ERROR - stop and report an error. * SCTP_IERROR_NOMEME - out of memory. */ -static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, - union sctp_params param, - struct sctp_chunk *chunk, - struct sctp_chunk **errp) +static enum sctp_ierror sctp_process_unk_param( + const struct sctp_association *asoc, + union sctp_params param, + struct sctp_chunk *chunk, + struct sctp_chunk **errp) { int retval = SCTP_IERROR_NO_ERROR; @@ -2117,13 +2118,13 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, * SCTP_IERROR_ERROR - stop processing, trigger an ERROR * SCTP_IERROR_NO_ERROR - continue with the chunk */ -static sctp_ierror_t sctp_verify_param(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - union sctp_params param, - enum sctp_cid cid, - struct sctp_chunk *chunk, - struct sctp_chunk **err_chunk) +static enum sctp_ierror sctp_verify_param(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + union sctp_params param, + enum sctp_cid cid, + struct sctp_chunk *chunk, + struct sctp_chunk **err_chunk) { struct sctp_hmac_algo_param *hmacs; int retval = SCTP_IERROR_NO_ERROR; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 8af90a5f23cd..5381697333df 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -144,11 +144,12 @@ static sctp_disposition_t sctp_sf_violation_chunk( void *arg, sctp_cmd_seq_t *commands); -static sctp_ierror_t sctp_sf_authenticate(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const sctp_subtype_t type, - struct sctp_chunk *chunk); +static enum sctp_ierror sctp_sf_authenticate( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + struct sctp_chunk *chunk); static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, const struct sctp_endpoint *ep, @@ -756,7 +757,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, */ if (chunk->auth_chunk) { struct sctp_chunk auth; - sctp_ierror_t ret; + enum sctp_ierror ret; /* Make sure that we and the peer are AUTH capable */ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { @@ -4077,11 +4078,12 @@ gen_shutdown: * * The return value is the disposition of the chunk. 
*/ -static sctp_ierror_t sctp_sf_authenticate(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const sctp_subtype_t type, - struct sctp_chunk *chunk) +static enum sctp_ierror sctp_sf_authenticate( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + struct sctp_chunk *chunk) { struct sctp_authhdr *auth_hdr; struct sctp_hmac *hmac; @@ -4159,10 +4161,10 @@ sctp_disposition_t sctp_sf_eat_auth(struct net *net, void *arg, sctp_cmd_seq_t *commands) { - struct sctp_authhdr *auth_hdr; struct sctp_chunk *chunk = arg; + struct sctp_authhdr *auth_hdr; struct sctp_chunk *err_chunk; - sctp_ierror_t error; + enum sctp_ierror error; /* Make sure that the peer has AUTH capable */ if (!asoc->peer.auth_capable) -- cgit v1.2.3-55-g7522 From 5210601945f5aedaf2d7f13a88436e27a39c6a8a Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 19:59:59 +0800 Subject: sctp: remove the typedef sctp_state_t This patch is to remove the typedef sctp_state_t, and replace with enum sctp_state in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/command.h | 4 ++-- include/net/sctp/constants.h | 4 ++-- include/net/sctp/sctp.h | 2 +- include/net/sctp/sm.h | 10 +++++----- include/net/sctp/structs.h | 2 +- net/sctp/endpointola.c | 2 +- net/sctp/primitive.c | 2 +- net/sctp/sm_sideeffect.c | 12 ++++++------ net/sctp/sm_statetable.c | 16 +++++++++------- 9 files changed, 28 insertions(+), 26 deletions(-) diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 1d5f6ff3f440..be12ec946628 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -126,7 +126,7 @@ typedef union { __u8 u8; int error; __be16 err; - sctp_state_t state; + enum sctp_state state; sctp_event_timeout_t to; struct sctp_chunk *chunk; struct sctp_association *asoc; @@ -167,7 +167,7 @@ SCTP_ARG_CONSTRUCTOR(U16, __u16, u16) SCTP_ARG_CONSTRUCTOR(U8, __u8, u8) SCTP_ARG_CONSTRUCTOR(ERROR, int, error) SCTP_ARG_CONSTRUCTOR(PERR, __be16, err) /* protocol error */ -SCTP_ARG_CONSTRUCTOR(STATE, sctp_state_t, state) +SCTP_ARG_CONSTRUCTOR(STATE, enum sctp_state, state) SCTP_ARG_CONSTRUCTOR(TO, sctp_event_timeout_t, to) SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk) SCTP_ARG_CONSTRUCTOR(ASOC, struct sctp_association *, asoc) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 9a694653b708..db9f40b657ba 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -181,7 +181,7 @@ enum sctp_ierror { /* SCTP state defines for internal state machine */ -typedef enum { +enum sctp_state { SCTP_STATE_CLOSED = 0, SCTP_STATE_COOKIE_WAIT = 1, @@ -192,7 +192,7 @@ typedef enum { SCTP_STATE_SHUTDOWN_RECEIVED = 6, SCTP_STATE_SHUTDOWN_ACK_SENT = 7, -} sctp_state_t; +}; #define SCTP_STATE_MAX SCTP_STATE_SHUTDOWN_ACK_SENT #define SCTP_STATE_NUM_STATES (SCTP_STATE_MAX + 1) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 0022bc713434..24ff7931d38c 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -558,7 +558,7 @@ static inline int __sctp_style(const struct sock *sk, sctp_socket_type_t style) /* Is the association in this state? 
*/ #define sctp_state(asoc, state) __sctp_state((asoc), (SCTP_STATE_##state)) static inline int __sctp_state(const struct sctp_association *asoc, - sctp_state_t state) + enum sctp_state state) { return asoc->state == state; } diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 860f378333b5..281e8d1e0752 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -175,10 +175,10 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire; /* Prototypes for utility support functions. */ __u8 sctp_get_chunk_type(struct sctp_chunk *chunk); -const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *, - sctp_event_t, - sctp_state_t, - sctp_subtype_t); +const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net, + sctp_event_t event_type, + enum sctp_state state, + sctp_subtype_t event_subtype); int sctp_chunk_iif(const struct sctp_chunk *); struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *, struct sctp_chunk *, @@ -313,7 +313,7 @@ struct sctp_chunk *sctp_process_strreset_resp( /* Prototypes for statetable processing. */ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, - sctp_state_t state, + enum sctp_state state, struct sctp_endpoint *, struct sctp_association *asoc, void *event_arg, diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 6fab67e7f1e5..fbe6e81b889b 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1577,7 +1577,7 @@ struct sctp_association { * * State takes values from SCTP_STATE_*. */ - sctp_state_t state; + enum sctp_state state; /* Overall : The overall association error count. * Error Count : [Clear this any time I get something.] diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 3d506b2f6193..4111c00a9d9d 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -383,7 +383,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work) struct sctp_chunk *chunk; struct sctp_inq *inqueue; sctp_subtype_t subtype; - sctp_state_t state; + enum sctp_state state; int error = 0; int first_time = 1; /* is this the first time through the loop */ diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c index f0553a022859..1fb5b9bb3c6d 100644 --- a/net/sctp/primitive.c +++ b/net/sctp/primitive.c @@ -54,7 +54,7 @@ int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \ void *arg) { \ int error = 0; \ sctp_event_t event_type; sctp_subtype_t subtype; \ - sctp_state_t state; \ + enum sctp_state state; \ struct sctp_endpoint *ep; \ \ event_type = SCTP_EVENT_T_PRIMITIVE; \ diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 5dda8c42b5f6..b77a81aa907b 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -53,7 +53,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, - sctp_state_t state, + enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, @@ -61,7 +61,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, sctp_cmd_seq_t *commands, gfp_t gfp); static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, - sctp_state_t state, + enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association **asoc, void *event_arg, @@ -843,7 +843,7 @@ static void sctp_cmd_assoc_update(sctp_cmd_seq_t *cmds, /* Helper function to change the state of an association. 
*/ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, struct sctp_association *asoc, - sctp_state_t state) + enum sctp_state state) { struct sock *sk = asoc->base.sk; @@ -1140,7 +1140,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc) * good place to start. */ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, - sctp_state_t state, + enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, @@ -1179,7 +1179,7 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, * This the master state function side effect processing function. *****************************************************************/ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, - sctp_state_t state, + enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association **asoc, void *event_arg, @@ -1265,7 +1265,7 @@ bail: /* This is the side-effect interpreter. */ static int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype, - sctp_state_t state, + enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 3e958c1c4b95..f46c11bffea7 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -52,9 +52,10 @@ other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES]; static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES]; -static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net, - enum sctp_cid cid, - sctp_state_t state); +static const sctp_sm_table_entry_t *sctp_chunk_event_lookup( + struct net *net, + enum sctp_cid cid, + enum sctp_state state); static const sctp_sm_table_entry_t bug = { @@ -78,7 +79,7 @@ static const sctp_sm_table_entry_t bug = { const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net, sctp_event_t event_type, - sctp_state_t state, + enum sctp_state state, sctp_subtype_t event_subtype) { switch (event_type) { @@ -967,9 +968,10 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE, }; -static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net, - enum sctp_cid cid, - sctp_state_t state) +static const sctp_sm_table_entry_t *sctp_chunk_event_lookup( + struct net *net, + enum sctp_cid cid, + enum sctp_state state) { if (state > SCTP_STATE_MAX) return &bug; -- cgit v1.2.3-55-g7522 From dc1e0e6eb8b2e2c6b5c6cdfa4f0cc789e9516de5 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 20:00:00 +0800 Subject: sctp: remove the typedef sctp_event_primitive_t This patch is to remove the typedef sctp_event_primitive_t, and replace with enum sctp_event_primitive in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index db9f40b657ba..162ee954a6af 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -110,7 +110,7 @@ typedef enum { #define SCTP_NUM_OTHER_TYPES (SCTP_EVENT_OTHER_MAX + 1) /* These are primitive requests from the ULP. 
*/ -typedef enum { +enum sctp_event_primitive { SCTP_PRIMITIVE_ASSOCIATE = 0, SCTP_PRIMITIVE_SHUTDOWN, SCTP_PRIMITIVE_ABORT, @@ -118,7 +118,7 @@ typedef enum { SCTP_PRIMITIVE_REQUESTHEARTBEAT, SCTP_PRIMITIVE_ASCONF, SCTP_PRIMITIVE_RECONF, -} sctp_event_primitive_t; +}; #define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_RECONF #define SCTP_NUM_PRIMITIVE_TYPES (SCTP_EVENT_PRIMITIVE_MAX + 1) @@ -133,7 +133,7 @@ typedef union { enum sctp_cid chunk; sctp_event_timeout_t timeout; sctp_event_other_t other; - sctp_event_primitive_t primitive; + enum sctp_event_primitive primitive; } sctp_subtype_t; #define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \ @@ -144,7 +144,7 @@ SCTP_ST_## _name (_type _arg) \ SCTP_SUBTYPE_CONSTRUCTOR(CHUNK, enum sctp_cid, chunk) SCTP_SUBTYPE_CONSTRUCTOR(TIMEOUT, sctp_event_timeout_t, timeout) SCTP_SUBTYPE_CONSTRUCTOR(OTHER, sctp_event_other_t, other) -SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, sctp_event_primitive_t, primitive) +SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive) #define sctp_chunk_is_data(a) (a->chunk_hdr->type == SCTP_CID_DATA) -- cgit v1.2.3-55-g7522 From a0f098d0385a40f9c4c8a2ce48d075f77ea7edd8 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 20:00:01 +0800 Subject: sctp: remove the typedef sctp_event_other_t This patch is to remove the typedef sctp_event_other_t, and replace with enum sctp_event_other in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 162ee954a6af..fd8a80ec573f 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -101,10 +101,10 @@ typedef enum { #define SCTP_EVENT_TIMEOUT_MAX SCTP_EVENT_TIMEOUT_AUTOCLOSE #define SCTP_NUM_TIMEOUT_TYPES (SCTP_EVENT_TIMEOUT_MAX + 1) -typedef enum { +enum sctp_event_other { SCTP_EVENT_NO_PENDING_TSN = 0, SCTP_EVENT_ICMP_PROTO_UNREACH, -} sctp_event_other_t; +}; #define SCTP_EVENT_OTHER_MAX SCTP_EVENT_ICMP_PROTO_UNREACH #define SCTP_NUM_OTHER_TYPES (SCTP_EVENT_OTHER_MAX + 1) @@ -132,7 +132,7 @@ enum sctp_event_primitive { typedef union { enum sctp_cid chunk; sctp_event_timeout_t timeout; - sctp_event_other_t other; + enum sctp_event_other other; enum sctp_event_primitive primitive; } sctp_subtype_t; @@ -143,7 +143,7 @@ SCTP_ST_## _name (_type _arg) \ SCTP_SUBTYPE_CONSTRUCTOR(CHUNK, enum sctp_cid, chunk) SCTP_SUBTYPE_CONSTRUCTOR(TIMEOUT, sctp_event_timeout_t, timeout) -SCTP_SUBTYPE_CONSTRUCTOR(OTHER, sctp_event_other_t, other) +SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other) SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive) -- cgit v1.2.3-55-g7522 From 19cd1592a24754e16d48398812d5f69b63f674dd Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 20:00:02 +0800 Subject: sctp: remove the typedef sctp_event_timeout_t This patch is to remove the typedef sctp_event_timeout_t, and replace with enum sctp_event_timeout in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/command.h | 4 ++-- include/net/sctp/constants.h | 8 ++++---- net/sctp/sm_sideeffect.c | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index be12ec946628..376cb78b6247 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -127,7 +127,7 @@ typedef union { int error; __be16 err; enum sctp_state state; - sctp_event_timeout_t to; + enum sctp_event_timeout to; struct sctp_chunk *chunk; struct sctp_association *asoc; struct sctp_transport *transport; @@ -168,7 +168,7 @@ SCTP_ARG_CONSTRUCTOR(U8, __u8, u8) SCTP_ARG_CONSTRUCTOR(ERROR, int, error) SCTP_ARG_CONSTRUCTOR(PERR, __be16, err) /* protocol error */ SCTP_ARG_CONSTRUCTOR(STATE, enum sctp_state, state) -SCTP_ARG_CONSTRUCTOR(TO, sctp_event_timeout_t, to) +SCTP_ARG_CONSTRUCTOR(TO, enum sctp_event_timeout, to) SCTP_ARG_CONSTRUCTOR(CHUNK, struct sctp_chunk *, chunk) SCTP_ARG_CONSTRUCTOR(ASOC, struct sctp_association *, asoc) SCTP_ARG_CONSTRUCTOR(TRANSPORT, struct sctp_transport *, transport) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index fd8a80ec573f..fb931f07e83a 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -84,7 +84,7 @@ typedef enum { * SCTP_ULP_* to the list of possible chunks. */ -typedef enum { +enum sctp_event_timeout { SCTP_EVENT_TIMEOUT_NONE = 0, SCTP_EVENT_TIMEOUT_T1_COOKIE, SCTP_EVENT_TIMEOUT_T1_INIT, @@ -96,7 +96,7 @@ typedef enum { SCTP_EVENT_TIMEOUT_RECONF, SCTP_EVENT_TIMEOUT_SACK, SCTP_EVENT_TIMEOUT_AUTOCLOSE, -} sctp_event_timeout_t; +}; #define SCTP_EVENT_TIMEOUT_MAX SCTP_EVENT_TIMEOUT_AUTOCLOSE #define SCTP_NUM_TIMEOUT_TYPES (SCTP_EVENT_TIMEOUT_MAX + 1) @@ -131,7 +131,7 @@ enum sctp_event_primitive { typedef union { enum sctp_cid chunk; - sctp_event_timeout_t timeout; + enum sctp_event_timeout timeout; enum sctp_event_other other; enum sctp_event_primitive primitive; } sctp_subtype_t; @@ -142,7 +142,7 @@ SCTP_ST_## _name (_type _arg) \ { sctp_subtype_t _retval; _retval._elt = _arg; return _retval; } SCTP_SUBTYPE_CONSTRUCTOR(CHUNK, enum sctp_cid, chunk) -SCTP_SUBTYPE_CONSTRUCTOR(TIMEOUT, sctp_event_timeout_t, timeout) +SCTP_SUBTYPE_CONSTRUCTOR(TIMEOUT, enum sctp_event_timeout, timeout) SCTP_SUBTYPE_CONSTRUCTOR(OTHER, enum sctp_event_other, other) SCTP_SUBTYPE_CONSTRUCTOR(PRIMITIVE, enum sctp_event_primitive, primitive) diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index b77a81aa907b..11a344896b71 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -280,7 +280,7 @@ out_unlock: * for timeouts which use the association as their parameter. */ static void sctp_generate_timeout_event(struct sctp_association *asoc, - sctp_event_timeout_t timeout_type) + enum sctp_event_timeout timeout_type) { struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); @@ -1052,8 +1052,8 @@ static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands, static void sctp_cmd_t1_timer_update(struct sctp_association *asoc, - sctp_event_timeout_t timer, - char *name) + enum sctp_event_timeout timer, + char *name) { struct sctp_transport *t; -- cgit v1.2.3-55-g7522 From 61f0eb072294a148f707335d4d7f858b2af73770 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 20:00:03 +0800 Subject: sctp: remove the typedef sctp_event_t This patch is to remove the typedef sctp_event_t, and replace with enum sctp_event in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/constants.h | 6 ++---- include/net/sctp/sm.h | 12 +++++------- net/sctp/primitive.c | 2 +- net/sctp/sm_sideeffect.c | 20 +++++++++----------- net/sctp/sm_statetable.c | 2 +- 5 files changed, 18 insertions(+), 24 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index fb931f07e83a..3181e0f95b60 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -71,14 +71,12 @@ enum { SCTP_DEFAULT_INSTREAMS = SCTP_MAX_STREAM }; SCTP_NUM_AUTH_CHUNK_TYPES) /* These are the different flavours of event. */ -typedef enum { - +enum sctp_event { SCTP_EVENT_T_CHUNK = 1, SCTP_EVENT_T_TIMEOUT, SCTP_EVENT_T_OTHER, SCTP_EVENT_T_PRIMITIVE - -} sctp_event_t; +}; /* As a convenience for the state machine, we append SCTP_EVENT_* and * SCTP_ULP_* to the list of possible chunks. diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 281e8d1e0752..96f54cff7964 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -176,7 +176,7 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire; /* Prototypes for utility support functions. */ __u8 sctp_get_chunk_type(struct sctp_chunk *chunk); const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net, - sctp_event_t event_type, + enum sctp_event event_type, enum sctp_state state, sctp_subtype_t event_subtype); int sctp_chunk_iif(const struct sctp_chunk *); @@ -312,12 +312,10 @@ struct sctp_chunk *sctp_process_strreset_resp( /* Prototypes for statetable processing. */ -int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, - enum sctp_state state, - struct sctp_endpoint *, - struct sctp_association *asoc, - void *event_arg, - gfp_t gfp); +int sctp_do_sm(struct net *net, enum sctp_event event_type, + sctp_subtype_t subtype, enum sctp_state state, + struct sctp_endpoint *ep, struct sctp_association *asoc, + void *event_arg, gfp_t gfp); /* 2nd level prototypes */ void sctp_generate_t3_rtx_event(unsigned long peer); diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c index 1fb5b9bb3c6d..c914166984b3 100644 --- a/net/sctp/primitive.c +++ b/net/sctp/primitive.c @@ -53,7 +53,7 @@ int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \ void *arg) { \ int error = 0; \ - sctp_event_t event_type; sctp_subtype_t subtype; \ + enum sctp_event event_type; sctp_subtype_t subtype; \ enum sctp_state state; \ struct sctp_endpoint *ep; \ \ diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 11a344896b71..b545c768cb9e 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -51,7 +51,7 @@ #include #include -static int sctp_cmd_interpreter(sctp_event_t event_type, +static int sctp_cmd_interpreter(enum sctp_event event_type, sctp_subtype_t subtype, enum sctp_state state, struct sctp_endpoint *ep, @@ -60,7 +60,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, sctp_disposition_t status, sctp_cmd_seq_t *commands, gfp_t gfp); -static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, +static int sctp_side_effects(enum sctp_event event_type, sctp_subtype_t subtype, enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association **asoc, @@ -602,7 +602,7 @@ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, /* Worker routine to handle SCTP_CMD_ASSOC_FAILED. 
*/ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, struct sctp_association *asoc, - sctp_event_t event_type, + enum sctp_event event_type, sctp_subtype_t subtype, struct sctp_chunk *chunk, unsigned int error) @@ -1139,12 +1139,10 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc) * If you want to understand all of lksctp, this is a * good place to start. */ -int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, - enum sctp_state state, - struct sctp_endpoint *ep, - struct sctp_association *asoc, - void *event_arg, - gfp_t gfp) +int sctp_do_sm(struct net *net, enum sctp_event event_type, + sctp_subtype_t subtype, enum sctp_state state, + struct sctp_endpoint *ep, struct sctp_association *asoc, + void *event_arg, gfp_t gfp) { sctp_cmd_seq_t commands; const sctp_sm_table_entry_t *state_fn; @@ -1178,7 +1176,7 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, /***************************************************************** * This the master state function side effect processing function. *****************************************************************/ -static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, +static int sctp_side_effects(enum sctp_event event_type, sctp_subtype_t subtype, enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association **asoc, @@ -1263,7 +1261,7 @@ bail: ********************************************************************/ /* This is the side-effect interpreter. */ -static int sctp_cmd_interpreter(sctp_event_t event_type, +static int sctp_cmd_interpreter(enum sctp_event event_type, sctp_subtype_t subtype, enum sctp_state state, struct sctp_endpoint *ep, diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index f46c11bffea7..f7cdb7014244 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -78,7 +78,7 @@ static const sctp_sm_table_entry_t bug = { }) const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net, - sctp_event_t event_type, + enum sctp_event event_type, enum sctp_state state, sctp_subtype_t event_subtype) { -- cgit v1.2.3-55-g7522 From bfc6f8270fefb323662d1d7713f940149f27b7f1 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sat, 5 Aug 2017 20:00:04 +0800 Subject: sctp: remove the typedef sctp_subtype_t This patch is to remove the typedef sctp_subtype_t, and replace with union sctp_subtype in the places where it's using this typedef. Note that it doesn't fix many indents although it should, as sctp_disposition_t's removal would mess them up again. So better to fix them when removing sctp_disposition_t in later patch. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/constants.h | 18 ++-- include/net/sctp/sm.h | 13 +-- net/sctp/associola.c | 2 +- net/sctp/debug.c | 8 +- net/sctp/endpointola.c | 2 +- net/sctp/primitive.c | 2 +- net/sctp/probe.c | 2 +- net/sctp/sm_sideeffect.c | 16 ++-- net/sctp/sm_statefuns.c | 191 ++++++++++++++++++++++--------------------- net/sctp/sm_statetable.c | 9 +- 10 files changed, 134 insertions(+), 129 deletions(-) diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h index 3181e0f95b60..deaafa9b09cb 100644 --- a/include/net/sctp/constants.h +++ b/include/net/sctp/constants.h @@ -124,20 +124,20 @@ enum sctp_event_primitive { /* We define here a utility type for manipulating subtypes. 
* The subtype constructors all work like this: * - * sctp_subtype_t foo = SCTP_ST_CHUNK(SCTP_CID_INIT); + * union sctp_subtype foo = SCTP_ST_CHUNK(SCTP_CID_INIT); */ -typedef union { +union sctp_subtype { enum sctp_cid chunk; enum sctp_event_timeout timeout; enum sctp_event_other other; enum sctp_event_primitive primitive; -} sctp_subtype_t; +}; #define SCTP_SUBTYPE_CONSTRUCTOR(_name, _type, _elt) \ -static inline sctp_subtype_t \ +static inline union sctp_subtype \ SCTP_ST_## _name (_type _arg) \ -{ sctp_subtype_t _retval; _retval._elt = _arg; return _retval; } +{ union sctp_subtype _retval; _retval._elt = _arg; return _retval; } SCTP_SUBTYPE_CONSTRUCTOR(CHUNK, enum sctp_cid, chunk) SCTP_SUBTYPE_CONSTRUCTOR(TIMEOUT, enum sctp_event_timeout, timeout) @@ -220,10 +220,10 @@ enum sctp_sock_state { }; /* These functions map various type to printable names. */ -const char *sctp_cname(const sctp_subtype_t); /* chunk types */ -const char *sctp_oname(const sctp_subtype_t); /* other events */ -const char *sctp_tname(const sctp_subtype_t); /* timeouts */ -const char *sctp_pname(const sctp_subtype_t); /* primitives */ +const char *sctp_cname(const union sctp_subtype id); /* chunk types */ +const char *sctp_oname(const union sctp_subtype id); /* other events */ +const char *sctp_tname(const union sctp_subtype id); /* timeouts */ +const char *sctp_pname(const union sctp_subtype id); /* primitives */ /* This is a table of printable names of sctp_state_t's. */ extern const char *const sctp_state_tbl[]; diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 96f54cff7964..1e7651c3b158 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -73,7 +73,7 @@ typedef struct { typedef sctp_disposition_t (sctp_state_fn_t) (struct net *, const struct sctp_endpoint *, const struct sctp_association *, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *); typedef void (sctp_timer_event_t) (unsigned long); @@ -175,10 +175,11 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire; /* Prototypes for utility support functions. */ __u8 sctp_get_chunk_type(struct sctp_chunk *chunk); -const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net, - enum sctp_event event_type, - enum sctp_state state, - sctp_subtype_t event_subtype); +const sctp_sm_table_entry_t *sctp_sm_lookup_event( + struct net *net, + enum sctp_event event_type, + enum sctp_state state, + union sctp_subtype event_subtype); int sctp_chunk_iif(const struct sctp_chunk *); struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *, struct sctp_chunk *, @@ -313,7 +314,7 @@ struct sctp_chunk *sctp_process_strreset_resp( /* Prototypes for statetable processing. */ int sctp_do_sm(struct net *net, enum sctp_event event_type, - sctp_subtype_t subtype, enum sctp_state state, + union sctp_subtype subtype, enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, gfp_t gfp); diff --git a/net/sctp/associola.c b/net/sctp/associola.c index b53efed8ff71..dfb9651e818b 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1021,11 +1021,11 @@ static void sctp_assoc_bh_rcv(struct work_struct *work) container_of(work, struct sctp_association, base.inqueue.immediate); struct net *net = sock_net(asoc->base.sk); + union sctp_subtype subtype; struct sctp_endpoint *ep; struct sctp_chunk *chunk; struct sctp_inq *inqueue; int state; - sctp_subtype_t subtype; int error = 0; /* The association should be held so we should be safe. 
*/ diff --git a/net/sctp/debug.c b/net/sctp/debug.c index 2e47eb2f05cb..3f619fdcbf0a 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c @@ -60,7 +60,7 @@ static const char *const sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = { }; /* Lookup "chunk type" debug name. */ -const char *sctp_cname(const sctp_subtype_t cid) +const char *sctp_cname(const union sctp_subtype cid) { if (cid.chunk <= SCTP_CID_BASE_MAX) return sctp_cid_tbl[cid.chunk]; @@ -130,7 +130,7 @@ static const char *const sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = { }; /* Lookup primitive debug name. */ -const char *sctp_pname(const sctp_subtype_t id) +const char *sctp_pname(const union sctp_subtype id) { if (id.primitive <= SCTP_EVENT_PRIMITIVE_MAX) return sctp_primitive_tbl[id.primitive]; @@ -143,7 +143,7 @@ static const char *const sctp_other_tbl[] = { }; /* Lookup "other" debug name. */ -const char *sctp_oname(const sctp_subtype_t id) +const char *sctp_oname(const union sctp_subtype id) { if (id.other <= SCTP_EVENT_OTHER_MAX) return sctp_other_tbl[id.other]; @@ -165,7 +165,7 @@ static const char *const sctp_timer_tbl[] = { }; /* Lookup timer debug name. */ -const char *sctp_tname(const sctp_subtype_t id) +const char *sctp_tname(const union sctp_subtype id) { BUILD_BUG_ON(SCTP_EVENT_TIMEOUT_MAX + 1 != ARRAY_SIZE(sctp_timer_tbl)); diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 4111c00a9d9d..ee1e601a0b11 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -382,7 +382,7 @@ static void sctp_endpoint_bh_rcv(struct work_struct *work) struct sctp_transport *transport; struct sctp_chunk *chunk; struct sctp_inq *inqueue; - sctp_subtype_t subtype; + union sctp_subtype subtype; enum sctp_state state; int error = 0; int first_time = 1; /* is this the first time through the loop */ diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c index c914166984b3..c0817f7a8964 100644 --- a/net/sctp/primitive.c +++ b/net/sctp/primitive.c @@ -53,7 +53,7 @@ int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \ void *arg) { \ int error = 0; \ - enum sctp_event event_type; sctp_subtype_t subtype; \ + enum sctp_event event_type; union sctp_subtype subtype; \ enum sctp_state state; \ struct sctp_endpoint *ep; \ \ diff --git a/net/sctp/probe.c b/net/sctp/probe.c index 6cc2152e0740..43837dfc86a7 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c @@ -130,7 +130,7 @@ static const struct file_operations sctpprobe_fops = { static sctp_disposition_t jsctp_sf_eat_sack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index b545c768cb9e..4a12d29d9aa1 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -52,7 +52,7 @@ #include static int sctp_cmd_interpreter(enum sctp_event event_type, - sctp_subtype_t subtype, + union sctp_subtype subtype, enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association *asoc, @@ -60,7 +60,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type, sctp_disposition_t status, sctp_cmd_seq_t *commands, gfp_t gfp); -static int sctp_side_effects(enum sctp_event event_type, sctp_subtype_t subtype, +static int sctp_side_effects(enum sctp_event event_type, + union sctp_subtype subtype, enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association **asoc, @@ -603,7 +604,7 @@ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, 
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, struct sctp_association *asoc, enum sctp_event event_type, - sctp_subtype_t subtype, + union sctp_subtype subtype, struct sctp_chunk *chunk, unsigned int error) { @@ -1140,7 +1141,7 @@ static void sctp_cmd_send_asconf(struct sctp_association *asoc) * good place to start. */ int sctp_do_sm(struct net *net, enum sctp_event event_type, - sctp_subtype_t subtype, enum sctp_state state, + union sctp_subtype subtype, enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, gfp_t gfp) { @@ -1148,7 +1149,7 @@ int sctp_do_sm(struct net *net, enum sctp_event event_type, const sctp_sm_table_entry_t *state_fn; sctp_disposition_t status; int error = 0; - typedef const char *(printfn_t)(sctp_subtype_t); + typedef const char *(printfn_t)(union sctp_subtype); static printfn_t *table[] = { NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname, }; @@ -1176,7 +1177,8 @@ int sctp_do_sm(struct net *net, enum sctp_event event_type, /***************************************************************** * This the master state function side effect processing function. *****************************************************************/ -static int sctp_side_effects(enum sctp_event event_type, sctp_subtype_t subtype, +static int sctp_side_effects(enum sctp_event event_type, + union sctp_subtype subtype, enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association **asoc, @@ -1262,7 +1264,7 @@ bail: /* This is the side-effect interpreter. */ static int sctp_cmd_interpreter(enum sctp_event event_type, - sctp_subtype_t subtype, + union sctp_subtype subtype, enum sctp_state state, struct sctp_endpoint *ep, struct sctp_association *asoc, diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 5381697333df..ac6aaa046529 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -80,19 +80,19 @@ static void sctp_send_stale_cookie_err(struct net *net, static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands); static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands); static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands); static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); @@ -116,7 +116,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands); @@ -124,7 +124,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, void *ext, sctp_cmd_seq_t *commands); @@ -132,7 +132,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands); @@ -140,7 +140,7 
@@ static sctp_disposition_t sctp_sf_violation_chunk( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands); @@ -148,13 +148,13 @@ static enum sctp_ierror sctp_sf_authenticate( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, struct sctp_chunk *chunk); static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands); @@ -217,7 +217,7 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk, __u16 required_length) sctp_disposition_t sctp_sf_do_4_C(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -303,7 +303,7 @@ sctp_disposition_t sctp_sf_do_4_C(struct net *net, sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -497,7 +497,7 @@ nomem: sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -647,7 +647,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net, sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, void *arg, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; @@ -874,7 +874,7 @@ nomem: sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, void *arg, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; @@ -951,7 +951,7 @@ nomem: /* Generate and sendout a heartbeat packet. 
*/ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -977,7 +977,7 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep, sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -1025,7 +1025,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net, sctp_disposition_t sctp_sf_send_reconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, void *arg, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_transport *transport = arg; @@ -1076,7 +1076,7 @@ sctp_disposition_t sctp_sf_send_reconf(struct net *net, sctp_disposition_t sctp_sf_beat_8_3(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -1151,7 +1151,7 @@ nomem: sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -1416,7 +1416,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg, *repl, *err_chunk; @@ -1627,7 +1627,7 @@ cleanup: sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -1681,7 +1681,7 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net, sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -1704,7 +1704,7 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net, sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { /* Per the above section, we'll discard the chunk if we have an @@ -2027,7 +2027,7 @@ nomem: sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2146,7 +2146,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2188,7 +2188,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort( sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ 
-2239,7 +2239,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2266,7 +2266,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort( sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2330,7 +2330,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net, static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2452,7 +2452,7 @@ nomem: sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2489,7 +2489,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net, static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2527,7 +2527,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2566,7 +2566,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net, sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2581,7 +2581,7 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net, sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2653,7 +2653,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2742,7 +2742,7 @@ out: sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2795,7 +2795,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net, sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2859,7 +2859,7 @@ nomem: sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t 
*commands) { @@ -2915,7 +2915,7 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net, sctp_disposition_t sctp_sf_do_ecne(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -2972,7 +2972,7 @@ sctp_disposition_t sctp_sf_do_ecne(struct net *net, sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -3092,7 +3092,7 @@ discard_noforce: sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -3183,7 +3183,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net, sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -3257,7 +3257,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -3307,7 +3307,7 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, sctp_disposition_t sctp_sf_operr_notify(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -3345,7 +3345,7 @@ sctp_disposition_t sctp_sf_operr_notify(struct net *net, sctp_disposition_t sctp_sf_do_9_2_final(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -3428,7 +3428,7 @@ nomem: sctp_disposition_t sctp_sf_ootb(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -3521,7 +3521,7 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -3583,7 +3583,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -3608,7 +3608,7 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net, sctp_disposition_t sctp_sf_do_asconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, void *arg, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; @@ -3725,7 +3725,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const 
sctp_subtype_t type, void *arg, + const union sctp_subtype type, + void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *asconf_ack = arg; @@ -3843,7 +3844,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, sctp_disposition_t sctp_sf_do_reconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, void *arg, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_paramhdr *err_param = NULL; @@ -3919,7 +3920,7 @@ sctp_disposition_t sctp_sf_do_reconf(struct net *net, sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -3990,7 +3991,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4082,7 +4083,7 @@ static enum sctp_ierror sctp_sf_authenticate( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, struct sctp_chunk *chunk) { struct sctp_authhdr *auth_hdr; @@ -4157,7 +4158,7 @@ nomem: sctp_disposition_t sctp_sf_eat_auth(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4254,7 +4255,7 @@ sctp_disposition_t sctp_sf_eat_auth(struct net *net, sctp_disposition_t sctp_sf_unk_chunk(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4334,7 +4335,7 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net, sctp_disposition_t sctp_sf_discard_chunk(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4374,7 +4375,7 @@ sctp_disposition_t sctp_sf_discard_chunk(struct net *net, sctp_disposition_t sctp_sf_pdiscard(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4402,7 +4403,7 @@ sctp_disposition_t sctp_sf_pdiscard(struct net *net, sctp_disposition_t sctp_sf_violation(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4540,7 +4541,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4560,7 +4561,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, void *ext, sctp_cmd_seq_t *commands) { @@ -4603,7 +4604,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, 
sctp_cmd_seq_t *commands) { @@ -4623,7 +4624,7 @@ static sctp_disposition_t sctp_sf_violation_chunk( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4698,7 +4699,7 @@ static sctp_disposition_t sctp_sf_violation_chunk( sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4810,7 +4811,7 @@ nomem: sctp_disposition_t sctp_sf_do_prm_send(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4850,7 +4851,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4906,7 +4907,7 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4943,7 +4944,7 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort( sctp_disposition_t sctp_sf_error_closed(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4957,7 +4958,7 @@ sctp_disposition_t sctp_sf_error_closed(struct net *net, sctp_disposition_t sctp_sf_error_shutdown(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -4984,7 +4985,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5019,7 +5020,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { /* There is a single T1 timer, so we should be able to use @@ -5046,7 +5047,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5095,7 +5096,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5121,7 +5122,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5148,7 +5149,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void 
*arg, sctp_cmd_seq_t *commands) { @@ -5179,7 +5180,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5215,7 +5216,7 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5247,7 +5248,7 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat( sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5264,7 +5265,7 @@ sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net, sctp_disposition_t sctp_sf_do_prm_reconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { struct sctp_chunk *chunk = arg; @@ -5282,7 +5283,7 @@ sctp_disposition_t sctp_sf_ignore_primitive( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5306,7 +5307,7 @@ sctp_disposition_t sctp_sf_do_no_pending_tsn( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5338,7 +5339,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5408,7 +5409,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5481,7 +5482,7 @@ nomem: sctp_disposition_t sctp_sf_ignore_other(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5509,7 +5510,7 @@ sctp_disposition_t sctp_sf_ignore_other(struct net *net, sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5597,7 +5598,7 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net, sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5628,7 +5629,7 @@ sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net, sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5692,7 +5693,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net, sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net 
*net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5742,7 +5743,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net, sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5813,7 +5814,7 @@ sctp_disposition_t sctp_sf_t4_timer_expire( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5884,7 +5885,7 @@ sctp_disposition_t sctp_sf_t4_timer_expire( sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5921,7 +5922,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5963,7 +5964,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire( sctp_disposition_t sctp_sf_not_impl(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -5981,7 +5982,7 @@ sctp_disposition_t sctp_sf_not_impl(struct net *net, sctp_disposition_t sctp_sf_bug(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { @@ -6002,7 +6003,7 @@ sctp_disposition_t sctp_sf_bug(struct net *net, sctp_disposition_t sctp_sf_timer_ignore(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, - const sctp_subtype_t type, + const union sctp_subtype type, void *arg, sctp_cmd_seq_t *commands) { diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index f7cdb7014244..d437f3801399 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -77,10 +77,11 @@ static const sctp_sm_table_entry_t bug = { rtn; \ }) -const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net, - enum sctp_event event_type, - enum sctp_state state, - sctp_subtype_t event_subtype) +const sctp_sm_table_entry_t *sctp_sm_lookup_event( + struct net *net, + enum sctp_event event_type, + enum sctp_state state, + union sctp_subtype event_subtype) { switch (event_type) { case SCTP_EVENT_T_CHUNK: -- cgit v1.2.3-55-g7522 From df44f531ee572a20128b2b610370ef3861b369ae Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Sat, 5 Aug 2017 21:34:44 +0200 Subject: Bluetooth: bluecard: Always enable LEDs (fix for Anycom CF-300) Anycom CF-300 (HP C8249A) has both power and activity LEDs. However the id read in bluecard_open() is 0x73 so the driver does not enable the LEDs. Remove the CARD_HAS_PCCARD_ID check to enable LEDs. 
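For reference, the LED control in this driver is a single write to the register at iobase + 0x30; the bit values are the ones visible in the hunks below, while the two helpers here are hypothetical and only illustrate the behaviour once the CARD_HAS_PCCARD_ID gate is removed (a sketch, not the driver's literal code):

#include <linux/io.h>

/* Sketch only: helper names are made up, register offset and bit patterns
 * are taken from the diff. After this patch the power-LED write happens
 * unconditionally instead of being skipped when the card id is not
 * recognised (as on the Anycom CF-300, which reports id 0x73).
 */
static void bluecard_power_led_on(unsigned int iobase)
{
	outb(0x08 | 0x20, iobase + 0x30);	/* power LED on */
}

static void bluecard_power_led_off(unsigned int iobase)
{
	outb(0x00, iobase + 0x30);		/* all LEDs off */
}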
Signed-off-by: Ondrej Zary Signed-off-by: Marcel Holtmann --- drivers/bluetooth/bluecard_cs.c | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index d4b0b655dde6..5cd1e164fde0 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -160,9 +160,6 @@ static void bluecard_activity_led_timeout(u_long arg) struct bluecard_info *info = (struct bluecard_info *)arg; unsigned int iobase = info->p_dev->resource[0]->start; - if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) - return; - if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { /* Disable activity LED */ outb(0x08 | 0x20, iobase + 0x30); @@ -177,9 +174,6 @@ static void bluecard_enable_activity_led(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; - if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) - return; - if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { /* Enable activity LED */ outb(0x10 | 0x40, iobase + 0x30); @@ -625,16 +619,13 @@ static int bluecard_hci_flush(struct hci_dev *hdev) static int bluecard_hci_open(struct hci_dev *hdev) { struct bluecard_info *info = hci_get_drvdata(hdev); + unsigned int iobase = info->p_dev->resource[0]->start; if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); - if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) { - unsigned int iobase = info->p_dev->resource[0]->start; - - /* Enable LED */ - outb(0x08 | 0x20, iobase + 0x30); - } + /* Enable LED */ + outb(0x08 | 0x20, iobase + 0x30); return 0; } @@ -643,15 +634,12 @@ static int bluecard_hci_open(struct hci_dev *hdev) static int bluecard_hci_close(struct hci_dev *hdev) { struct bluecard_info *info = hci_get_drvdata(hdev); + unsigned int iobase = info->p_dev->resource[0]->start; bluecard_hci_flush(hdev); - if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) { - unsigned int iobase = info->p_dev->resource[0]->start; - - /* Disable LED */ - outb(0x00, iobase + 0x30); - } + /* Disable LED */ + outb(0x00, iobase + 0x30); return 0; } -- cgit v1.2.3-55-g7522 From 859d2351172482c5bf5b727b1fe10d98ba334fd6 Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Sat, 5 Aug 2017 21:34:45 +0200 Subject: Bluetooth: bluecard: fix LED behavior Keep power LED on during activity. LED timer races with power LED disabling in hci_close(), resulting in power LED left on after closing. Stop LED timer before disabling power LED. BTW. On cards without an activity LED, the behavior is a bit weird: The LED is on after hci_open() but only until the first data transfer. Then it's off in idle and on during activity. It could be improved by keeping the LED on in idle and flashing during activity. 
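Concretely, the race is between bluecard_activity_led_timeout() and the final LED write in bluecard_hci_close(): if the timer callback fires after the outb(0x00, ...), it turns the power LED back on. A sketch of the resulting close path, reconstructed from the hunks below rather than quoted verbatim:

static int bluecard_hci_close(struct hci_dev *hdev)
{
	struct bluecard_info *info = hci_get_drvdata(hdev);
	unsigned int iobase = info->p_dev->resource[0]->start;

	bluecard_hci_flush(hdev);

	/* Quiesce the LED timer first, so no late callback can touch the
	 * LED register after this point...
	 */
	del_timer_sync(&(info->timer));

	/* ...then the power LED really stays off once the device is closed. */
	outb(0x00, iobase + 0x30);

	return 0;
}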
Signed-off-by: Ondrej Zary Signed-off-by: Marcel Holtmann --- drivers/bluetooth/bluecard_cs.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index 5cd1e164fde0..61ac48e1aa55 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -161,7 +161,7 @@ static void bluecard_activity_led_timeout(u_long arg) unsigned int iobase = info->p_dev->resource[0]->start; if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { - /* Disable activity LED */ + /* Disable activity LED, keep power LED enabled */ outb(0x08 | 0x20, iobase + 0x30); } else { /* Disable power LED */ @@ -175,8 +175,8 @@ static void bluecard_enable_activity_led(struct bluecard_info *info) unsigned int iobase = info->p_dev->resource[0]->start; if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { - /* Enable activity LED */ - outb(0x10 | 0x40, iobase + 0x30); + /* Enable activity LED, keep power LED enabled */ + outb(0x18 | 0x60, iobase + 0x30); /* Stop the LED after HZ/4 */ mod_timer(&(info->timer), jiffies + HZ / 4); @@ -624,7 +624,7 @@ static int bluecard_hci_open(struct hci_dev *hdev) if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); - /* Enable LED */ + /* Enable power LED */ outb(0x08 | 0x20, iobase + 0x30); return 0; @@ -638,7 +638,10 @@ static int bluecard_hci_close(struct hci_dev *hdev) bluecard_hci_flush(hdev); - /* Disable LED */ + /* Stop LED timer */ + del_timer_sync(&(info->timer)); + + /* Disable power LED */ outb(0x00, iobase + 0x30); return 0; -- cgit v1.2.3-55-g7522 From 706b35834820715d122a984eba2369cae7bf3abd Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 6 Jun 2017 17:46:49 +0300 Subject: net/mlx5e: Rearrange netdevice ops structures Since we are going to allow building the driver without eswitch support, it would be possible to compile out the sriov netdevice ops struct such that the basic ops instance will be used for non VF devices too. Add missing udp tunnel ndos into mlx5e_netdev_ops_basic. While here, rearrange some ndos in the sriov ops struct and put vf/eswitch related ndos towards the end of it. 
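The reason the basic table needs the udp-tunnel and features_check ndos is that netdev setup picks exactly one of the two tables, so anything missing from the basic table is simply lost on devices that use it. Roughly along these lines (the wrapper name is hypothetical and the capability check is paraphrased from this series, so treat it as a sketch rather than the driver's literal code):

static void mlx5e_pick_netdev_ops(struct net_device *netdev,
				  struct mlx5_core_dev *mdev)
{
	/* Devices handled without an eswitch (VFs, or a build with eswitch
	 * support compiled out) fall back to the basic table and must not
	 * lose the tunnel offload ndos in the process.
	 */
	if (MLX5_CAP_GEN(mdev, vport_group_manager))
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
	else
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
}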
Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 57f31fa478ce..c2986777a1d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3706,6 +3706,9 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif @@ -3730,13 +3733,19 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, + .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, - .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx5e_rx_flow_steer, #endif + .ndo_tx_timeout = mlx5e_tx_timeout, + .ndo_xdp = mlx5e_xdp, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mlx5e_netpoll, +#endif + /* SRIOV E-Switch NDOs */ .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_vlan = mlx5e_set_vf_vlan, .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk, @@ -3745,11 +3754,6 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, - .ndo_tx_timeout = mlx5e_tx_timeout, - .ndo_xdp = mlx5e_xdp, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mlx5e_netpoll, -#endif .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, }; -- cgit v1.2.3-55-g7522 From 07c9f1e57839c678c867b89aa9dcb2220e579b13 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Tue, 6 Jun 2017 09:12:04 +0300 Subject: net/mlx5e: NIC netdev init flow cleanup Remove redundant call to unregister vport representor in mlx5e_add error flow. Hide the representor priv and eswitch internal structures from en_main.c as preparation step for downstream patches which would allow building the driver without support for representors and eswitch. 
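Hiding the representor details behind an opaque pointer is what lets the error flow collapse: en_main.c no longer walks vports to unregister representors, it just frees the handle it was given. A sketch of the resulting mlx5e_add() shape, reconstructed from the hunks below (the _sketch suffix marks it as an illustration, not the literal function):

static void *mlx5e_add_sketch(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	void *rpriv = NULL;

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		rpriv = mlx5e_alloc_nic_rep_priv(mdev);	/* layout owned by en_rep.c */
		if (!rpriv)
			return NULL;
	}

	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
	if (!netdev)
		goto err_free_rpriv;

	return netdev_priv(netdev);

err_free_rpriv:
	kfree(rpriv);	/* the whole cleanup needed for the opaque handle */
	return NULL;
}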
Fixes: 6f08a22c5fb2 ("net/mlx5e: Register/unregister vport representors on interface attach/detach") Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 22 ++++++---------------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 13 +++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 1 + 3 files changed, 20 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index c2986777a1d8..b44d6f677845 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4428,32 +4428,27 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv) static void *mlx5e_add(struct mlx5_core_dev *mdev) { - struct mlx5_eswitch *esw = mdev->priv.eswitch; - int total_vfs = MLX5_TOTAL_VPORTS(mdev); - struct mlx5e_rep_priv *rpriv = NULL; + struct net_device *netdev; + void *rpriv = NULL; void *priv; - int vport; int err; - struct net_device *netdev; err = mlx5e_check_required_hca_cap(mdev); if (err) return NULL; if (MLX5_CAP_GEN(mdev, vport_group_manager)) { - rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); + rpriv = mlx5e_alloc_nic_rep_priv(mdev); if (!rpriv) { - mlx5_core_warn(mdev, - "Not creating net device, Failed to alloc rep priv data\n"); + mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); return NULL; } - rpriv->rep = &esw->offloads.vport_reps[0]; } netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); - goto err_unregister_reps; + goto err_free_rpriv; } priv = netdev_priv(netdev); @@ -4474,14 +4469,9 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) err_detach: mlx5e_detach(mdev, priv); - err_destroy_netdev: mlx5e_destroy_netdev(priv); - -err_unregister_reps: - for (vport = 1; vport < total_vfs; vport++) - mlx5_eswitch_unregister_vport_rep(esw, vport); - +err_free_rpriv: kfree(rpriv); return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 45e60be9c277..a0dd0e7e5b57 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1099,3 +1099,16 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */ mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/ } + +void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev) +{ + struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5e_rep_priv *rpriv; + + rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); + if (!rpriv) + return NULL; + + rpriv->rep = &esw->offloads.vport_reps[0]; + return rpriv; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index a0a1a7a1d6c0..23e43bbf928d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -123,6 +123,7 @@ struct mlx5e_encap_entry { int encap_size; }; +void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev); void mlx5e_register_vport_reps(struct mlx5e_priv *priv); void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv); bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); -- cgit v1.2.3-55-g7522 From a9f7705ffd663ff057222e91a86d9bc1d697fd58 Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Sun, 11 Jun 2017 19:05:10 +0300 Subject: net/mlx5: Unify vport manager capability check Expose 
MLX5_VPORT_MANAGER macro to check for strict vport manager E-switch and MPFS (Multi Physical Function Switch) abilities. VPORT manager must be a PF with an ethernet link and with FW advertised vport group manager capability Replace older checks with the new macro and use it where needed in eswitch.c and mlx5e netdev eswitch related flows. The same macro will be reused in MPFS separation downstream patch. Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 18 ++++++----------- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 4 +--- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 23 ++++++++-------------- .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 4 ++++ 4 files changed, 19 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b44d6f677845..e3c858c44532 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2581,12 +2581,6 @@ static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv) } } -static bool mlx5e_is_eswitch_vport_mngr(struct mlx5_core_dev *mdev) -{ - return (MLX5_CAP_GEN(mdev, vport_group_manager) && - MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH); -} - void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) { int num_txqs = priv->channels.num * priv->channels.params.num_tc; @@ -2600,7 +2594,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_activate_channels(&priv->channels); netif_tx_start_all_queues(priv->netdev); - if (mlx5e_is_eswitch_vport_mngr(priv->mdev)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_add_sqs_fwd_rules(priv); mlx5e_wait_channels_min_rx_wqes(&priv->channels); @@ -2611,7 +2605,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { mlx5e_redirect_rqts_to_drop(priv); - if (mlx5e_is_eswitch_vport_mngr(priv->mdev)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_remove_sqs_fwd_rules(priv); /* FIXME: This is a W/A only for tx timeout watch dog false alarm when @@ -4079,7 +4073,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); #ifdef CONFIG_NET_SWITCHDEV - if (MLX5_CAP_GEN(mdev, vport_group_manager)) + if (MLX5_VPORT_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif @@ -4221,7 +4215,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_enable_async_events(priv); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_register_vport_reps(priv); if (netdev->reg_state != NETREG_REGISTERED) @@ -4255,7 +4249,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) queue_work(priv->wq, &priv->set_rx_mode_work); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) + if (MLX5_VPORT_MANAGER(priv->mdev)) mlx5e_unregister_vport_reps(priv); mlx5e_disable_async_events(priv); @@ -4437,7 +4431,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) if (err) return NULL; - if (MLX5_CAP_GEN(mdev, vport_group_manager)) { + if (MLX5_VPORT_MANAGER(mdev)) { rpriv = mlx5e_alloc_nic_rep_priv(mdev); if (!rpriv) { mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 52b9a64cd3a2..24d2f707fdfc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -688,9 +688,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; 
int err; - if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH && - MLX5_CAP_GEN(dev, vport_group_manager) && - mlx5_core_is_pf(dev)) + if (MLX5_VPORT_MANAGER(dev)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); if (MLX5_CAP_GEN(dev, port_module_event)) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 8b18cc9ec026..5c001b61d04a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1611,13 +1611,14 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) } /* Public E-Switch API */ +#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev)) + int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { int err; int i, enabled_events; - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + if (!ESW_ALLOWED(esw)) return 0; if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) || @@ -1667,9 +1668,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) int nvports; int i; - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH || - esw->mode == SRIOV_NONE) + if (!ESW_ALLOWED(esw) || esw->mode == SRIOV_NONE) return; esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n", @@ -1698,8 +1697,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) void mlx5_eswitch_attach(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + if (!ESW_ALLOWED(esw)) return; esw_enable_vport(esw, 0, UC_ADDR_CHANGE); @@ -1708,8 +1706,7 @@ void mlx5_eswitch_attach(struct mlx5_eswitch *esw) void mlx5_eswitch_detach(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + if (!ESW_ALLOWED(esw)) return; esw_disable_vport(esw, 0); @@ -1723,8 +1720,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) int vport_num; int err; - if (!MLX5_CAP_GEN(dev, vport_group_manager) || - MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + if (!MLX5_VPORT_MANAGER(dev)) return 0; esw_info(dev, @@ -1806,8 +1802,7 @@ abort: void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) { - if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) || - MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) return; esw_info(esw->dev, "cleanup\n"); @@ -1838,8 +1833,6 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) } /* Vport Administration */ -#define ESW_ALLOWED(esw) \ - (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 6a263e8d883a..d8da9240a00b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -43,6 +43,10 @@ #define DRIVER_VERSION "5.0-0" #define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev)) +#define MLX5_VPORT_MANAGER(mdev) \ + (MLX5_CAP_GEN(mdev, vport_group_manager) && \ + (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ 
+ mlx5_core_is_pf(mdev)) extern uint mlx5_core_debug_mask; -- cgit v1.2.3-55-g7522 From eeb66cdb682678bfd1f02a4547e3649b38ffea7e Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Sun, 4 Jun 2017 23:11:55 +0300 Subject: net/mlx5: Separate between E-Switch and MPFS Multi-Physical Function Switch (MPFs) is required for when multi-PF configuration is enabled to allow passing user configured unicast MAC addresses to the requesting PF. Before this patch eswitch.c used to manage the HW MPFS l2 table, E-Switch always (regardless of sriov) enabled vport(0) (NIC PF) vport's contexts update on unicast mac address list changes, to populate the PF's MPFS L2 table accordingly. In downstream patch we would like to allow compiling the driver without E-Switch functionalities, for that we move MPFS l2 table logic out of eswitch.c into its own file, and provide Kconfig flag (MLX5_MPFS) to allow compiling out MPFS for those who don't want Multi-PF support. NIC PF netdevice will now directly update MPFS l2 table via the new MPFS API. VF netdevice has no access to MPFS L2 table, so E-Switch will remain responsible of updating its MPFS l2 table on behalf of its VFs. Due to this change we also don't require enabling vport(0) (PF vport) unicast mac changes events anymore, for when SRIOV is not enabled. Which means E-Switch is now activated only on SRIOV activation, and not required otherwise. Signed-off-by: Saeed Mahameed Cc: Jes Sorensen Cc: kernel-team@fb.com --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 10 + drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 + drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 17 +- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 190 ++++--------------- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 55 +----- drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | 201 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h | 95 ++++++++++ drivers/net/ethernet/mellanox/mlx5/core/main.c | 26 ++- include/linux/mlx5/driver.h | 2 + 9 files changed, 377 insertions(+), 221 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index 5aee05992f27..d7174295b6ef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -34,6 +34,16 @@ config MLX5_CORE_EN ---help--- Ethernet support in Mellanox Technologies ConnectX-4 NIC. +config MLX5_MPFS + bool "Mellanox Technologies MLX5 MPFS support" + depends on MLX5_CORE_EN + default y + ---help--- + Mellanox Technologies Ethernet Multi-Physical Function Switch (MPFS) + support in ConnectX NIC. MPFs is required for when multi-PF configuration + is enabled to allow passing user configured unicast MAC addresses to the + requesting PF. 
+ config MLX5_CORE_EN_DCB bool "Data Center Bridging (DCB) Support" default y diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 9d17e4e76d3a..c867e48f8a4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -16,6 +16,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += eswitch.o eswitch_offloads.o \ en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o +mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o + mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index dfccb5305e9c..eecbc6d4f51f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -36,6 +36,7 @@ #include #include #include "en.h" +#include "lib/mpfs.h" static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, struct mlx5e_l2_rule *ai, int type); @@ -65,6 +66,7 @@ struct mlx5e_l2_hash_node { struct hlist_node hlist; u8 action; struct mlx5e_l2_rule ai; + bool mpfs; }; static inline int mlx5e_hash_l2(u8 *addr) @@ -362,17 +364,30 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, struct mlx5e_l2_hash_node *hn) { - switch (hn->action) { + u8 action = hn->action; + int l2_err = 0; + + switch (action) { case MLX5E_ACTION_ADD: mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); + if (!is_multicast_ether_addr(hn->ai.addr)) { + l2_err = mlx5_mpfs_add_mac(priv->mdev, hn->ai.addr); + hn->mpfs = !l2_err; + } hn->action = MLX5E_ACTION_NONE; break; case MLX5E_ACTION_DEL: + if (!is_multicast_ether_addr(hn->ai.addr) && hn->mpfs) + l2_err = mlx5_mpfs_del_mac(priv->mdev, hn->ai.addr); mlx5e_del_l2_flow_rule(priv, &hn->ai); mlx5e_del_l2_from_hash(hn); break; } + + if (l2_err) + netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n", + action == MLX5E_ACTION_ADD ? 
"add" : "del", hn->ai.addr, l2_err); } static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 5c001b61d04a..fd51f0ea8df9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -46,19 +46,13 @@ enum { MLX5_ACTION_DEL = 2, }; -/* E-Switch UC L2 table hash node */ -struct esw_uc_addr { - struct l2addr_node node; - u32 table_index; - u32 vport; -}; - /* Vport UC/MC hash node */ struct vport_addr { struct l2addr_node node; u8 action; u32 vport; - struct mlx5_flow_handle *flow_rule; /* SRIOV only */ + struct mlx5_flow_handle *flow_rule; + bool mpfs; /* UC MAC was added to MPFs */ /* A flag indicating that mac was added due to mc promiscuous vport */ bool mc_promisc; }; @@ -154,81 +148,6 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport, return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in)); } -/* HW L2 Table (MPFS) management */ -static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index, - u8 *mac, u8 vlan_valid, u16 vlan) -{ - u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0}; - u8 *in_mac_addr; - - MLX5_SET(set_l2_table_entry_in, in, opcode, - MLX5_CMD_OP_SET_L2_TABLE_ENTRY); - MLX5_SET(set_l2_table_entry_in, in, table_index, index); - MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid); - MLX5_SET(set_l2_table_entry_in, in, vlan, vlan); - - in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); - ether_addr_copy(&in_mac_addr[2], mac); - - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index) -{ - u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0}; - - MLX5_SET(delete_l2_table_entry_in, in, opcode, - MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); - MLX5_SET(delete_l2_table_entry_in, in, table_index, index); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix) -{ - int err = 0; - - *ix = find_first_zero_bit(l2_table->bitmap, l2_table->size); - if (*ix >= l2_table->size) - err = -ENOSPC; - else - __set_bit(*ix, l2_table->bitmap); - - return err; -} - -static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix) -{ - __clear_bit(ix, l2_table->bitmap); -} - -static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac, - u8 vlan_valid, u16 vlan, - u32 *index) -{ - struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table; - int err; - - err = alloc_l2_table_index(l2_table, index); - if (err) - return err; - - err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan); - if (err) - free_l2_table_index(l2_table, *index); - - return err; -} - -static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index) -{ - struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table; - - del_l2_table_entry_cmd(dev, index); - free_l2_table_index(l2_table, index); -} - /* E-Switch FDB */ static struct mlx5_flow_handle * __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, @@ -455,65 +374,60 @@ typedef int (*vport_addr_action)(struct mlx5_eswitch *esw, static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { - struct hlist_head *hash = esw->l2_table.l2_hash; - struct esw_uc_addr *esw_uc; u8 *mac = 
vaddr->node.addr; u32 vport = vaddr->vport; int err; - esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr); - if (esw_uc) { + /* Skip mlx5_mpfs_add_mac for PFs, + * it is already done by the PF netdev in mlx5e_execute_l2_action + */ + if (!vport) + goto fdb_add; + + err = mlx5_mpfs_add_mac(esw->dev, mac); + if (err) { esw_warn(esw->dev, - "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n", - mac, vport, esw_uc->vport); - return -EEXIST; + "Failed to add L2 table mac(%pM) for vport(%d), err(%d)\n", + mac, vport, err); + return err; } + vaddr->mpfs = true; - esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL); - if (!esw_uc) - return -ENOMEM; - esw_uc->vport = vport; - - err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index); - if (err) - goto abort; - +fdb_add: /* SRIOV is enabled: Forward UC MAC to vport */ if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY) vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport); - esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n", - vport, mac, esw_uc->table_index, vaddr->flow_rule); - return err; -abort: - l2addr_hash_del(esw_uc); + esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", + vport, mac, vaddr->flow_rule); + return err; } static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) { - struct hlist_head *hash = esw->l2_table.l2_hash; - struct esw_uc_addr *esw_uc; u8 *mac = vaddr->node.addr; u32 vport = vaddr->vport; + int err = 0; - esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr); - if (!esw_uc || esw_uc->vport != vport) { - esw_debug(esw->dev, - "MAC(%pM) doesn't belong to vport (%d)\n", - mac, vport); - return -EINVAL; - } - esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n", - vport, mac, esw_uc->table_index, vaddr->flow_rule); + /* Skip mlx5_mpfs_del_mac for PFs, + * it is already done by the PF netdev in mlx5e_execute_l2_action + */ + if (!vport || !vaddr->mpfs) + goto fdb_del; - del_l2_table_entry(esw->dev, esw_uc->table_index); + err = mlx5_mpfs_del_mac(esw->dev, mac); + if (err) + esw_warn(esw->dev, + "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n", + mac, vport, err); + vaddr->mpfs = false; +fdb_del: if (vaddr->flow_rule) mlx5_del_flow_rules(vaddr->flow_rule); vaddr->flow_rule = NULL; - l2addr_hash_del(esw_uc); return 0; } @@ -1635,7 +1549,6 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); esw->mode = mode; - esw_disable_vport(esw, 0); if (mode == SRIOV_LEGACY) err = esw_create_legacy_fdb_table(esw, nvfs + 1); @@ -1648,7 +1561,11 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) if (err) esw_warn(esw->dev, "Failed to create eswitch TSAR"); - enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE; + /* Don't enable vport events when in SRIOV_OFFLOADS mode, since: + * 1. L2 table (MPFS) is programmed by PF/VF representors netdevs set_rx_mode + * 2. FDB/Eswitch is programmed by user space tools + */ + enabled_events = (mode == SRIOV_LEGACY) ? 
SRIOV_VPORT_EVENTS : 0; for (i = 0; i <= nvfs; i++) esw_enable_vport(esw, i, enabled_events); @@ -1657,7 +1574,6 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) return 0; abort: - esw_enable_vport(esw, 0, UC_ADDR_CHANGE); esw->mode = SRIOV_NONE; return err; } @@ -1691,30 +1607,10 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) esw_offloads_cleanup(esw, nvports); esw->mode = SRIOV_NONE; - /* VPORT 0 (PF) must be enabled back with non-sriov configuration */ - esw_enable_vport(esw, 0, UC_ADDR_CHANGE); -} - -void mlx5_eswitch_attach(struct mlx5_eswitch *esw) -{ - if (!ESW_ALLOWED(esw)) - return; - - esw_enable_vport(esw, 0, UC_ADDR_CHANGE); - /* VF Vports will be enabled when SRIOV is enabled */ -} - -void mlx5_eswitch_detach(struct mlx5_eswitch *esw) -{ - if (!ESW_ALLOWED(esw)) - return; - - esw_disable_vport(esw, 0); } int mlx5_eswitch_init(struct mlx5_core_dev *dev) { - int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); int total_vports = MLX5_TOTAL_VPORTS(dev); struct mlx5_eswitch *esw; int vport_num; @@ -1724,8 +1620,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) return 0; esw_info(dev, - "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n", - total_vports, l2_table_size, + "Total vports %d, per vport: max uc(%d) max mc(%d)\n", + total_vports, MLX5_MAX_UC_PER_VPORT(dev), MLX5_MAX_MC_PER_VPORT(dev)); @@ -1735,14 +1631,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) esw->dev = dev; - esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size), - sizeof(uintptr_t), GFP_KERNEL); - if (!esw->l2_table.bitmap) { - err = -ENOMEM; - goto abort; - } - esw->l2_table.size = l2_table_size; - esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq"); if (!esw->work_queue) { err = -ENOMEM; @@ -1793,7 +1681,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) abort: if (esw->work_queue) destroy_workqueue(esw->work_queue); - kfree(esw->l2_table.bitmap); kfree(esw->vports); kfree(esw->offloads.vport_reps); kfree(esw); @@ -1809,7 +1696,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); - kfree(esw->l2_table.bitmap); kfree(esw->offloads.vport_reps); kfree(esw->vports); kfree(esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 834a33050969..701d228de4ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -37,6 +37,7 @@ #include #include #include +#include "lib/mpfs.h" #define MLX5_MAX_UC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list)) @@ -44,9 +45,6 @@ #define MLX5_MAX_MC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list)) -#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE)) -#define MLX5_L2_ADDR_HASH(addr) (addr[5]) - #define FDB_UPLINK_VPORT 0xffff #define MLX5_MIN_BW_SHARE 1 @@ -54,48 +52,6 @@ #define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) -/* L2 -mac address based- hash helpers */ -struct l2addr_node { - struct hlist_node hlist; - u8 addr[ETH_ALEN]; -}; - -#define for_each_l2hash_node(hn, tmp, hash, i) \ - for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ - hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) - -#define l2addr_hash_find(hash, mac, type) ({ \ - int ix = MLX5_L2_ADDR_HASH(mac); \ - bool found = false; \ - type *ptr = NULL; \ - \ - hlist_for_each_entry(ptr, &hash[ix], node.hlist) \ 
- if (ether_addr_equal(ptr->node.addr, mac)) {\ - found = true; \ - break; \ - } \ - if (!found) \ - ptr = NULL; \ - ptr; \ -}) - -#define l2addr_hash_add(hash, mac, type, gfp) ({ \ - int ix = MLX5_L2_ADDR_HASH(mac); \ - type *ptr = NULL; \ - \ - ptr = kzalloc(sizeof(type), gfp); \ - if (ptr) { \ - ether_addr_copy(ptr->node.addr, mac); \ - hlist_add_head(&ptr->node.hlist, &hash[ix]);\ - } \ - ptr; \ -}) - -#define l2addr_hash_del(ptr) ({ \ - hlist_del(&ptr->node.hlist); \ - kfree(ptr); \ -}) - struct vport_ingress { struct mlx5_flow_table *acl; struct mlx5_flow_group *allow_untagged_spoofchk_grp; @@ -150,12 +106,6 @@ struct mlx5_vport { u16 enabled_events; }; -struct mlx5_l2_table { - struct hlist_head l2_hash[MLX5_L2_ADDR_HASH_SIZE]; - u32 size; - unsigned long *bitmap; -}; - struct mlx5_eswitch_fdb { void *fdb; union { @@ -222,7 +172,6 @@ struct esw_mc_addr { /* SRIOV only */ struct mlx5_eswitch { struct mlx5_core_dev *dev; - struct mlx5_l2_table l2_table; struct mlx5_eswitch_fdb fdb_table; struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; struct workqueue_struct *work_queue; @@ -250,8 +199,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); -void mlx5_eswitch_attach(struct mlx5_eswitch *esw); -void mlx5_eswitch_detach(struct mlx5_eswitch *esw); void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe); int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode); void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c new file mode 100644 index 000000000000..7cb67122e8b5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2017, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#include +#include +#include +#include "mlx5_core.h" +#include "lib/mpfs.h" + +/* HW L2 Table (MPFS) management */ +static int set_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index, u8 *mac) +{ + u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0}; + u8 *in_mac_addr; + + MLX5_SET(set_l2_table_entry_in, in, opcode, MLX5_CMD_OP_SET_L2_TABLE_ENTRY); + MLX5_SET(set_l2_table_entry_in, in, table_index, index); + + in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address); + ether_addr_copy(&in_mac_addr[2], mac); + + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +static int del_l2table_entry_cmd(struct mlx5_core_dev *dev, u32 index) +{ + u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)] = {0}; + u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0}; + + MLX5_SET(delete_l2_table_entry_in, in, opcode, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY); + MLX5_SET(delete_l2_table_entry_in, in, table_index, index); + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); +} + +/* UC L2 table hash node */ +struct l2table_node { + struct l2addr_node node; + u32 index; /* index in HW l2 table */ +}; + +struct mlx5_mpfs { + struct hlist_head hash[MLX5_L2_ADDR_HASH_SIZE]; + struct mutex lock; /* Synchronize l2 table access */ + u32 size; + unsigned long *bitmap; +}; + +static int alloc_l2table_index(struct mlx5_mpfs *l2table, u32 *ix) +{ + int err = 0; + + *ix = find_first_zero_bit(l2table->bitmap, l2table->size); + if (*ix >= l2table->size) + err = -ENOSPC; + else + __set_bit(*ix, l2table->bitmap); + + return err; +} + +static void free_l2table_index(struct mlx5_mpfs *l2table, u32 ix) +{ + __clear_bit(ix, l2table->bitmap); +} + +int mlx5_mpfs_init(struct mlx5_core_dev *dev) +{ + int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table); + struct mlx5_mpfs *mpfs; + + if (!MLX5_VPORT_MANAGER(dev)) + return 0; + + mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL); + if (!mpfs) + return -ENOMEM; + + mutex_init(&mpfs->lock); + mpfs->size = l2table_size; + mpfs->bitmap = kcalloc(BITS_TO_LONGS(l2table_size), + sizeof(uintptr_t), GFP_KERNEL); + if (!mpfs->bitmap) { + kfree(mpfs); + return -ENOMEM; + } + + dev->priv.mpfs = mpfs; + return 0; +} + +void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + + if (!MLX5_VPORT_MANAGER(dev)) + return; + + WARN_ON(!hlist_empty(mpfs->hash)); + kfree(mpfs->bitmap); + kfree(mpfs); +} + +int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + struct l2table_node *l2addr; + u32 index; + int err; + + if (!MLX5_VPORT_MANAGER(dev)) + return 0; + + mutex_lock(&mpfs->lock); + + l2addr = l2addr_hash_find(mpfs->hash, mac, struct l2table_node); + if (l2addr) { + err = -EEXIST; + goto abort; + } + + err = alloc_l2table_index(mpfs, &index); + if (err) + goto abort; + + l2addr = l2addr_hash_add(mpfs->hash, mac, struct l2table_node, GFP_KERNEL); + if (!l2addr) { + free_l2table_index(mpfs, index); + err = -ENOMEM; + goto abort; + } + + l2addr->index = index; + err = set_l2table_entry_cmd(dev, index, mac); + if (err) { + l2addr_hash_del(l2addr); + free_l2table_index(mpfs, index); + } + + mlx5_core_dbg(dev, "MPFS mac added %pM, index (%d)\n", mac, index); +abort: + mutex_unlock(&mpfs->lock); + return err; +} + +int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) +{ + struct mlx5_mpfs *mpfs = dev->priv.mpfs; + struct l2table_node *l2addr; + int err = 0; + u32 index; + + if (!MLX5_VPORT_MANAGER(dev)) + return 0; + + 
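/* Editor's sketch (not part of the patch above): how a caller is expected
 * to use the MPFS API introduced here. Note from the code above that a
 * duplicate add reports -EEXIST rather than taking a reference, so callers
 * pair each successful mlx5_mpfs_add_mac() with one mlx5_mpfs_del_mac().
 * The functions below are hypothetical; in the driver, the E-Switch code
 * that used to own the L2 table is the natural caller.
 */
static int example_program_uc_mac(struct mlx5_core_dev *dev, u8 *mac)
{
	int err;

	err = mlx5_mpfs_add_mac(dev, mac);	/* program a HW L2 table entry */
	if (err && err != -EEXIST)
		return err;

	/* ... install steering rules for this MAC ... */

	return 0;
}

static void example_remove_uc_mac(struct mlx5_core_dev *dev, u8 *mac)
{
	/* release the HW L2 table entry when the address goes away */
	mlx5_mpfs_del_mac(dev, mac);
}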
mutex_lock(&mpfs->lock); + + l2addr = l2addr_hash_find(mpfs->hash, mac, struct l2table_node); + if (!l2addr) { + err = -ENOENT; + goto unlock; + } + + index = l2addr->index; + del_l2table_entry_cmd(dev, index); + l2addr_hash_del(l2addr); + free_l2table_index(mpfs, index); + mlx5_core_dbg(dev, "MPFS mac deleted %pM, index (%d)\n", mac, index); +unlock: + mutex_unlock(&mpfs->lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h new file mode 100644 index 000000000000..4a7b2c3203a7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __MLX5_MPFS_H__ +#define __MLX5_MPFS_H__ + +#include +#include + +/* L2 -mac address based- hash helpers */ +#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE)) +#define MLX5_L2_ADDR_HASH(addr) (addr[5]) + +struct l2addr_node { + struct hlist_node hlist; + u8 addr[ETH_ALEN]; +}; + +#define for_each_l2hash_node(hn, tmp, hash, i) \ + for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \ + hlist_for_each_entry_safe(hn, tmp, &(hash)[i], hlist) + +#define l2addr_hash_find(hash, mac, type) ({ \ + int ix = MLX5_L2_ADDR_HASH(mac); \ + bool found = false; \ + type *ptr = NULL; \ + \ + hlist_for_each_entry(ptr, &(hash)[ix], node.hlist) \ + if (ether_addr_equal(ptr->node.addr, mac)) {\ + found = true; \ + break; \ + } \ + if (!found) \ + ptr = NULL; \ + ptr; \ +}) + +#define l2addr_hash_add(hash, mac, type, gfp) ({ \ + int ix = MLX5_L2_ADDR_HASH(mac); \ + type *ptr = NULL; \ + \ + ptr = kzalloc(sizeof(type), gfp); \ + if (ptr) { \ + ether_addr_copy(ptr->node.addr, mac); \ + hlist_add_head(&ptr->node.hlist, &(hash)[ix]);\ + } \ + ptr; \ +}) + +#define l2addr_hash_del(ptr) ({ \ + hlist_del(&(ptr)->node.hlist); \ + kfree(ptr); \ +}) + +#ifdef CONFIG_MLX5_MPFS +int mlx5_mpfs_init(struct mlx5_core_dev *dev); +void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev); +int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac); +int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac); +#else /* #ifndef CONFIG_MLX5_MPFS */ +static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {} +static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } +static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; } +#endif +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c065132b956d..d4a9c9b7b6a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -53,6 +53,7 @@ #include #include "mlx5_core.h" #include "fs_core.h" +#include "lib/mpfs.h" #ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" #endif @@ -946,11 +947,17 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_tables_cleanup; } + err = mlx5_mpfs_init(dev); + if (err) { + dev_err(&pdev->dev, "Failed to init l2 table %d\n", err); + goto err_rl_cleanup; + } + #ifdef CONFIG_MLX5_CORE_EN err = mlx5_eswitch_init(dev); if (err) { dev_err(&pdev->dev, "Failed to init eswitch %d\n", err); - goto err_rl_cleanup; + goto err_mpfs_cleanup; } #endif @@ -973,11 +980,11 @@ err_sriov_cleanup: err_eswitch_cleanup: #ifdef CONFIG_MLX5_CORE_EN mlx5_eswitch_cleanup(dev->priv.eswitch); - -err_rl_cleanup: +err_mpfs_cleanup: #endif + mlx5_mpfs_cleanup(dev); +err_rl_cleanup: mlx5_cleanup_rl_table(dev); - err_tables_cleanup: mlx5_cleanup_mkey_table(dev); mlx5_cleanup_srq_table(dev); @@ -998,6 +1005,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) #ifdef CONFIG_MLX5_CORE_EN mlx5_eswitch_cleanup(dev->priv.eswitch); #endif + mlx5_mpfs_cleanup(dev); mlx5_cleanup_rl_table(dev); mlx5_cleanup_reserved_gids(dev); mlx5_cleanup_mkey_table(dev); @@ -1155,10 +1163,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_fs; } -#ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_attach(dev->priv.eswitch); -#endif - err = mlx5_sriov_attach(dev); if (err) { dev_err(&pdev->dev, "sriov init failed %d\n", err); @@ -1202,9 +1206,6 @@ err_fpga_start: mlx5_sriov_detach(dev); 
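/* Editor's sketch (not part of the patch): the main.c hunks above follow the
 * standard "init in order, unwind with goto" idiom. Slotting mlx5_mpfs_init()
 * between the rate-limit table and the eswitch is what forces the relabeled
 * error path (the eswitch failure now jumps to err_mpfs_cleanup instead of
 * err_rl_cleanup), so teardown still runs in exact reverse order of setup.
 * Generic stand-in names below, not the driver's functions.
 */
static int init_a(void) { return 0; }
static int init_b(void) { return 0; }	/* e.g. mlx5_mpfs_init() */
static int init_c(void) { return 0; }	/* e.g. mlx5_eswitch_init() */
static void cleanup_a(void) { }
static void cleanup_b(void) { }

static int example_init_once(void)
{
	int err;

	err = init_a();
	if (err)
		return err;

	err = init_b();
	if (err)
		goto err_a;

	err = init_c();
	if (err)
		goto err_b;	/* undo b, then a */

	return 0;

err_b:
	cleanup_b();
err_a:
	cleanup_a();
	return err;
}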
err_sriov: -#ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_detach(dev->priv.eswitch); -#endif mlx5_cleanup_fs(dev); err_fs: @@ -1279,9 +1280,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_fpga_device_stop(dev); mlx5_sriov_detach(dev); -#ifdef CONFIG_MLX5_CORE_EN - mlx5_eswitch_detach(dev->priv.eswitch); -#endif mlx5_cleanup_fs(dev); mlx5_irq_clear_affinity_hints(dev); free_comp_eqs(dev); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index df6ce59a1f95..88d6eb5b3a76 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -550,6 +550,7 @@ struct mlx5_fc_stats { unsigned long sampling_interval; /* jiffies */ }; +struct mlx5_mpfs; struct mlx5_eswitch; struct mlx5_lag; struct mlx5_pagefault; @@ -647,6 +648,7 @@ struct mlx5_priv { spinlock_t ctx_lock; struct mlx5_flow_steering *steering; + struct mlx5_mpfs *mpfs; struct mlx5_eswitch *eswitch; struct mlx5_core_sriov sriov; struct mlx5_lag *lag; -- cgit v1.2.3-55-g7522 From e80541ecabd57b69726232b89242e28d8123cccc Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 5 Jun 2017 15:17:12 +0300 Subject: net/mlx5: Add CONFIG_MLX5_ESWITCH Kconfig Allow to selectively build the driver with or without sriov eswitch, VF representors and TC offloads. Also remove the need of two ndo ops structures (sriov & basic) and keep only one unified ndo ops, compile out VF SRIOV ndos when not needed (MLX5_ESWITCH=n), and for VF netdev calling those ndos will result in returning -EPERM. Signed-off-by: Saeed Mahameed Reviewed-by: Or Gerlitz Cc: Jes Sorensen Cc: kernel-team@fb.com --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 11 +++++ drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 53 +++++++---------------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 8 ++++ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 2 + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 7 +++ drivers/net/ethernet/mellanox/mlx5/core/eq.c | 4 -- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 23 +++++++--- drivers/net/ethernet/mellanox/mlx5/core/main.c | 10 +---- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 14 +----- 10 files changed, 69 insertions(+), 72 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index d7174295b6ef..fdaef00465d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -44,6 +44,17 @@ config MLX5_MPFS is enabled to allow passing user configured unicast MAC addresses to the requesting PF. +config MLX5_ESWITCH + bool "Mellanox Technologies MLX5 SRIOV E-Switch support" + depends on MLX5_CORE_EN + default y + ---help--- + Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC. + E-Switch provides internal SRIOV packet steering and switching for the + enabled VFs and PF in two available modes: + Legacy SRIOV mode (L2 mac vlan steering based). + Switchdev mode (eswitch offloads). 
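As a side note on how a bool option like this is consumed: the rest of this patch guards the real E-Switch API with CONFIG_MLX5_ESWITCH and supplies static-inline stubs when it is disabled (see the eswitch.h, en_rep.h and en_tc.h hunks further down), which is what lets call sites such as main.c, eq.c and sriov.c drop their own #ifdef guards. A simplified sketch of that header pattern, not the exact driver code:

/* compile-out pattern sketch -- names borrowed from the patch, simplified */
struct mlx5_core_dev;
struct mlx5_eswitch;

#ifdef CONFIG_MLX5_ESWITCH
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
#else
/* stubs: callers build and run unchanged, the calls become no-ops */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
#endif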
+ config MLX5_CORE_EN_DCB bool "Data Center Bridging (DCB) Support" default y diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index c867e48f8a4c..22ed657d263a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -11,13 +11,14 @@ mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ fpga/ipsec.o -mlx5_core-$(CONFIG_MLX5_CORE_EN) += eswitch.o eswitch_offloads.o \ - en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ - en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ - en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o +mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ + en_tx.o en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ + en_arfs.o en_fs_ethtool.o en_selftest.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o +mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o + mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e3c858c44532..b19e9d235008 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3025,6 +3025,7 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { +#ifdef CONFIG_MLX5_ESWITCH struct mlx5e_priv *priv = netdev_priv(dev); if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) @@ -3048,6 +3049,7 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, } mqprio: +#endif if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; @@ -3350,6 +3352,7 @@ static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } +#ifdef CONFIG_MLX5_ESWITCH static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -3452,6 +3455,7 @@ static int mlx5e_get_vf_stats(struct net_device *dev, return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1, vf_stats); } +#endif static void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) @@ -3685,7 +3689,7 @@ static void mlx5e_netpoll(struct net_device *dev) } #endif -static const struct net_device_ops mlx5e_netdev_ops_basic = { +static const struct net_device_ops mlx5e_netdev_ops = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, @@ -3711,34 +3715,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = mlx5e_netpoll, #endif -}; - -static const struct net_device_ops mlx5e_netdev_ops_sriov = { - .ndo_open = mlx5e_open, - .ndo_stop = mlx5e_close, - .ndo_start_xmit = mlx5e_xmit, - .ndo_setup_tc = mlx5e_ndo_setup_tc, - .ndo_select_queue = mlx5e_select_queue, - .ndo_get_stats64 = mlx5e_get_stats, - .ndo_set_rx_mode = mlx5e_set_rx_mode, - .ndo_set_mac_address = mlx5e_set_mac, - .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, - .ndo_set_features = mlx5e_set_features, - .ndo_change_mtu = mlx5e_change_mtu, - .ndo_do_ioctl = mlx5e_ioctl, - .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, - .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, - .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, - .ndo_features_check = mlx5e_features_check, -#ifdef 
CONFIG_RFS_ACCEL - .ndo_rx_flow_steer = mlx5e_rx_flow_steer, -#endif - .ndo_tx_timeout = mlx5e_tx_timeout, - .ndo_xdp = mlx5e_xdp, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = mlx5e_netpoll, -#endif +#ifdef CONFIG_MLX5_ESWITCH /* SRIOV E-Switch NDOs */ .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_vlan = mlx5e_set_vf_vlan, @@ -3750,6 +3727,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_get_vf_stats = mlx5e_get_vf_stats, .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, +#endif }; static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) @@ -3979,9 +3957,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) } } +#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) static const struct switchdev_ops mlx5e_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; +#endif static void mlx5e_build_nic_netdev(struct net_device *netdev) { @@ -3992,15 +3972,12 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) SET_NETDEV_DEV(netdev, &mdev->pdev->dev); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) { - netdev->netdev_ops = &mlx5e_netdev_ops_sriov; + netdev->netdev_ops = &mlx5e_netdev_ops; + #ifdef CONFIG_MLX5_CORE_EN_DCB - if (MLX5_CAP_GEN(mdev, qos)) - netdev->dcbnl_ops = &mlx5e_dcbnl_ops; + if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos)) + netdev->dcbnl_ops = &mlx5e_dcbnl_ops; #endif - } else { - netdev->netdev_ops = &mlx5e_netdev_ops_basic; - } netdev->watchdog_timeo = 15 * HZ; @@ -4072,7 +4049,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) mlx5e_set_netdev_dev_addr(netdev); -#ifdef CONFIG_NET_SWITCHDEV +#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH) if (MLX5_VPORT_MANAGER(mdev)) netdev->switchdev_ops = &mlx5e_switchdev_ops; #endif @@ -4431,6 +4408,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) if (err) return NULL; +#ifdef CONFIG_MLX5_ESWITCH if (MLX5_VPORT_MANAGER(mdev)) { rpriv = mlx5e_alloc_nic_rep_priv(mdev); if (!rpriv) { @@ -4438,6 +4416,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) return NULL; } } +#endif netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv); if (!netdev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 23e43bbf928d..5659ed9f51e6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -38,6 +38,7 @@ #include "eswitch.h" #include "en.h" +#ifdef CONFIG_MLX5_ESWITCH struct mlx5e_neigh_update_table { struct rhashtable neigh_ht; /* Save the neigh hash entries in a list in addition to the hash table @@ -142,5 +143,12 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e); void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); +#else /* CONFIG_MLX5_ESWITCH */ +static inline void mlx5e_register_vport_reps(struct mlx5e_priv *priv) {} +static inline void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) {} +static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; } +static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {} +#endif #endif /* __MLX5E_REP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 325b2c8c1c6d..8e224bcbc6a6 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -857,6 +857,7 @@ wq_ll_pop: &wqe->next.next_wqe_index); } +#ifdef CONFIG_MLX5_ESWITCH void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { struct net_device *netdev = rq->netdev; @@ -901,6 +902,7 @@ wq_ll_pop: mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, &wqe->next.next_wqe_index); } +#endif static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index ecbe30d808ae..36473ec65ce8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -35,6 +35,7 @@ #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff +#ifdef CONFIG_MLX5_ESWITCH int mlx5e_tc_init(struct mlx5e_priv *priv); void mlx5e_tc_cleanup(struct mlx5e_priv *priv); @@ -60,4 +61,10 @@ static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) return atomic_read(&priv->fs.tc.ht.nelems); } +#else /* CONFIG_MLX5_ESWITCH */ +static inline int mlx5e_tc_init(struct mlx5e_priv *priv) { return 0; } +static inline void mlx5e_tc_cleanup(struct mlx5e_priv *priv) {} +static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; } +#endif + #endif /* __MLX5_EN_TC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 24d2f707fdfc..de704ff5619a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -36,9 +36,7 @@ #include #include "mlx5_core.h" #include "fpga/core.h" -#ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" -#endif enum { MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), @@ -467,11 +465,9 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) } break; -#ifdef CONFIG_MLX5_CORE_EN case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); break; -#endif case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: mlx5_port_module_event(dev, eqe); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 701d228de4ad..565c8b7a399a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -39,6 +39,14 @@ #include #include "lib/mpfs.h" +enum { + SRIOV_NONE, + SRIOV_LEGACY, + SRIOV_OFFLOADS +}; + +#ifdef CONFIG_MLX5_ESWITCH + #define MLX5_MAX_UC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_uc_list)) @@ -125,12 +133,6 @@ struct mlx5_eswitch_fdb { }; }; -enum { - SRIOV_NONE, - SRIOV_LEGACY, - SRIOV_OFFLOADS -}; - struct mlx5_esw_sq { struct mlx5_flow_handle *send_to_vport_rule; struct list_head list; @@ -292,4 +294,13 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, #define esw_debug(dev, format, ...) 
\ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__) +#else /* CONFIG_MLX5_ESWITCH */ +/* eswitch API stubs */ +static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } +static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} +static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {} +static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; } +static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {} +#endif /* CONFIG_MLX5_ESWITCH */ + #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d4a9c9b7b6a2..124c7c3c3a00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -54,9 +54,7 @@ #include "mlx5_core.h" #include "fs_core.h" #include "lib/mpfs.h" -#ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" -#endif #include "lib/mlx5.h" #include "fpga/core.h" #include "accel/ipsec.h" @@ -953,13 +951,11 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_rl_cleanup; } -#ifdef CONFIG_MLX5_CORE_EN err = mlx5_eswitch_init(dev); if (err) { dev_err(&pdev->dev, "Failed to init eswitch %d\n", err); goto err_mpfs_cleanup; } -#endif err = mlx5_sriov_init(dev); if (err) { @@ -978,10 +974,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv) err_sriov_cleanup: mlx5_sriov_cleanup(dev); err_eswitch_cleanup: -#ifdef CONFIG_MLX5_CORE_EN mlx5_eswitch_cleanup(dev->priv.eswitch); err_mpfs_cleanup: -#endif mlx5_mpfs_cleanup(dev); err_rl_cleanup: mlx5_cleanup_rl_table(dev); @@ -1002,9 +996,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) { mlx5_fpga_cleanup(dev); mlx5_sriov_cleanup(dev); -#ifdef CONFIG_MLX5_CORE_EN mlx5_eswitch_cleanup(dev->priv.eswitch); -#endif mlx5_mpfs_cleanup(dev); mlx5_cleanup_rl_table(dev); mlx5_cleanup_reserved_gids(dev); @@ -1311,7 +1303,7 @@ struct mlx5_core_event_handler { }; static const struct devlink_ops mlx5_devlink_ops = { -#ifdef CONFIG_MLX5_CORE_EN +#ifdef CONFIG_MLX5_ESWITCH .eswitch_mode_set = mlx5_devlink_eswitch_mode_set, .eswitch_mode_get = mlx5_devlink_eswitch_mode_get, .eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index bf99d40e30b4..5e7ffc9fad78 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -33,9 +33,7 @@ #include #include #include "mlx5_core.h" -#ifdef CONFIG_MLX5_CORE_EN #include "eswitch.h" -#endif bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) { @@ -57,14 +55,12 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) return -EBUSY; } -#ifdef CONFIG_MLX5_CORE_EN err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY); if (err) { mlx5_core_warn(dev, "failed to enable eswitch SRIOV (%d)\n", err); return err; } -#endif for (vf = 0; vf < num_vfs; vf++) { err = mlx5_core_enable_hca(dev, vf + 1); @@ -88,11 +84,7 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev) int vf; if (!sriov->enabled_vfs) -#ifdef CONFIG_MLX5_CORE_EN - goto disable_sriov_resources; -#else - return; -#endif + goto out; for (vf = 0; vf < sriov->num_vfs; vf++) { if (!sriov->vfs_ctx[vf].enabled) @@ -106,10 +98,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev 
*dev) sriov->enabled_vfs--; } -#ifdef CONFIG_MLX5_CORE_EN -disable_sriov_resources: +out: mlx5_eswitch_disable_sriov(dev->priv.eswitch); -#endif if (mlx5_wait_for_vf_pages(dev)) mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); -- cgit v1.2.3-55-g7522 From 97834eba7c194659a72c5bb0f8c19c7055bb69ea Mon Sep 17 00:00:00 2001 From: Erez Shitrit Date: Wed, 7 Jun 2017 12:14:24 +0300 Subject: net/mlx5: Delay events till ib registration ends When mlx5_ib registers itself to mlx5_core as an interface, it will call mlx5_add_device which will call mlx5_ib interface add callback, in case the latter successfully returns, only then mlx5_core will add it to the interface list and async events will be forwarded to mlx5_ib. Between mlx5_ib interface add callback and mlx5_core adding the mlx5_ib interface to its devices list, arriving mlx5_core events can be missed by the new mlx5_ib registering interface. In other words: thread 1: mlx5_ib: mlx5_register_interface(dev) thread 1: mlx5_core: mlx5_add_device(dev) thread 1: mlx5_core: ctx = dev->add => (mlx5_ib)->mlx5_ib_add thread 2: mlx5_core_event: **new event arrives, forward to dev_list thread 1: mlx5_core: add_ctx_to_dev_list(ctx) /* previous event was missed by the new interface.*/ It is ok to miss events before dev->add (mlx5_ib)->mlx5_ib_add_device but not after. We fix this race by accumulating the events that come between the ib_register_device (inside mlx5_add_device->(dev->add)) till the adding to the list completes and fire them to the new registering interface after that. Fixes: f1ee87fe55c8 ("net/mlx5: Organize device list API in one place") Signed-off-by: Erez Shitrit Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/dev.c | 73 ++++++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/main.c | 3 ++ include/linux/mlx5/driver.h | 3 ++ 3 files changed, 79 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index a62f4b6a21a5..ff60cf7342ca 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -45,11 +45,70 @@ struct mlx5_device_context { unsigned long state; }; +struct mlx5_delayed_event { + struct list_head list; + struct mlx5_core_dev *dev; + enum mlx5_dev_event event; + unsigned long param; +}; + enum { MLX5_INTERFACE_ADDED, MLX5_INTERFACE_ATTACHED, }; +static void add_delayed_event(struct mlx5_priv *priv, + struct mlx5_core_dev *dev, + enum mlx5_dev_event event, + unsigned long param) +{ + struct mlx5_delayed_event *delayed_event; + + delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC); + if (!delayed_event) { + mlx5_core_err(dev, "event %d is missed\n", event); + return; + } + + mlx5_core_dbg(dev, "Accumulating event %d\n", event); + delayed_event->dev = dev; + delayed_event->event = event; + delayed_event->param = param; + list_add_tail(&delayed_event->list, &priv->waiting_events_list); +} + +static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx, + struct mlx5_core_dev *dev, + struct mlx5_priv *priv) +{ + struct mlx5_delayed_event *de; + struct mlx5_delayed_event *n; + + /* stop delaying events */ + priv->is_accum_events = false; + + /* fire all accumulated events before new event comes */ + list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { + dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); + list_del(&de->list); + kfree(de); + } +} + +static void cleanup_delayed_evets(struct mlx5_priv *priv) +{ + struct 
mlx5_delayed_event *de; + struct mlx5_delayed_event *n; + + spin_lock_irq(&priv->ctx_lock); + priv->is_accum_events = false; + list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { + list_del(&de->list); + kfree(de); + } + spin_unlock_irq(&priv->ctx_lock); +} + void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) { struct mlx5_device_context *dev_ctx; @@ -63,6 +122,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) return; dev_ctx->intf = intf; + /* accumulating events that can come after mlx5_ib calls to + * ib_register_device, till adding that interface to the events list. + */ + + priv->is_accum_events = true; + dev_ctx->context = intf->add(dev); set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); if (intf->attach) @@ -71,6 +136,9 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) if (dev_ctx->context) { spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); + + fire_delayed_event_locked(dev_ctx, dev, priv); + #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING if (dev_ctx->intf->pfault) { if (priv->pfault) { @@ -84,6 +152,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) spin_unlock_irq(&priv->ctx_lock); } else { kfree(dev_ctx); + /* delete all accumulated events */ + cleanup_delayed_evets(priv); } } @@ -341,6 +411,9 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, spin_lock_irqsave(&priv->ctx_lock, flags); + if (priv->is_accum_events) + add_delayed_event(priv, dev, event, param); + list_for_each_entry(dev_ctx, &priv->ctx_list, list) if (dev_ctx->intf->event) dev_ctx->intf->event(dev, dev_ctx->context, event, param); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 124c7c3c3a00..6dbd637b4e66 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1343,6 +1343,9 @@ static int init_one(struct pci_dev *pdev, mutex_init(&dev->pci_status_mutex); mutex_init(&dev->intf_state_mutex); + INIT_LIST_HEAD(&priv->waiting_events_list); + priv->is_accum_events = false; + #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING err = init_srcu_struct(&priv->pfault_srcu); if (err) { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 88d6eb5b3a76..d26f18b39c4a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -647,6 +647,9 @@ struct mlx5_priv { struct list_head ctx_list; spinlock_t ctx_lock; + struct list_head waiting_events_list; + bool is_accum_events; + struct mlx5_flow_steering *steering; struct mlx5_mpfs *mpfs; struct mlx5_eswitch *eswitch; -- cgit v1.2.3-55-g7522 From 61690e09c3b4a40401bdaa89ee11522cc0dc4b11 Mon Sep 17 00:00:00 2001 From: Rabie Loulou Date: Mon, 10 Jul 2017 14:35:10 +0300 Subject: net/mlx5: Fix counter list hardware structure The counter list hardware structure doesn't contain a clear and num_of_counters fields, remove them. These wrong fields were never used by the driver hence no other driver changes. 
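For readers not used to mlx5_ifc.h: each *_bits struct describes a firmware command layout, where a u8 member's array length is its width in bits and reserved fields are named by their starting bit offset. Dropping the two bogus fields therefore means replacing them with a reserved span of the same total width (0x1 + 0xf = 0x10 bits), which is what the hunk below does. A cut-down illustration, not the real definition:

/* illustrative 64-bit layout in mlx5_ifc style: widths on each 32-bit
 * line sum to 0x20, and reserved_at_<N> records the hole's bit offset
 * so later fields keep their positions
 */
struct example_flow_counter_list_bits {
	u8 reserved_at_0[0x10];		/* bits  0..15, was clear + num_of_counters */
	u8 flow_counter_id[0x10];	/* bits 16..31 */

	u8 reserved_at_20[0x20];	/* bits 32..63 */
};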
Fixes: a351a1b03bf1 ("net/mlx5: Introduce bulk reading of flow counters") Signed-off-by: Rabie Loulou Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 3030121b4746..f847a3a57913 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1071,8 +1071,7 @@ struct mlx5_ifc_dest_format_struct_bits { }; struct mlx5_ifc_flow_counter_list_bits { - u8 clear[0x1]; - u8 num_of_counters[0xf]; + u8 reserved_at_0[0x10]; u8 flow_counter_id[0x10]; u8 reserved_at_20[0x20]; -- cgit v1.2.3-55-g7522 From a8ffcc741acb3c7f3dcf4c7d001209aa0995a5f1 Mon Sep 17 00:00:00 2001 From: Rabie Loulou Date: Sun, 9 Jul 2017 13:39:30 +0300 Subject: net/mlx5: Increase the maximum flow counters supported Read new NIC capability field which represnts 16 MSBs of the max flow counters number supported (max_flow_counter_31_16). Backward compatibility with older firmware is preserved, the modified driver reads max_flow_counter_31_16 as 0 from the older firmware and uses up to 64K counters. Changed flow counter id from 16 bits to 32 bits. Backward compatibility with older firmware is preserved as we kept the 16 LSBs of the counter id in place and added 16 MSBs from reserved field. Changed the background bulk reading of flow counters to work in chunks of at most 32K counters, to make sure we don't attempt to allocate very large buffers. Signed-off-by: Rabie Loulou Reviewed-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6 ++++-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 12 ++++++------ drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 10 +++++----- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 13 ++++++++++--- include/linux/mlx5/mlx5_ifc.h | 16 ++++++---------- 6 files changed, 32 insertions(+), 27 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 95b64025ce36..e7c186b58579 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -433,6 +433,8 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) struct mlx5_flow_table *fdb = NULL; int esw_size, err = 0; u32 flags = 0; + u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | + MLX5_CAP_GEN(dev, max_flow_counter_15_0); root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB); if (!root_ns) { @@ -443,9 +445,9 @@ static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw) esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n", MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), - MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS); + max_flow_counter, ESW_OFFLOADS_NUM_GROUPS); - esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS, + esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS, 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index e750f07793b8..16b32f31d691 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -359,7 +359,7 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id) { u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0}; u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0}; @@ -374,7 +374,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id) return err; } -int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id) { u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0}; u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0}; @@ -385,7 +385,7 @@ int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id) return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, u64 *packets, u64 *bytes) { u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) + @@ -409,14 +409,14 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, } struct mlx5_cmd_fc_bulk { - u16 id; + u32 id; int num; int outlen; u32 out[0]; }; struct mlx5_cmd_fc_bulk * -mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num) +mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num) { struct mlx5_cmd_fc_bulk *b; int outlen = @@ -453,7 +453,7 @@ mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b) } void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, - struct mlx5_cmd_fc_bulk *b, u16 id, + struct mlx5_cmd_fc_bulk *b, u32 id, u64 *packets, u64 *bytes) { int index = id - b->id; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h index 0f98a7cf4877..c6d7bdf255b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h @@ -74,20 +74,20 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, u32 underlay_qpn); -int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id); -int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id); -int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id, +int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id); +int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id); +int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id, u64 *packets, u64 *bytes); struct mlx5_cmd_fc_bulk; struct mlx5_cmd_fc_bulk * -mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num); +mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num); void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b); int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b); void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev, - struct mlx5_cmd_fc_bulk *b, u16 id, + struct mlx5_cmd_fc_bulk *b, u32 id, u64 *packets, u64 *bytes); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 990acee6fb09..9fb5a333df52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -136,7 +136,7 @@ struct mlx5_fc { u64 lastpackets; u64 lastbytes; - u16 id; + u32 id; bool deleted; bool aging; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 6507d8acc54d..89d1f8650033 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -38,6 +38,8 @@ #include "fs_cmd.h" #define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000) +/* Max number of counters to query in bulk read is 32K */ +#define MLX5_SW_MAX_COUNTERS_BULK BIT(15) /* locking scheme: * @@ -90,16 +92,21 @@ static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter) rb_insert_color(&counter->node, root); } +/* The function returns the last node that was queried so the caller + * function can continue calling it till all counters are queried. + */ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev, struct mlx5_fc *first, - u16 last_id) + u32 last_id) { struct mlx5_cmd_fc_bulk *b; struct rb_node *node = NULL; - u16 afirst_id; + u32 afirst_id; int num; int err; - int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk); + + int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK, + (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk))); /* first id must be aligned to 4 when using bulk query */ afirst_id = first->id & ~0x3; diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index f847a3a57913..c99daffc3c3c 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -963,7 +963,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_2a0[0x10]; u8 max_wqe_sz_rq[0x10]; - u8 reserved_at_2c0[0x10]; + u8 max_flow_counter_31_16[0x10]; u8 max_wqe_sz_sq_dc[0x10]; u8 reserved_at_2e0[0x7]; @@ -981,7 +981,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_340[0x8]; u8 log_max_flow_counter_bulk[0x8]; - u8 max_flow_counter[0x10]; + u8 max_flow_counter_15_0[0x10]; u8 reserved_at_360[0x3]; @@ -1071,8 +1071,7 @@ struct mlx5_ifc_dest_format_struct_bits { }; struct mlx5_ifc_flow_counter_list_bits { - u8 reserved_at_0[0x10]; - u8 flow_counter_id[0x10]; + u8 flow_counter_id[0x20]; u8 reserved_at_20[0x20]; }; @@ -4402,8 +4401,7 @@ struct mlx5_ifc_query_flow_counter_in_bits { u8 reserved_at_c1[0xf]; u8 num_of_counters[0x10]; - u8 reserved_at_e0[0x10]; - u8 flow_counter_id[0x10]; + u8 flow_counter_id[0x20]; }; struct mlx5_ifc_query_esw_vport_context_out_bits { @@ -6271,8 +6269,7 @@ struct mlx5_ifc_dealloc_flow_counter_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 reserved_at_40[0x10]; - u8 flow_counter_id[0x10]; + u8 flow_counter_id[0x20]; u8 reserved_at_60[0x20]; }; @@ -7097,8 +7094,7 @@ struct mlx5_ifc_alloc_flow_counter_out_bits { u8 syndrome[0x20]; - u8 reserved_at_40[0x10]; - u8 flow_counter_id[0x10]; + u8 flow_counter_id[0x20]; u8 reserved_at_60[0x20]; }; -- cgit v1.2.3-55-g7522 From 2572ac53c46f58e500b9d8d0f99785666038c590 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:17 +0200 Subject: net: sched: make type an argument for ndo_setup_tc Since the type is always present, push it to be a separate argument to ndo_setup_tc. On the way, name the type enum and use it for arg type. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6 +++--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 7 ++++--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 5 +++-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 7 ++++--- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 7 ++++--- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 7 ++++--- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 6 +++--- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 7 ++++--- drivers/net/ethernet/intel/i40e/i40e_main.c | 6 +++--- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 9 +++++---- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 ++++---- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 9 +++++---- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 10 +++++----- drivers/net/ethernet/netronome/nfp/bpf/main.c | 5 +++-- drivers/net/ethernet/netronome/nfp/flower/main.h | 3 ++- drivers/net/ethernet/netronome/nfp/flower/offload.c | 5 +++-- drivers/net/ethernet/netronome/nfp/nfp_app.h | 6 ++++-- drivers/net/ethernet/netronome/nfp/nfp_port.c | 7 ++++--- drivers/net/ethernet/netronome/nfp/nfp_port.h | 5 +++-- drivers/net/ethernet/sfc/efx.h | 5 +++-- drivers/net/ethernet/sfc/falcon/efx.h | 5 +++-- drivers/net/ethernet/sfc/falcon/tx.c | 7 ++++--- drivers/net/ethernet/sfc/tx.c | 7 ++++--- drivers/net/ethernet/ti/netcp_core.c | 7 ++++--- include/linux/netdevice.h | 9 +++++---- net/dsa/slave.c | 6 +++--- net/sched/cls_bpf.c | 4 ++-- net/sched/cls_flower.c | 14 ++++++-------- net/sched/cls_matchall.c | 10 ++++------ net/sched/cls_u32.c | 20 ++++++++------------ net/sched/sch_mqprio.c | 13 ++++++------- 32 files changed, 125 insertions(+), 113 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index ecef3ee87b17..6a6ea3bdd056 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1918,14 +1918,14 @@ static void xgbe_poll_controller(struct net_device *netdev) } #endif /* End CONFIG_NET_POLL_CONTROLLER */ -static int xgbe_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, - __be16 proto, +static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc_to_netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); u8 tc; - if (tc_to_netdev->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 67fe3d826566..4395d1cac86f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4284,10 +4284,11 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) return 0; } -int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) { - if (tc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index c26688d2f326..1ac4eb0d3413 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ 
-486,8 +486,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); -int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc); int bnx2x_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 156fb374522b..b98d9f33d9af 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7237,10 +7237,11 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) return 0; } -static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *ntc) +static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *ntc) { - if (ntc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index fdf220aa08d6..89d2b0cd9869 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2889,8 +2889,9 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } -static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) { struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); @@ -2906,7 +2907,7 @@ static int cxgb_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, } if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && - tc->type == TC_SETUP_CLSU32) { + type == TC_SETUP_CLSU32) { switch (tc->cls_u32->command) { case TC_CLSU32_NEW_KNODE: case TC_CLSU32_REPLACE_KNODE: diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 550ea1ec7b6c..d86d766777c8 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -342,14 +342,15 @@ static void dpaa_get_stats64(struct net_device *net_dev, } } -static int dpaa_setup_tc(struct net_device *net_dev, u32 handle, - u32 chain_index, __be16 proto, struct tc_to_netdev *tc) +static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) { struct dpaa_priv *priv = netdev_priv(net_dev); u8 num_tc; int i; - if (tc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index ad9481c7ceae..6bb1e35336cc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1219,11 +1219,11 @@ static int hns3_setup_tc(struct net_device *netdev, u8 tc) return 0; } -static int hns3_nic_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 protocol, 
+static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 protocol, struct tc_to_netdev *tc) { - if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + if (handle != TC_H_ROOT || type != TC_SETUP_MQPRIO) return -EINVAL; return hns3_setup_tc(dev, tc->mqprio->num_tc); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 5e37387c7082..b30190639e78 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1265,10 +1265,11 @@ err_queueing_scheme: return err; } -static int __fm10k_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) { - if (tc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4104944ea367..7d47a718f922 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5656,11 +5656,11 @@ exit: return ret; } -static int __i40e_setup_tc(struct net_device *netdev, u32 handle, - u32 chain_index, __be16 proto, +static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { - if (tc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 091fcc7e6e43..d39db9711df6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9226,8 +9226,9 @@ free_jump: return err; } -static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) { struct ixgbe_adapter *adapter = netdev_priv(dev); @@ -9235,7 +9236,7 @@ static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, return -EOPNOTSUPP; if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && - tc->type == TC_SETUP_CLSU32) { + type == TC_SETUP_CLSU32) { switch (tc->cls_u32->command) { case TC_CLSU32_NEW_KNODE: case TC_CLSU32_REPLACE_KNODE: @@ -9255,7 +9256,7 @@ static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, } } - if (tc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 3a291fc1780a..5c33550765ed 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -130,11 +130,11 @@ out: return err; } -static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, +static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { - if (tc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; if (tc->mqprio->num_tc && 
tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 57f31fa478ce..4052e225f1dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3027,8 +3027,8 @@ out: return err; } -static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, +static int mlx5e_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -3039,7 +3039,7 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, if (chain_index) return -EOPNOTSUPP; - switch (tc->type) { + switch (type) { case TC_SETUP_CLSFLOWER: switch (tc->cls_flower->command) { case TC_CLSFLOWER_REPLACE: @@ -3054,7 +3054,7 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, } mqprio: - if (tc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 45e60be9c277..d44049ed5371 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -651,7 +651,8 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, return 0; } -static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle, +static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, + enum tc_setup_type type, u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { @@ -664,15 +665,15 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle, struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw); - return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle, - chain_index, + return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, type, + handle, chain_index, proto, tc); } if (chain_index) return -EOPNOTSUPP; - switch (tc->type) { + switch (type) { case TC_SETUP_CLSFLOWER: switch (tc->cls_flower->command) { case TC_CLSFLOWER_REPLACE: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 66d511d45c25..155424266cbf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1693,8 +1693,8 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, kfree(mall_tc_entry); } -static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 proto, +static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); @@ -1703,7 +1703,7 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, if (chain_index) return -EOPNOTSUPP; - switch (tc->type) { + switch (type) { case TC_SETUP_MATCHALL: switch (tc->cls_mall->command) { case TC_CLSMATCHALL_REPLACE: @@ -1733,9 +1733,9 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle, default: return -EOPNOTSUPP; } + default: + return -EOPNOTSUPP; } - - return -EOPNOTSUPP; } static const struct net_device_ops mlxsw_sp_port_netdev_ops = { diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c 
index afbdf5fd4e4f..788880808a6e 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -121,7 +121,8 @@ static void nfp_bpf_vnic_clean(struct nfp_app *app, struct nfp_net *nn) } static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc) + enum tc_setup_type type, u32 handle, __be16 proto, + struct tc_to_netdev *tc) { struct nfp_net *nn = netdev_priv(netdev); @@ -130,7 +131,7 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, if (proto != htons(ETH_P_ALL)) return -EOPNOTSUPP; - if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) { + if (type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) { if (!nn->dp.bpf_offload_xdp) return nfp_net_bpf_offload(nn, tc->cls_bpf); else diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 9e64c048e83f..314e6e8ba649 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -135,7 +135,8 @@ int nfp_flower_metadata_init(struct nfp_app *app); void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc); + enum tc_setup_type type, u32 handle, __be16 proto, + struct tc_to_netdev *tc); int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 4ad10bd5e139..d045cf8c140a 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -385,7 +385,8 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, } int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc) + enum tc_setup_type type, u32 handle, __be16 proto, + struct tc_to_netdev *tc) { if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) return -EOPNOTSUPP; @@ -393,7 +394,7 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, if (!eth_proto_is_802_3(proto)) return -EOPNOTSUPP; - if (tc->type != TC_SETUP_CLSFLOWER) + if (type != TC_SETUP_CLSFLOWER) return -EINVAL; return nfp_flower_repr_offload(app, netdev, tc->cls_flower); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 5d714e10d9a9..b3b03bb9d907 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -109,7 +109,8 @@ struct nfp_app_type { void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb); int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, - u32 handle, __be16 proto, struct tc_to_netdev *tc); + enum tc_setup_type type, u32 handle, __be16 proto, + struct tc_to_netdev *tc); bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, struct bpf_prog *prog); @@ -238,12 +239,13 @@ static inline bool nfp_app_tc_busy(struct nfp_app *app, struct nfp_net *nn) static inline int nfp_app_setup_tc(struct nfp_app *app, struct net_device *netdev, + enum tc_setup_type type, u32 handle, __be16 proto, struct tc_to_netdev *tc) { if (!app || !app->type->setup_tc) return -EOPNOTSUPP; - return app->type->setup_tc(app, netdev, 
handle, proto, tc); + return app->type->setup_tc(app, netdev, type, handle, proto, tc); } static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index d16a7b78ba9b..9d776f982352 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -88,8 +88,9 @@ const struct switchdev_ops nfp_port_switchdev_ops = { .switchdev_port_attr_get = nfp_port_attr_get, }; -int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) { struct nfp_port *port; @@ -100,7 +101,7 @@ int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, if (!port) return -EOPNOTSUPP; - return nfp_app_setup_tc(port->app, netdev, handle, proto, tc); + return nfp_app_setup_tc(port->app, netdev, type, handle, proto, tc); } struct nfp_port * diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 56c76926c82a..239c5401000c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -109,8 +109,9 @@ struct nfp_port { extern const struct switchdev_ops nfp_port_switchdev_ops; -int nfp_port_setup_tc(struct net_device *netdev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc); struct nfp_port *nfp_port_from_netdev(struct net_device *netdev); struct nfp_port * diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index fcea9371ab7f..e41a7179bc05 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -32,8 +32,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); -int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; extern bool efx_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h index e5a7a40cc8b6..f3bc67ec1f30 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.h +++ b/drivers/net/ethernet/sfc/falcon/efx.h @@ -32,8 +32,9 @@ netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb); void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index); -int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc); +int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc); unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx); extern bool ef4_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/falcon/tx.c 
b/drivers/net/ethernet/sfc/falcon/tx.c index f1520a404ac6..6c4752694c1f 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -425,8 +425,9 @@ void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *ntc) +int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *ntc) { struct ef4_nic *efx = netdev_priv(net_dev); struct ef4_channel *channel; @@ -434,7 +435,7 @@ int ef4_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, unsigned tc, num_tc; int rc; - if (ntc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; num_tc = ntc->mqprio->num_tc; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 02d41eb4a8e9..0c08c10d751c 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -653,8 +653,9 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *ntc) +int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *ntc) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; @@ -662,7 +663,7 @@ int efx_setup_tc(struct net_device *net_dev, u32 handle, u32 chain_index, unsigned tc, num_tc; int rc; - if (ntc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; num_tc = ntc->mqprio->num_tc; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 9d52c3a78621..cb21742f6177 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1877,8 +1877,9 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, return 0; } -static int netcp_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) +static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) { u8 num_tc; int i; @@ -1886,7 +1887,7 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, u32 chain_index, /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - if (tc->type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 3a3cdc1b1f31..e4238e540544 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -774,7 +774,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, /* These structures hold the attributes of qdisc and classifiers * that are being passed to the netdevice through the setup_tc op. */ -enum { +enum tc_setup_type { TC_SETUP_MQPRIO, TC_SETUP_CLSU32, TC_SETUP_CLSFLOWER, @@ -785,7 +785,6 @@ enum { struct tc_cls_u32_offload; struct tc_to_netdev { - unsigned int type; union { struct tc_cls_u32_offload *cls_u32; struct tc_cls_flower_offload *cls_flower; @@ -978,8 +977,9 @@ struct xfrmdev_ops { * with PF and querying it may introduce a theoretical security risk. 
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); - * int (*ndo_setup_tc)(struct net_device *dev, u32 handle, u32 chain_index, - * __be16 protocol, struct tc_to_netdev *tc); + * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, + * u32 handle, u32 chain_index, __be16 protocol, + * struct tc_to_netdev *tc); * Called to setup any 'tc' scheduler, classifier or action on @dev. * This is always called from the stack with the rtnl lock held and netif * tx queues stopped. This allows the netdevice to perform queue @@ -1227,6 +1227,7 @@ struct net_device_ops { struct net_device *dev, int vf, bool setting); int (*ndo_setup_tc)(struct net_device *dev, + enum tc_setup_type type, u32 handle, u32 chain_index, __be16 protocol, struct tc_to_netdev *tc); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 75c5c5808220..b4b63c20ec80 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -863,8 +863,8 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev, kfree(mall_tc_entry); } -static int dsa_slave_setup_tc(struct net_device *dev, u32 handle, - u32 chain_index, __be16 protocol, +static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 protocol, struct tc_to_netdev *tc) { bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); @@ -872,7 +872,7 @@ static int dsa_slave_setup_tc(struct net_device *dev, u32 handle, if (chain_index) return -EOPNOTSUPP; - switch (tc->type) { + switch (type) { case TC_SETUP_MATCHALL: switch (tc->cls_mall->command) { case TC_CLSMATCHALL_REPLACE: diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index cf248c3137ad..e2bf2753173d 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -151,7 +151,6 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, struct tc_to_netdev offload; int err; - offload.type = TC_SETUP_CLSBPF; offload.cls_bpf = &bpf_offload; bpf_offload.command = cmd; @@ -161,7 +160,8 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, bpf_offload.exts_integrated = prog->exts_integrated; bpf_offload.gen_flags = prog->gen_flags; - err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, + tp->q->handle, tp->chain->index, tp->protocol, &offload); diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 7ab524fc43f9..ddeed17d2024 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -236,11 +236,10 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) offload.prio = tp->prio; offload.cookie = (unsigned long)f; - tc->type = TC_SETUP_CLSFLOWER; tc->cls_flower = &offload; - dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->chain->index, - tp->protocol, tc); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, tp->q->handle, + tp->chain->index, tp->protocol, tc); } static int fl_hw_replace_filter(struct tcf_proto *tp, @@ -273,11 +272,11 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, offload.key = &f->mkey; offload.exts = &f->exts; - tc->type = TC_SETUP_CLSFLOWER; tc->cls_flower = &offload; - err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, - tp->chain->index, tp->protocol, tc); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, + tp->q->handle, tp->chain->index, + tp->protocol, tc); if (!err) f->flags |= TCA_CLS_FLAGS_IN_HW; @@ -300,10 +299,9 @@ static void 
fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) offload.cookie = (unsigned long)f; offload.exts = &f->exts; - tc->type = TC_SETUP_CLSFLOWER; tc->cls_flower = &offload; - dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, + dev->netdev_ops->ndo_setup_tc(dev, TC_CLSFLOWER_STATS, tp->q->handle, tp->chain->index, tp->protocol, tc); } diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index f35177b48373..6ffe0b82ab83 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -58,14 +58,13 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, struct tc_cls_matchall_offload mall_offload = {0}; int err; - offload.type = TC_SETUP_MATCHALL; offload.cls_mall = &mall_offload; offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; offload.cls_mall->exts = &head->exts; offload.cls_mall->cookie = cookie; - err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, - tp->chain->index, + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MATCHALL, + tp->q->handle, tp->chain->index, tp->protocol, &offload); if (!err) head->flags |= TCA_CLS_FLAGS_IN_HW; @@ -81,14 +80,13 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp, struct tc_to_netdev offload; struct tc_cls_matchall_offload mall_offload = {0}; - offload.type = TC_SETUP_MATCHALL; offload.cls_mall = &mall_offload; offload.cls_mall->command = TC_CLSMATCHALL_DESTROY; offload.cls_mall->exts = NULL; offload.cls_mall->cookie = cookie; - dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->chain->index, - tp->protocol, &offload); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MATCHALL, tp->q->handle, + tp->chain->index, tp->protocol, &offload); } static void mall_destroy(struct tcf_proto *tp) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 9fd243799fe7..d1bae4cc749f 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -434,15 +434,14 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) struct tc_cls_u32_offload u32_offload = {0}; struct tc_to_netdev offload; - offload.type = TC_SETUP_CLSU32; offload.cls_u32 = &u32_offload; if (tc_should_offload(dev, tp, 0)) { offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; offload.cls_u32->knode.handle = handle; - dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, - tp->chain->index, tp->protocol, - &offload); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, + tp->q->handle, tp->chain->index, + tp->protocol, &offload); } } @@ -457,7 +456,6 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, if (!tc_should_offload(dev, tp, flags)) return tc_skip_sw(flags) ? 
-EINVAL : 0; - offload.type = TC_SETUP_CLSU32; offload.cls_u32 = &u32_offload; offload.cls_u32->command = TC_CLSU32_NEW_HNODE; @@ -465,7 +463,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, offload.cls_u32->hnode.handle = h->handle; offload.cls_u32->hnode.prio = h->prio; - err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, tp->q->handle, tp->chain->index, tp->protocol, &offload); if (tc_skip_sw(flags)) @@ -480,7 +478,6 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) struct tc_cls_u32_offload u32_offload = {0}; struct tc_to_netdev offload; - offload.type = TC_SETUP_CLSU32; offload.cls_u32 = &u32_offload; if (tc_should_offload(dev, tp, 0)) { @@ -489,9 +486,9 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) offload.cls_u32->hnode.handle = h->handle; offload.cls_u32->hnode.prio = h->prio; - dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, - tp->chain->index, tp->protocol, - &offload); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, + tp->q->handle, tp->chain->index, + tp->protocol, &offload); } } @@ -503,7 +500,6 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, struct tc_to_netdev offload; int err; - offload.type = TC_SETUP_CLSU32; offload.cls_u32 = &u32_offload; if (!tc_should_offload(dev, tp, flags)) @@ -524,7 +520,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, if (n->ht_down) offload.cls_u32->knode.link_handle = n->ht_down->handle; - err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, tp->q->handle, tp->chain->index, tp->protocol, &offload); diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index e0c02725cd48..329610ce4dfe 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -40,10 +40,10 @@ static void mqprio_destroy(struct Qdisc *sch) if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) { struct tc_mqprio_qopt offload = { 0 }; - struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO, - { .mqprio = &offload } }; + struct tc_to_netdev tc = { { .mqprio = &offload } }; - dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, 0, &tc); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, + sch->handle, 0, 0, &tc); } else { netdev_set_num_tc(dev, 0); } @@ -149,11 +149,10 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) */ if (qopt->hw) { struct tc_mqprio_qopt offload = *qopt; - struct tc_to_netdev tc = { .type = TC_SETUP_MQPRIO, - { .mqprio = &offload } }; + struct tc_to_netdev tc = { { .mqprio = &offload } }; - err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, - 0, 0, &tc); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, + sch->handle, 0, 0, &tc); if (err) return err; -- cgit v1.2.3-55-g7522 From ade9b6588420b335851951702ab975c975b0c1b2 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:18 +0200 Subject: net: sched: rename TC_SETUP_MATCHALL to TC_SETUP_CLSMATCHALL In order to be aligned with the rest of the types, rename TC_SETUP_MATCHALL to TC_SETUP_CLSMATCHALL. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 +- include/linux/netdevice.h | 2 +- net/dsa/slave.c | 2 +- net/sched/cls_matchall.c | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 155424266cbf..6438c38e7a68 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1704,7 +1704,7 @@ static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, return -EOPNOTSUPP; switch (type) { - case TC_SETUP_MATCHALL: + case TC_SETUP_CLSMATCHALL: switch (tc->cls_mall->command) { case TC_CLSMATCHALL_REPLACE: return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e4238e540544..f8051a36f900 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -778,7 +778,7 @@ enum tc_setup_type { TC_SETUP_MQPRIO, TC_SETUP_CLSU32, TC_SETUP_CLSFLOWER, - TC_SETUP_MATCHALL, + TC_SETUP_CLSMATCHALL, TC_SETUP_CLSBPF, }; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index b4b63c20ec80..453f6ddcd023 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -873,7 +873,7 @@ static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, return -EOPNOTSUPP; switch (type) { - case TC_SETUP_MATCHALL: + case TC_SETUP_CLSMATCHALL: switch (tc->cls_mall->command) { case TC_CLSMATCHALL_REPLACE: return dsa_slave_add_cls_matchall(dev, protocol, diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 6ffe0b82ab83..a8853ada22f6 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -63,7 +63,7 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, offload.cls_mall->exts = &head->exts; offload.cls_mall->cookie = cookie; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MATCHALL, + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, tp->q->handle, tp->chain->index, tp->protocol, &offload); if (!err) @@ -85,7 +85,7 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp, offload.cls_mall->exts = NULL; offload.cls_mall->cookie = cookie; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MATCHALL, tp->q->handle, + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, tp->q->handle, tp->chain->index, tp->protocol, &offload); } -- cgit v1.2.3-55-g7522 From 3e0e82664322290a59189f7c2bcb39b0de932505 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:19 +0200 Subject: net: sched: make egress_dev flag part of flower offload struct Since this is specific to flower now, make it part of the flower offload struct. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- include/linux/netdevice.h | 1 - include/net/pkt_cls.h | 1 + net/sched/cls_flower.c | 2 +- 4 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index d44049ed5371..0e6bab182071 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -661,7 +661,7 @@ static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) return -EOPNOTSUPP; - if (tc->egress_dev) { + if (type == TC_SETUP_CLSFLOWER && tc->cls_flower->egress_dev) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f8051a36f900..bd49dbaee84e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -792,7 +792,6 @@ struct tc_to_netdev { struct tc_cls_bpf_offload *cls_bpf; struct tc_mqprio_qopt *mqprio; }; - bool egress_dev; }; /* These structures hold the attributes of xdp state that are being passed diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index e0c54f111467..8213acdfdf5a 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -504,6 +504,7 @@ struct tc_cls_flower_offload { struct fl_flow_key *mask; struct fl_flow_key *key; struct tcf_exts *exts; + bool egress_dev; }; enum tc_matchall_command { diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index ddeed17d2024..52deeed2b7f5 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -259,7 +259,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, return tc_skip_sw(f->flags) ? -EINVAL : 0; } dev = f->hw_dev; - tc->egress_dev = true; + offload.egress_dev = true; } else { f->hw_dev = dev; } -- cgit v1.2.3-55-g7522 From f73230430ac295b14cd1ee162dbf7ccfd90cbf6b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:20 +0200 Subject: cxgb4: push cls_u32 setup_tc processing into a separate function Let cxgb_setup_tc be a splitter for specific setup_tc types and push out cls_u32 specific code into a separate function. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 42 +++++++++++++++---------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 89d2b0cd9869..651229070113 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2889,6 +2889,26 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) return err; } +static int cxgb_setup_tc_cls_u32(struct net_device *dev, + enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_cls_u32_offload *cls_u32) +{ + if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || + chain_index) + return -EOPNOTSUPP; + + switch (cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return cxgb4_config_knode(dev, proto, cls_u32); + case TC_CLSU32_DELETE_KNODE: + return cxgb4_delete_knode(dev, proto, cls_u32); + default: + return -EOPNOTSUPP; + } +} + static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) @@ -2896,9 +2916,6 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); - if (chain_index) - return -EOPNOTSUPP; - if (!(adap->flags & FULL_INIT_DONE)) { dev_err(adap->pdev_dev, "Failed to setup tc on port %d. Link Down?\n", @@ -2906,20 +2923,13 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, return -EINVAL; } - if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && - type == TC_SETUP_CLSU32) { - switch (tc->cls_u32->command) { - case TC_CLSU32_NEW_KNODE: - case TC_CLSU32_REPLACE_KNODE: - return cxgb4_config_knode(dev, proto, tc->cls_u32); - case TC_CLSU32_DELETE_KNODE: - return cxgb4_delete_knode(dev, proto, tc->cls_u32); - default: - return -EOPNOTSUPP; - } + switch (type) { + case TC_SETUP_CLSU32: + return cxgb_setup_tc_cls_u32(dev, type, handle, chain_index, + proto, tc->cls_u32); + default: + return -EOPNOTSUPP; } - - return -EOPNOTSUPP; } static netdev_features_t cxgb_fix_features(struct net_device *dev, -- cgit v1.2.3-55-g7522 From bc32afdb2b01ae7652cbb829475d270f7d813618 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:21 +0200 Subject: ixgbe: push cls_u32 and mqprio setup_tc processing into separate functions Let __ixgbe_setup_tc be a splitter for specific setup_tc types and push out cls_u32 and mqprio specific codes into separate functions. Also change the return values so they are the same as in the rest of the drivers. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 67 ++++++++++++++++----------- 1 file changed, 39 insertions(+), 28 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d39db9711df6..35db198199b0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9226,42 +9226,53 @@ free_jump: return err; } -static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int ixgbe_setup_tc_cls_u32(struct net_device *dev, + u32 handle, u32 chain_index, __be16 proto, + struct tc_cls_u32_offload *cls_u32) { struct ixgbe_adapter *adapter = netdev_priv(dev); - if (chain_index) + if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || + chain_index) return -EOPNOTSUPP; - if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && - type == TC_SETUP_CLSU32) { - switch (tc->cls_u32->command) { - case TC_CLSU32_NEW_KNODE: - case TC_CLSU32_REPLACE_KNODE: - return ixgbe_configure_clsu32(adapter, - proto, tc->cls_u32); - case TC_CLSU32_DELETE_KNODE: - return ixgbe_delete_clsu32(adapter, tc->cls_u32); - case TC_CLSU32_NEW_HNODE: - case TC_CLSU32_REPLACE_HNODE: - return ixgbe_configure_clsu32_add_hnode(adapter, proto, - tc->cls_u32); - case TC_CLSU32_DELETE_HNODE: - return ixgbe_configure_clsu32_del_hnode(adapter, - tc->cls_u32); - default: - return -EINVAL; - } + switch (cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return ixgbe_configure_clsu32(adapter, proto, cls_u32); + case TC_CLSU32_DELETE_KNODE: + return ixgbe_delete_clsu32(adapter, cls_u32); + case TC_CLSU32_NEW_HNODE: + case TC_CLSU32_REPLACE_HNODE: + return ixgbe_configure_clsu32_add_hnode(adapter, proto, + cls_u32); + case TC_CLSU32_DELETE_HNODE: + return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32); + default: + return -EOPNOTSUPP; } +} - if (type != TC_SETUP_MQPRIO) - return -EINVAL; - - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; +static int ixgbe_setup_tc_mqprio(struct net_device *dev, + struct tc_mqprio_qopt *mqprio) +{ + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + return ixgbe_setup_tc(dev, mqprio->num_tc); +} - return ixgbe_setup_tc(dev, tc->mqprio->num_tc); +static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) +{ + switch (type) { + case TC_SETUP_CLSU32: + return ixgbe_setup_tc_cls_u32(dev, handle, chain_index, proto, + tc->cls_u32); + case TC_SETUP_MQPRIO: + return ixgbe_setup_tc_mqprio(dev, tc->mqprio); + default: + return -EOPNOTSUPP; + } } #ifdef CONFIG_PCI_IOV -- cgit v1.2.3-55-g7522 From 0cf0f6d3d39672c044393aef71ee782430ca8b13 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:22 +0200 Subject: mlx5e: push cls_flower and mqprio setup_tc processing into separate functions Let mlx5e_setup_tc (former mlx5e_ndo_setup_tc) be a splitter for specific setup_tc types and push out cls_flower and mqprio specific codes into separate functions. Also change the return values so they are the same as in the rest of the drivers. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 56 +++++++++++++---------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 4052e225f1dc..adf35da74a85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2998,12 +2998,16 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) return 0; } -static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) +static int mlx5e_setup_tc_mqprio(struct net_device *netdev, + struct tc_mqprio_qopt *mqprio) { struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_channels new_channels = {}; + u8 tc = mqprio->num_tc; int err = 0; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + if (tc && tc != MLX5E_MAX_NUM_TC) return -EINVAL; @@ -3027,39 +3031,41 @@ out: return err; } -static int mlx5e_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int mlx5e_setup_tc_cls_flower(struct net_device *dev, + u32 handle, u32 chain_index, __be16 proto, + struct tc_cls_flower_offload *cls_flower) { struct mlx5e_priv *priv = netdev_priv(dev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) - goto mqprio; + if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || + chain_index) + return -EOPNOTSUPP; - if (chain_index) + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlx5e_configure_flower(priv, proto, cls_flower); + case TC_CLSFLOWER_DESTROY: + return mlx5e_delete_flower(priv, cls_flower); + case TC_CLSFLOWER_STATS: + return mlx5e_stats_flower(priv, cls_flower); + default: return -EOPNOTSUPP; + } +} +static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) +{ switch (type) { case TC_SETUP_CLSFLOWER: - switch (tc->cls_flower->command) { - case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, proto, tc->cls_flower); - case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, tc->cls_flower); - case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, tc->cls_flower); - } + return mlx5e_setup_tc_cls_flower(dev, handle, chain_index, + proto, tc->cls_flower); + case TC_SETUP_MQPRIO: + return mlx5e_setup_tc_mqprio(dev, tc->mqprio); default: return -EOPNOTSUPP; } - -mqprio: - if (type != TC_SETUP_MQPRIO) - return -EINVAL; - - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - - return mlx5e_setup_tc(dev, tc->mqprio->num_tc); } static void @@ -3695,7 +3701,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, - .ndo_setup_tc = mlx5e_ndo_setup_tc, + .ndo_setup_tc = mlx5e_setup_tc, .ndo_select_queue = mlx5e_select_queue, .ndo_get_stats64 = mlx5e_get_stats, .ndo_set_rx_mode = mlx5e_set_rx_mode, @@ -3720,7 +3726,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, - .ndo_setup_tc = mlx5e_ndo_setup_tc, + .ndo_setup_tc = mlx5e_setup_tc, .ndo_select_queue = mlx5e_select_queue, .ndo_get_stats64 = mlx5e_get_stats, .ndo_set_rx_mode = mlx5e_set_rx_mode, -- cgit v1.2.3-55-g7522 From 8c818c27f37f4d847769a38b9d20c1b5ae21075d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:23 +0200 Subject: mlx5e_rep: push cls_flower setup_tc processing into a separate function Let 
mlx5e_rep_setup_tc (former mlx5e_rep_ndo_setup_tc) be a splitter for specific setup_tc types and push out cls_flower specific code into a separate function. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 49 ++++++++++++++---------- 1 file changed, 29 insertions(+), 20 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 0e6bab182071..e6cc642f6d8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -651,38 +651,47 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, return 0; } -static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, - enum tc_setup_type type, u32 handle, - u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) +static int mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, + u32 handle, u32 chain_index, + __be16 proto, + struct tc_to_netdev *tc) { + struct tc_cls_flower_offload *cls_flower = tc->cls_flower; struct mlx5e_priv *priv = netdev_priv(dev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) + if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || + chain_index) return -EOPNOTSUPP; - if (type == TC_SETUP_CLSFLOWER && tc->cls_flower->egress_dev) { + if (cls_flower->egress_dev) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw); - return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, type, - handle, chain_index, - proto, tc); + dev = mlx5_eswitch_get_uplink_netdev(esw); + return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, + handle, chain_index, + proto, tc); } - if (chain_index) + switch (cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlx5e_configure_flower(priv, proto, cls_flower); + case TC_CLSFLOWER_DESTROY: + return mlx5e_delete_flower(priv, cls_flower); + case TC_CLSFLOWER_STATS: + return mlx5e_stats_flower(priv, cls_flower); + default: return -EOPNOTSUPP; + } +} +static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) +{ switch (type) { case TC_SETUP_CLSFLOWER: - switch (tc->cls_flower->command) { - case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, proto, tc->cls_flower); - case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, tc->cls_flower); - case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, tc->cls_flower); - } + return mlx5e_rep_setup_tc_cls_flower(dev, handle, chain_index, + proto, tc); default: return -EOPNOTSUPP; } @@ -774,7 +783,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { .ndo_stop = mlx5e_rep_close, .ndo_start_xmit = mlx5e_xmit, .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, - .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, + .ndo_setup_tc = mlx5e_rep_setup_tc, .ndo_get_stats64 = mlx5e_rep_get_stats, .ndo_has_offload_stats = mlx5e_has_offload_stats, .ndo_get_offload_stats = mlx5e_get_offload_stats, -- cgit v1.2.3-55-g7522 From fd33f1dfed6141280bc85817e6f79b87a5c7320b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:24 +0200 Subject: mlxsw: spectrum: push cls_flower and cls_matchall setup_tc processing into separate functions Let mlxsw_sp_setup_tc be a splitter for specific setup_tc types and push out cls_flower and cls_matchall specific codes into separate functions. 
Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 81 ++++++++++++++++---------- 1 file changed, 51 insertions(+), 30 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 6438c38e7a68..9f8ba37fa7e3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1693,46 +1693,67 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, kfree(mall_tc_entry); } -static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, +static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, + u32 handle, u32 chain_index, + __be16 proto, + struct tc_cls_matchall_offload *f) +{ + bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); + + if (chain_index) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_CLSMATCHALL_REPLACE: + return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, proto, f, + ingress); + case TC_CLSMATCHALL_DESTROY: + mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int +mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, u32 chain_index, __be16 proto, - struct tc_to_netdev *tc) + struct tc_cls_flower_offload *f) { - struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); if (chain_index) return -EOPNOTSUPP; + switch (f->command) { + case TC_CLSFLOWER_REPLACE: + return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, + proto, f); + case TC_CLSFLOWER_DESTROY: + mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f); + return 0; + case TC_CLSFLOWER_STATS: + return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f); + default: + return -EOPNOTSUPP; + } +} + +static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 proto, + struct tc_to_netdev *tc) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + switch (type) { case TC_SETUP_CLSMATCHALL: - switch (tc->cls_mall->command) { - case TC_CLSMATCHALL_REPLACE: - return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, - proto, - tc->cls_mall, - ingress); - case TC_CLSMATCHALL_DESTROY: - mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, - tc->cls_mall); - return 0; - default: - return -EOPNOTSUPP; - } + return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, handle, + chain_index, proto, + tc->cls_mall); case TC_SETUP_CLSFLOWER: - switch (tc->cls_flower->command) { - case TC_CLSFLOWER_REPLACE: - return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, - proto, tc->cls_flower); - case TC_CLSFLOWER_DESTROY: - mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, - tc->cls_flower); - return 0; - case TC_CLSFLOWER_STATS: - return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, - tc->cls_flower); - default: - return -EOPNOTSUPP; - } + return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, handle, + chain_index, proto, + tc->cls_flower); default: return -EOPNOTSUPP; } -- cgit v1.2.3-55-g7522 From 9cbf14ede21e1789f24e87a9ba08bbc92211fe42 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:25 +0200 Subject: mlxsw: spectrum: rename cls arg in matchall processing To sync-up with the naming in the rest of the driver, rename the cls arg. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 9f8ba37fa7e3..f333d086932d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1618,7 +1618,7 @@ mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, __be16 protocol, - struct tc_cls_matchall_offload *cls, + struct tc_cls_matchall_offload *f, bool ingress) { struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; @@ -1626,7 +1626,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, LIST_HEAD(actions); int err; - if (!tcf_exts_has_one_action(cls->exts)) { + if (!tcf_exts_has_one_action(f->exts)) { netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n"); return -EOPNOTSUPP; } @@ -1634,9 +1634,9 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); if (!mall_tc_entry) return -ENOMEM; - mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->cookie = f->cookie; - tcf_exts_to_list(cls->exts, &actions); + tcf_exts_to_list(f->exts, &actions); a = list_first_entry(&actions, struct tc_action, list); if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) { @@ -1648,7 +1648,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, mirror, a, ingress); } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) { mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE; - err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls, + err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f, a, ingress); } else { err = -EOPNOTSUPP; @@ -1666,12 +1666,12 @@ err_add_action: } static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, - struct tc_cls_matchall_offload *cls) + struct tc_cls_matchall_offload *f) { struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port, - cls->cookie); + f->cookie); if (!mall_tc_entry) { netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n"); return; -- cgit v1.2.3-55-g7522 From 3fbae382f7dd81c4e43b76169e08cc0d440e760b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:26 +0200 Subject: dsa: push cls_matchall setup_tc processing into a separate function Let dsa_slave_setup_tc be a splitter for specific setup_tc types and push out cls_matchall specific code into a separate function. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/slave.c | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 453f6ddcd023..e76d576b941d 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -863,26 +863,35 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev, kfree(mall_tc_entry); } -static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 protocol, - struct tc_to_netdev *tc) +static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev, + u32 handle, u32 chain_index, + __be16 protocol, + struct tc_cls_matchall_offload *cls) { bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); if (chain_index) return -EOPNOTSUPP; + switch (cls->command) { + case TC_CLSMATCHALL_REPLACE: + return dsa_slave_add_cls_matchall(dev, protocol, cls, ingress); + case TC_CLSMATCHALL_DESTROY: + dsa_slave_del_cls_matchall(dev, cls); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, + u32 handle, u32 chain_index, __be16 protocol, + struct tc_to_netdev *tc) +{ switch (type) { case TC_SETUP_CLSMATCHALL: - switch (tc->cls_mall->command) { - case TC_CLSMATCHALL_REPLACE: - return dsa_slave_add_cls_matchall(dev, protocol, - tc->cls_mall, - ingress); - case TC_CLSMATCHALL_DESTROY: - dsa_slave_del_cls_matchall(dev, tc->cls_mall); - return 0; - } + return dsa_slave_setup_tc_cls_matchall(dev, handle, chain_index, + protocol, tc->cls_mall); default: return -EOPNOTSUPP; } -- cgit v1.2.3-55-g7522 From 37cba6b3f86b24d82f27713b3154657ecc95f678 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:27 +0200 Subject: nfp: change flows in apps that offload ndo_setup_tc Change the flows a bit in preparation of follow-up changes in ndo_setup_tc args. Also, change the error code to align with the rest of the drivers. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/main.c | 16 ++++++---------- drivers/net/ethernet/netronome/nfp/flower/offload.c | 10 +++------- 2 files changed, 9 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 788880808a6e..d7975dcecb40 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -126,19 +126,15 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, { struct nfp_net *nn = netdev_priv(netdev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) - return -EOPNOTSUPP; - if (proto != htons(ETH_P_ALL)) + if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) || + TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || + proto != htons(ETH_P_ALL)) return -EOPNOTSUPP; - if (type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) { - if (!nn->dp.bpf_offload_xdp) - return nfp_net_bpf_offload(nn, tc->cls_bpf); - else - return -EBUSY; - } + if (nn->dp.bpf_offload_xdp) + return -EBUSY; - return -EINVAL; + return nfp_net_bpf_offload(nn, tc->cls_bpf); } static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn) diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index d045cf8c140a..58af438a95c1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -388,14 +388,10 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, u32 handle, __be16 proto, struct tc_to_netdev *tc) { - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) + if (type != TC_SETUP_CLSFLOWER || + TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || + !eth_proto_is_802_3(proto)) return -EOPNOTSUPP; - if (!eth_proto_is_802_3(proto)) - return -EOPNOTSUPP; - - if (type != TC_SETUP_CLSFLOWER) - return -EINVAL; - return nfp_flower_repr_offload(app, netdev, tc->cls_flower); } -- cgit v1.2.3-55-g7522 From 74897ef0a553c8376aba53d818af10afcd12d945 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:28 +0200 Subject: hns3pf: don't check handle during mqprio offload Similar to the rest offloaders of mqprio, no need to check handle. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 6bb1e35336cc..ef5795923b0c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1223,7 +1223,7 @@ static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, u32 handle, u32 chain_index, __be16 protocol, struct tc_to_netdev *tc) { - if (handle != TC_H_ROOT || type != TC_SETUP_MQPRIO) + if (type != TC_SETUP_MQPRIO) return -EINVAL; return hns3_setup_tc(dev, tc->mqprio->num_tc); -- cgit v1.2.3-55-g7522 From 5fd9fc4e207dba0c05cafe78417952b4c4ca02dc Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:29 +0200 Subject: net: sched: push cls related args into cls_common structure As ndo_setup_tc is generic offload op for whole tc subsystem, does not really make sense to have cls-specific args. So move them under cls_common structurure which is embedded in all cls structs. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 1 - drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 1 - drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1 - drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 14 +++++------- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c | 7 +++--- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h | 6 ++---- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 1 - .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 1 - drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 1 - drivers/net/ethernet/intel/i40e/i40e_main.c | 1 - drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 17 ++++++--------- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 11 ++++------ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 15 +++++-------- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 25 ++++++++-------------- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_flower.c | 2 +- drivers/net/ethernet/netronome/nfp/bpf/main.c | 10 +++++---- drivers/net/ethernet/netronome/nfp/flower/main.h | 3 +-- .../net/ethernet/netronome/nfp/flower/offload.c | 12 ++++++----- drivers/net/ethernet/netronome/nfp/nfp_app.h | 6 ++---- drivers/net/ethernet/netronome/nfp/nfp_port.c | 6 +----- drivers/net/ethernet/netronome/nfp/nfp_port.h | 1 - drivers/net/ethernet/sfc/efx.h | 1 - drivers/net/ethernet/sfc/falcon/efx.h | 1 - drivers/net/ethernet/sfc/falcon/tx.c | 1 - drivers/net/ethernet/sfc/tx.c | 1 - drivers/net/ethernet/ti/netcp_core.c | 1 - include/linux/netdevice.h | 3 --- include/net/pkt_cls.h | 19 ++++++++++++++++ net/dsa/slave.c | 14 +++++------- net/sched/cls_bpf.c | 7 ++---- net/sched/cls_flower.c | 13 ++++++----- net/sched/cls_matchall.c | 8 +++---- net/sched/cls_u32.c | 20 +++++++---------- net/sched/sch_mqprio.c | 6 ++---- 39 files changed, 101 insertions(+), 144 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 6a6ea3bdd056..bbb7bfe0be7f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1919,7 +1919,6 @@ static void xgbe_poll_controller(struct net_device *netdev) #endif /* End CONFIG_NET_POLL_CONTROLLER */ static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc_to_netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 4395d1cac86f..257cf4be0162 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4285,7 +4285,6 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) } int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { if (type != TC_SETUP_MQPRIO) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 1ac4eb0d3413..04eb95043023 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -487,7 +487,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct 
net_device *dev, u8 num_tc); int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc); int bnx2x_get_vf_config(struct net_device *dev, int vf, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b98d9f33d9af..1545b88c545d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7238,7 +7238,6 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) } static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *ntc) { if (type != TC_SETUP_MQPRIO) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 651229070113..13199317c8e0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2890,27 +2890,24 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) } static int cxgb_setup_tc_cls_u32(struct net_device *dev, - enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_cls_u32_offload *cls_u32) { - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || - chain_index) + if (TC_H_MAJ(cls_u32->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + cls_u32->common.chain_index) return -EOPNOTSUPP; switch (cls_u32->command) { case TC_CLSU32_NEW_KNODE: case TC_CLSU32_REPLACE_KNODE: - return cxgb4_config_knode(dev, proto, cls_u32); + return cxgb4_config_knode(dev, cls_u32); case TC_CLSU32_DELETE_KNODE: - return cxgb4_delete_knode(dev, proto, cls_u32); + return cxgb4_delete_knode(dev, cls_u32); default: return -EOPNOTSUPP; } } static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { struct port_info *pi = netdev2pinfo(dev); @@ -2925,8 +2922,7 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_CLSU32: - return cxgb_setup_tc_cls_u32(dev, type, handle, chain_index, - proto, tc->cls_u32); + return cxgb_setup_tc_cls_u32(dev, tc->cls_u32); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c index 6f734c52ef25..48970ba08bdc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c @@ -146,11 +146,11 @@ static int fill_action_fields(struct adapter *adap, return 0; } -int cxgb4_config_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls) +int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) { const struct cxgb4_match_field *start, *link_start = NULL; struct adapter *adapter = netdev2adap(dev); + __be16 protocol = cls->common.protocol; struct ch_filter_specification fs; struct cxgb4_tc_u32_table *t; struct cxgb4_link *link; @@ -338,8 +338,7 @@ out: return ret; } -int cxgb4_delete_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls) +int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls) { struct adapter *adapter = netdev2adap(dev); unsigned int filter_id, max_tids, i, j; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h index 021261a41c13..70a07b7cca56 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h +++ 
b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h @@ -44,10 +44,8 @@ static inline bool can_tc_u32_offload(struct net_device *dev) return (dev->features & NETIF_F_HW_TC) && adap->tc_u32 ? true : false; } -int cxgb4_config_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls); -int cxgb4_delete_knode(struct net_device *dev, __be16 protocol, - struct tc_cls_u32_offload *cls); +int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls); +int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls); void cxgb4_cleanup_tc_u32(struct adapter *adapter); struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index d86d766777c8..3827608cec29 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -343,7 +343,6 @@ static void dpaa_get_stats64(struct net_device *net_dev, } static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { struct dpaa_priv *priv = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index ef5795923b0c..dc64d751db24 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1220,7 +1220,6 @@ static int hns3_setup_tc(struct net_device *netdev, u8 tc) } static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 protocol, struct tc_to_netdev *tc) { if (type != TC_SETUP_MQPRIO) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index b30190639e78..71004b8eff95 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1266,7 +1266,6 @@ err_queueing_scheme: } static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { if (type != TC_SETUP_MQPRIO) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 7d47a718f922..97d8bb2e8320 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5657,7 +5657,6 @@ exit: } static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { if (type != TC_SETUP_MQPRIO) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 35db198199b0..0a350314d76b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8851,7 +8851,6 @@ static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, } static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, - __be16 protocol, struct tc_cls_u32_offload *cls) { u32 uhtid = TC_U32_USERHTID(cls->hnode.handle); @@ -9037,9 +9036,9 @@ static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input, } static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, - __be16 protocol, struct tc_cls_u32_offload *cls) { + __be16 protocol = cls->common.protocol; u32 loc = cls->knode.handle & 0xfffff; struct ixgbe_hw *hw = &adapter->hw; struct 
ixgbe_mat_field *field_ptr; @@ -9227,25 +9226,23 @@ free_jump: } static int ixgbe_setup_tc_cls_u32(struct net_device *dev, - u32 handle, u32 chain_index, __be16 proto, struct tc_cls_u32_offload *cls_u32) { struct ixgbe_adapter *adapter = netdev_priv(dev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || - chain_index) + if (TC_H_MAJ(cls_u32->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + cls_u32->common.chain_index) return -EOPNOTSUPP; switch (cls_u32->command) { case TC_CLSU32_NEW_KNODE: case TC_CLSU32_REPLACE_KNODE: - return ixgbe_configure_clsu32(adapter, proto, cls_u32); + return ixgbe_configure_clsu32(adapter, cls_u32); case TC_CLSU32_DELETE_KNODE: return ixgbe_delete_clsu32(adapter, cls_u32); case TC_CLSU32_NEW_HNODE: case TC_CLSU32_REPLACE_HNODE: - return ixgbe_configure_clsu32_add_hnode(adapter, proto, - cls_u32); + return ixgbe_configure_clsu32_add_hnode(adapter, cls_u32); case TC_CLSU32_DELETE_HNODE: return ixgbe_configure_clsu32_del_hnode(adapter, cls_u32); default: @@ -9261,13 +9258,11 @@ static int ixgbe_setup_tc_mqprio(struct net_device *dev, } static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { switch (type) { case TC_SETUP_CLSU32: - return ixgbe_setup_tc_cls_u32(dev, handle, chain_index, proto, - tc->cls_u32); + return ixgbe_setup_tc_cls_u32(dev, tc->cls_u32); case TC_SETUP_MQPRIO: return ixgbe_setup_tc_mqprio(dev, tc->mqprio); default: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 5c33550765ed..e81083e25ba6 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -131,7 +131,6 @@ out: } static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { if (type != TC_SETUP_MQPRIO) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index adf35da74a85..15f2a942962a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3032,18 +3032,17 @@ out: } static int mlx5e_setup_tc_cls_flower(struct net_device *dev, - u32 handle, u32 chain_index, __be16 proto, struct tc_cls_flower_offload *cls_flower) { struct mlx5e_priv *priv = netdev_priv(dev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || - chain_index) + if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + cls_flower->common.chain_index) return -EOPNOTSUPP; switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, proto, cls_flower); + return mlx5e_configure_flower(priv, cls_flower); case TC_CLSFLOWER_DESTROY: return mlx5e_delete_flower(priv, cls_flower); case TC_CLSFLOWER_STATS: @@ -3054,13 +3053,11 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev, } static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_setup_tc_cls_flower(dev, handle, chain_index, - proto, tc->cls_flower); + return mlx5e_setup_tc_cls_flower(dev, tc->cls_flower); case TC_SETUP_MQPRIO: return mlx5e_setup_tc_mqprio(dev, tc->mqprio); default: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index e6cc642f6d8c..e5cf2e7ae052 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -652,15 +652,13 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, } static int mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, - u32 handle, u32 chain_index, - __be16 proto, struct tc_to_netdev *tc) { struct tc_cls_flower_offload *cls_flower = tc->cls_flower; struct mlx5e_priv *priv = netdev_priv(dev); - if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || - chain_index) + if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + cls_flower->common.chain_index) return -EOPNOTSUPP; if (cls_flower->egress_dev) { @@ -668,13 +666,12 @@ static int mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, dev = mlx5_eswitch_get_uplink_netdev(esw); return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, - handle, chain_index, - proto, tc); + tc); } switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, proto, cls_flower); + return mlx5e_configure_flower(priv, cls_flower); case TC_CLSFLOWER_DESTROY: return mlx5e_delete_flower(priv, cls_flower); case TC_CLSFLOWER_STATS: @@ -685,13 +682,11 @@ static int mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, } static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_rep_setup_tc_cls_flower(dev, handle, chain_index, - proto, tc); + return mlx5e_rep_setup_tc_cls_flower(dev, tc); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 78f50d9f621d..3b10d3df7627 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1939,7 +1939,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return err; } -int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, +int mlx5e_configure_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index ecbe30d808ae..5a0f4a487855 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -38,7 +38,7 @@ int mlx5e_tc_init(struct mlx5e_priv *priv); void mlx5e_tc_cleanup(struct mlx5e_priv *priv); -int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, +int mlx5e_configure_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f); int mlx5e_delete_flower(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index f333d086932d..1ca3204f5543 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1617,11 +1617,11 @@ mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port) } static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, - __be16 protocol, struct tc_cls_matchall_offload *f, bool ingress) { struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; + __be16 protocol = f->common.protocol; const struct tc_action *a; LIST_HEAD(actions); int err; @@ -1694,18 +1694,16 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, } 
static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, - u32 handle, u32 chain_index, - __be16 proto, struct tc_cls_matchall_offload *f) { - bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); + bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS); - if (chain_index) + if (f->common.chain_index) return -EOPNOTSUPP; switch (f->command) { case TC_CLSMATCHALL_REPLACE: - return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, proto, f, + return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f, ingress); case TC_CLSMATCHALL_DESTROY: mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f); @@ -1717,18 +1715,16 @@ static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, - u32 handle, u32 chain_index, __be16 proto, struct tc_cls_flower_offload *f) { - bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); + bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS); - if (chain_index) + if (f->common.chain_index) return -EOPNOTSUPP; switch (f->command) { case TC_CLSFLOWER_REPLACE: - return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, - proto, f); + return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f); case TC_CLSFLOWER_DESTROY: mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f); return 0; @@ -1740,19 +1736,16 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, } static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); switch (type) { case TC_SETUP_CLSMATCHALL: - return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, handle, - chain_index, proto, + return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, tc->cls_mall); case TC_SETUP_CLSFLOWER: - return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, handle, - chain_index, proto, + return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, tc->cls_flower); default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index e848f06e34e6..8452d1db2f3f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -508,7 +508,7 @@ extern const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops; /* spectrum_flower.c */ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, - __be16 protocol, struct tc_cls_flower_offload *f); + struct tc_cls_flower_offload *f); void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct tc_cls_flower_offload *f); int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 9be48d2e43ca..021b6c0076c0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -368,7 +368,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, } int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, - __be16 protocol, struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; struct net_device *dev = mlxsw_sp_port->dev; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index d7975dcecb40..152a7abb58ed 100644 
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -121,20 +121,22 @@ static void nfp_bpf_vnic_clean(struct nfp_app *app, struct nfp_net *nn) } static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, u32 handle, __be16 proto, + enum tc_setup_type type, struct tc_to_netdev *tc) { + struct tc_cls_bpf_offload *cls_bpf = tc->cls_bpf; struct nfp_net *nn = netdev_priv(netdev); if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) || - TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || - proto != htons(ETH_P_ALL)) + TC_H_MAJ(cls_bpf->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + cls_bpf->common.protocol != htons(ETH_P_ALL) || + cls_bpf->common.chain_index) return -EOPNOTSUPP; if (nn->dp.bpf_offload_xdp) return -EBUSY; - return nfp_net_bpf_offload(nn, tc->cls_bpf); + return nfp_net_bpf_offload(nn, cls_bpf); } static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 314e6e8ba649..eb94d08e35cf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -135,8 +135,7 @@ int nfp_flower_metadata_init(struct nfp_app *app); void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, u32 handle, __be16 proto, - struct tc_to_netdev *tc); + enum tc_setup_type type, struct tc_to_netdev *tc); int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 58af438a95c1..8197836c650d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -385,13 +385,15 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, } int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, u32 handle, __be16 proto, - struct tc_to_netdev *tc) + enum tc_setup_type type, struct tc_to_netdev *tc) { + struct tc_cls_flower_offload *cls_flower = tc->cls_flower; + if (type != TC_SETUP_CLSFLOWER || - TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS) || - !eth_proto_is_802_3(proto)) + TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + !eth_proto_is_802_3(cls_flower->common.protocol) || + cls_flower->common.chain_index) return -EOPNOTSUPP; - return nfp_flower_repr_offload(app, netdev, tc->cls_flower); + return nfp_flower_repr_offload(app, netdev, cls_flower); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index b3b03bb9d907..7a2f950b149c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -109,8 +109,7 @@ struct nfp_app_type { void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb); int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, u32 handle, __be16 proto, - struct tc_to_netdev *tc); + enum tc_setup_type type, struct tc_to_netdev *tc); bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, struct bpf_prog *prog); @@ -240,12 +239,11 @@ static inline bool nfp_app_tc_busy(struct nfp_app *app, struct nfp_net *nn) static 
inline int nfp_app_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, - u32 handle, __be16 proto, struct tc_to_netdev *tc) { if (!app || !app->type->setup_tc) return -EOPNOTSUPP; - return app->type->setup_tc(app, netdev, type, handle, proto, tc); + return app->type->setup_tc(app, netdev, type, tc); } static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 9d776f982352..e8abab2b912e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -89,19 +89,15 @@ const struct switchdev_ops nfp_port_switchdev_ops = { }; int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { struct nfp_port *port; - if (chain_index) - return -EOPNOTSUPP; - port = nfp_port_from_netdev(netdev); if (!port) return -EOPNOTSUPP; - return nfp_app_setup_tc(port->app, netdev, type, handle, proto, tc); + return nfp_app_setup_tc(port->app, netdev, type, tc); } struct nfp_port * diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 239c5401000c..252f06d4307f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -110,7 +110,6 @@ struct nfp_port { extern const struct switchdev_ops nfp_port_switchdev_ops; int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc); struct nfp_port *nfp_port_from_netdev(struct net_device *netdev); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index e41a7179bc05..b0c6004db138 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -33,7 +33,6 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h index f3bc67ec1f30..4497511fc914 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.h +++ b/drivers/net/ethernet/sfc/falcon/efx.h @@ -33,7 +33,6 @@ netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb, netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb); void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index); int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc); unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx); extern bool ef4_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 6c4752694c1f..447519ac3fa4 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -426,7 +426,6 @@ void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue) } int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *ntc) { struct ef4_nic *efx = netdev_priv(net_dev); diff --git 
a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 0c08c10d751c..d17af918ac50 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -654,7 +654,6 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) } int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *ntc) { struct efx_nic *efx = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index cb21742f6177..14f91b285f00 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1878,7 +1878,6 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, } static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 proto, struct tc_to_netdev *tc) { u8 num_tc; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index bd49dbaee84e..6e2f7e38cf8e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -977,7 +977,6 @@ struct xfrmdev_ops { * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, - * u32 handle, u32 chain_index, __be16 protocol, * struct tc_to_netdev *tc); * Called to setup any 'tc' scheduler, classifier or action on @dev. * This is always called from the stack with the rtnl lock held and netif @@ -1227,8 +1226,6 @@ struct net_device_ops { int vf, bool setting); int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, - __be16 protocol, struct tc_to_netdev *tc); #if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 8213acdfdf5a..ffaddf72108e 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -405,6 +405,21 @@ tcf_match_indev(struct sk_buff *skb, int ifindex) } #endif /* CONFIG_NET_CLS_IND */ +struct tc_cls_common_offload { + u32 handle; + u32 chain_index; + __be16 protocol; +}; + +static inline void +tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common, + const struct tcf_proto *tp) +{ + cls_common->handle = tp->q->handle; + cls_common->chain_index = tp->chain->index; + cls_common->protocol = tp->protocol; +} + struct tc_cls_u32_knode { struct tcf_exts *exts; struct tc_u32_sel *sel; @@ -431,6 +446,7 @@ enum tc_clsu32_command { }; struct tc_cls_u32_offload { + struct tc_cls_common_offload common; /* knode values */ enum tc_clsu32_command command; union { @@ -497,6 +513,7 @@ enum tc_fl_command { }; struct tc_cls_flower_offload { + struct tc_cls_common_offload common; enum tc_fl_command command; u32 prio; unsigned long cookie; @@ -513,6 +530,7 @@ enum tc_matchall_command { }; struct tc_cls_matchall_offload { + struct tc_cls_common_offload common; enum tc_matchall_command command; struct tcf_exts *exts; unsigned long cookie; @@ -526,6 +544,7 @@ enum tc_clsbpf_command { }; struct tc_cls_bpf_offload { + struct tc_cls_common_offload common; enum tc_clsbpf_command command; struct tcf_exts *exts; struct bpf_prog *prog; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index e76d576b941d..5e01e9271619 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -774,12 +774,12 @@ dsa_slave_mall_tc_entry_find(struct dsa_slave_priv *p, } static int dsa_slave_add_cls_matchall(struct net_device 
*dev, - __be16 protocol, struct tc_cls_matchall_offload *cls, bool ingress) { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_mall_tc_entry *mall_tc_entry; + __be16 protocol = cls->common.protocol; struct dsa_switch *ds = p->dp->ds; struct net *net = dev_net(dev); struct dsa_slave_priv *to_p; @@ -864,18 +864,16 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev, } static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev, - u32 handle, u32 chain_index, - __be16 protocol, struct tc_cls_matchall_offload *cls) { - bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS); + bool ingress = TC_H_MAJ(cls->common.handle) == TC_H_MAJ(TC_H_INGRESS); - if (chain_index) + if (cls->common.chain_index) return -EOPNOTSUPP; switch (cls->command) { case TC_CLSMATCHALL_REPLACE: - return dsa_slave_add_cls_matchall(dev, protocol, cls, ingress); + return dsa_slave_add_cls_matchall(dev, cls, ingress); case TC_CLSMATCHALL_DESTROY: dsa_slave_del_cls_matchall(dev, cls); return 0; @@ -885,13 +883,11 @@ static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev, } static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, - u32 handle, u32 chain_index, __be16 protocol, struct tc_to_netdev *tc) { switch (type) { case TC_SETUP_CLSMATCHALL: - return dsa_slave_setup_tc_cls_matchall(dev, handle, chain_index, - protocol, tc->cls_mall); + return dsa_slave_setup_tc_cls_matchall(dev, tc->cls_mall); default: return -EOPNOTSUPP; } diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index e2bf2753173d..dde8efdcee3b 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -153,6 +153,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, offload.cls_bpf = &bpf_offload; + tc_cls_common_offload_init(&bpf_offload.common, tp); bpf_offload.command = cmd; bpf_offload.exts = &prog->exts; bpf_offload.prog = prog->filter; @@ -160,11 +161,7 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, bpf_offload.exts_integrated = prog->exts_integrated; bpf_offload.gen_flags = prog->gen_flags; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, - tp->q->handle, - tp->chain->index, - tp->protocol, &offload); - + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &offload); if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE)) prog->gen_flags |= TCA_CLS_FLAGS_IN_HW; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 52deeed2b7f5..1fdf2889ba9f 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -232,14 +232,14 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) if (!tc_can_offload(dev, tp)) return; + tc_cls_common_offload_init(&offload.common, tp); offload.command = TC_CLSFLOWER_DESTROY; offload.prio = tp->prio; offload.cookie = (unsigned long)f; tc->cls_flower = &offload; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, tp->q->handle, - tp->chain->index, tp->protocol, tc); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, tc); } static int fl_hw_replace_filter(struct tcf_proto *tp, @@ -264,6 +264,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, f->hw_dev = dev; } + tc_cls_common_offload_init(&offload.common, tp); offload.command = TC_CLSFLOWER_REPLACE; offload.prio = tp->prio; offload.cookie = (unsigned long)f; @@ -274,9 +275,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, tc->cls_flower = &offload; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, - tp->q->handle, tp->chain->index, - 
tp->protocol, tc); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, tc); if (!err) f->flags |= TCA_CLS_FLAGS_IN_HW; @@ -294,6 +293,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) if (!tc_can_offload(dev, tp)) return; + tc_cls_common_offload_init(&offload.common, tp); offload.command = TC_CLSFLOWER_STATS; offload.prio = tp->prio; offload.cookie = (unsigned long)f; @@ -301,8 +301,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) tc->cls_flower = &offload; - dev->netdev_ops->ndo_setup_tc(dev, TC_CLSFLOWER_STATS, tp->q->handle, - tp->chain->index, tp->protocol, tc); + dev->netdev_ops->ndo_setup_tc(dev, TC_CLSFLOWER_STATS, tc); } static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f) diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index a8853ada22f6..174c700160ca 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -58,14 +58,14 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, struct tc_cls_matchall_offload mall_offload = {0}; int err; + tc_cls_common_offload_init(&mall_offload.common, tp); offload.cls_mall = &mall_offload; offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; offload.cls_mall->exts = &head->exts; offload.cls_mall->cookie = cookie; err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, - tp->q->handle, tp->chain->index, - tp->protocol, &offload); + &offload); if (!err) head->flags |= TCA_CLS_FLAGS_IN_HW; @@ -80,13 +80,13 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp, struct tc_to_netdev offload; struct tc_cls_matchall_offload mall_offload = {0}; + tc_cls_common_offload_init(&mall_offload.common, tp); offload.cls_mall = &mall_offload; offload.cls_mall->command = TC_CLSMATCHALL_DESTROY; offload.cls_mall->exts = NULL; offload.cls_mall->cookie = cookie; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, tp->q->handle, - tp->chain->index, tp->protocol, &offload); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, &offload); } static void mall_destroy(struct tcf_proto *tp) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index d1bae4cc749f..c0f59c471523 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -437,11 +437,10 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) offload.cls_u32 = &u32_offload; if (tc_should_offload(dev, tp, 0)) { + tc_cls_common_offload_init(&u32_offload.common, tp); offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; offload.cls_u32->knode.handle = handle; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, - tp->q->handle, tp->chain->index, - tp->protocol, &offload); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload); } } @@ -458,14 +457,13 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, offload.cls_u32 = &u32_offload; + tc_cls_common_offload_init(&u32_offload.common, tp); offload.cls_u32->command = TC_CLSU32_NEW_HNODE; offload.cls_u32->hnode.divisor = h->divisor; offload.cls_u32->hnode.handle = h->handle; offload.cls_u32->hnode.prio = h->prio; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, tp->q->handle, - tp->chain->index, tp->protocol, - &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload); if (tc_skip_sw(flags)) return err; @@ -481,14 +479,13 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) offload.cls_u32 = &u32_offload; if (tc_should_offload(dev, tp, 0)) { + tc_cls_common_offload_init(&u32_offload.common, tp); 
offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; offload.cls_u32->hnode.divisor = h->divisor; offload.cls_u32->hnode.handle = h->handle; offload.cls_u32->hnode.prio = h->prio; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, - tp->q->handle, tp->chain->index, - tp->protocol, &offload); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload); } } @@ -505,6 +502,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, if (!tc_should_offload(dev, tp, flags)) return tc_skip_sw(flags) ? -EINVAL : 0; + tc_cls_common_offload_init(&u32_offload.common, tp); offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; offload.cls_u32->knode.handle = n->handle; offload.cls_u32->knode.fshift = n->fshift; @@ -520,9 +518,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, if (n->ht_down) offload.cls_u32->knode.link_handle = n->ht_down->handle; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, tp->q->handle, - tp->chain->index, tp->protocol, - &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload); if (!err) n->flags |= TCA_CLS_FLAGS_IN_HW; diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 329610ce4dfe..09b577dde49c 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -42,8 +42,7 @@ static void mqprio_destroy(struct Qdisc *sch) struct tc_mqprio_qopt offload = { 0 }; struct tc_to_netdev tc = { { .mqprio = &offload } }; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, - sch->handle, 0, 0, &tc); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &tc); } else { netdev_set_num_tc(dev, 0); } @@ -151,8 +150,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) struct tc_mqprio_qopt offload = *qopt; struct tc_to_netdev tc = { { .mqprio = &offload } }; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, - sch->handle, 0, 0, &tc); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &tc); if (err) return err; -- cgit v1.2.3-55-g7522 From d7c1c8d2e53be974b5c72e31d7d35f6d9737fe84 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:30 +0200 Subject: net: sched: move prio into cls_common prio is not cls_flower specific, but it is meaningful for all classifiers. Seems that only mlxsw cares about the value. Obviously, cls offload in other drivers is broken. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
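[Editor's note] For readers skimming the hunks, this is roughly what the shared offload header in include/net/pkt_cls.h looks like once this patch is applied, reconstructed from the hunks above and below (a sketch of the end state, not an authoritative copy of the header):

	struct tc_cls_common_offload {
		u32 handle;
		u32 chain_index;
		__be16 protocol;
		u32 prio;
	};

	static inline void
	tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
				   const struct tcf_proto *tp)
	{
		cls_common->handle = tp->q->handle;
		cls_common->chain_index = tp->chain->index;
		cls_common->protocol = tp->protocol;
		cls_common->prio = tp->prio;
	}

Each classifier (u32, flower, matchall, bpf) embeds this as the first member of its per-type offload struct and calls tc_cls_common_offload_init() before invoking ndo_setup_tc(), so a driver such as mlxsw reads the priority from f->common.prio instead of a flower-specific field.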
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 2 +- include/net/pkt_cls.h | 3 ++- net/sched/cls_flower.c | 3 --- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 021b6c0076c0..95428b41c50f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -270,7 +270,7 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, return -EOPNOTSUPP; } - mlxsw_sp_acl_rulei_priority(rulei, f->prio); + mlxsw_sp_acl_rulei_priority(rulei, f->common.prio); if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { struct flow_dissector_key_control *key = diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index ffaddf72108e..572083af02ac 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -409,6 +409,7 @@ struct tc_cls_common_offload { u32 handle; u32 chain_index; __be16 protocol; + u32 prio; }; static inline void @@ -418,6 +419,7 @@ tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common, cls_common->handle = tp->q->handle; cls_common->chain_index = tp->chain->index; cls_common->protocol = tp->protocol; + cls_common->prio = tp->prio; } struct tc_cls_u32_knode { @@ -515,7 +517,6 @@ enum tc_fl_command { struct tc_cls_flower_offload { struct tc_cls_common_offload common; enum tc_fl_command command; - u32 prio; unsigned long cookie; struct flow_dissector *dissector; struct fl_flow_key *mask; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 1fdf2889ba9f..ccdf2f5014ca 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -234,7 +234,6 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) tc_cls_common_offload_init(&offload.common, tp); offload.command = TC_CLSFLOWER_DESTROY; - offload.prio = tp->prio; offload.cookie = (unsigned long)f; tc->cls_flower = &offload; @@ -266,7 +265,6 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, tc_cls_common_offload_init(&offload.common, tp); offload.command = TC_CLSFLOWER_REPLACE; - offload.prio = tp->prio; offload.cookie = (unsigned long)f; offload.dissector = dissector; offload.mask = mask; @@ -295,7 +293,6 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) tc_cls_common_offload_init(&offload.common, tp); offload.command = TC_CLSFLOWER_STATS; - offload.prio = tp->prio; offload.cookie = (unsigned long)f; offload.exts = &f->exts; -- cgit v1.2.3-55-g7522 From 38cf0426e5178b1c3810bb88e65dd23882e40283 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:31 +0200 Subject: net: sched: change return value of ndo_setup_tc for driver supporting mqprio only Change the return value from -EINVAL to -EOPNOTSUPP. The rest of the drivers have it like that, so be aligned. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
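[Editor's note] As a quick illustration of the convention this commit aligns on, a driver that only handles MQPRIO ends up with a callback shaped like the following at this point in the series (an illustrative sketch; example_setup_mq_tc stands in for the driver's own helper and is not a real kernel function):

	static int example_setup_tc(struct net_device *dev, enum tc_setup_type type,
				    struct tc_to_netdev *tc)
	{
		if (type != TC_SETUP_MQPRIO)
			return -EOPNOTSUPP;	/* previously -EINVAL in several drivers */

		tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

		return example_setup_mq_tc(dev, tc->mqprio->num_tc);
	}

Returning -EOPNOTSUPP for an unsupported setup type matches what the other drivers already do, which is the alignment this commit is after.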
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2 +- drivers/net/ethernet/sfc/falcon/tx.c | 2 +- drivers/net/ethernet/sfc/tx.c | 2 +- drivers/net/ethernet/ti/netcp_core.c | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index bbb7bfe0be7f..37d3e5b65d94 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1925,7 +1925,7 @@ static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type, u8 tc; if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; tc = tc_to_netdev->mqprio->num_tc; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 257cf4be0162..8687afc24698 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4288,7 +4288,7 @@ int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, struct tc_to_netdev *tc) { if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1545b88c545d..a78f72a53042 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7241,7 +7241,7 @@ static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, struct tc_to_netdev *ntc) { if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 3827608cec29..bfb44c95a7ec 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -350,7 +350,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, int i; if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; num_tc = tc->mqprio->num_tc; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index dc64d751db24..aa43ebda9a00 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1223,7 +1223,7 @@ static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, struct tc_to_netdev *tc) { if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; return hns3_setup_tc(dev, tc->mqprio->num_tc); } diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 71004b8eff95..70888129200b 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1269,7 +1269,7 @@ static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, struct tc_to_netdev *tc) { if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return 
-EOPNOTSUPP; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 97d8bb2e8320..1f4633830c79 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5660,7 +5660,7 @@ static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, struct tc_to_netdev *tc) { if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e81083e25ba6..1667e86ac05d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -134,7 +134,7 @@ static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type, struct tc_to_netdev *tc) { if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) return -EINVAL; diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 447519ac3fa4..0f125e15143a 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -435,7 +435,7 @@ int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, int rc; if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; num_tc = ntc->mqprio->num_tc; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index d17af918ac50..53ba30c3eb7b 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -663,7 +663,7 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, int rc; if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; num_tc = ntc->mqprio->num_tc; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 14f91b285f00..caba0abc0158 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1887,7 +1887,7 @@ static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, ASSERT_RTNL(); if (type != TC_SETUP_MQPRIO) - return -EINVAL; + return -EOPNOTSUPP; tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; num_tc = tc->mqprio->num_tc; -- cgit v1.2.3-55-g7522 From de4784ca030fed17d527dbb2bb4e21328b12de94 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 7 Aug 2017 10:15:32 +0200 Subject: net: sched: get rid of struct tc_to_netdev Get rid of struct tc_to_netdev which is now just unnecessary container and rather pass per-type structures down to drivers directly. Along with that, consolidate the naming of per-type structure variables in cls_*. Signed-off-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
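[Editor's note] After this commit the per-type structure is passed straight through ndo_setup_tc() as an opaque pointer, so the same mqprio-only driver from the earlier sketch reduces to something like this (again illustrative; example_setup_mq_tc is a stand-in, not a real kernel function):

	static int example_setup_tc(struct net_device *dev, enum tc_setup_type type,
				    void *type_data)
	{
		struct tc_mqprio_qopt *mqprio = type_data;

		if (type != TC_SETUP_MQPRIO)
			return -EOPNOTSUPP;

		mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

		return example_setup_mq_tc(dev, mqprio->num_tc);
	}

Classifier offloads follow the same pattern in the diff below, casting type_data to struct tc_cls_u32_offload, struct tc_cls_flower_offload, and so on according to the tc_setup_type value.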
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 7 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 8 ++- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 ++- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 +- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 7 +- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 6 +- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 8 ++- drivers/net/ethernet/intel/i40e/i40e_main.c | 8 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 +- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 10 +-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6 +- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 12 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 2 + drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 8 +-- drivers/net/ethernet/netronome/nfp/bpf/main.c | 5 +- drivers/net/ethernet/netronome/nfp/flower/main.h | 4 +- .../net/ethernet/netronome/nfp/flower/offload.c | 4 +- drivers/net/ethernet/netronome/nfp/nfp_app.h | 8 +-- drivers/net/ethernet/netronome/nfp/nfp_port.c | 4 +- drivers/net/ethernet/netronome/nfp/nfp_port.h | 3 +- drivers/net/ethernet/sfc/efx.h | 2 +- drivers/net/ethernet/sfc/falcon/efx.h | 2 +- drivers/net/ethernet/sfc/falcon/tx.c | 7 +- drivers/net/ethernet/sfc/tx.c | 7 +- drivers/net/ethernet/ti/netcp_core.c | 7 +- include/linux/netdevice.h | 19 +---- net/dsa/slave.c | 4 +- net/sched/cls_bpf.c | 21 +++--- net/sched/cls_flower.c | 54 ++++++-------- net/sched/cls_matchall.c | 27 +++---- net/sched/cls_u32.c | 83 ++++++++++------------ net/sched/sch_mqprio.c | 13 ++-- 33 files changed, 174 insertions(+), 202 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 37d3e5b65d94..2fd9b80b39b0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1919,16 +1919,17 @@ static void xgbe_poll_controller(struct net_device *netdev) #endif /* End CONFIG_NET_POLL_CONTROLLER */ static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type, - struct tc_to_netdev *tc_to_netdev) + void *type_data) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct tc_mqprio_qopt *mqprio = type_data; u8 tc; if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - tc_to_netdev->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - tc = tc_to_netdev->mqprio->num_tc; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + tc = mqprio->num_tc; if (tc > pdata->hw_feat.tc_cnt) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 8687afc24698..1216c1f1e052 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4285,14 +4285,16 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) } int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { + struct tc_mqprio_qopt *mqprio = type_data; + if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return bnx2x_setup_tc(dev, tc->mqprio->num_tc); + return bnx2x_setup_tc(dev, mqprio->num_tc); } /* called with rtnl_lock */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 04eb95043023..a5265e1344f1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -487,7 
+487,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc); + void *type_data); int bnx2x_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a78f72a53042..6e14fc4fe2c8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7238,14 +7238,16 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) } static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *ntc) + void *type_data) { + struct tc_mqprio_qopt *mqprio = type_data; + if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc); + return bnxt_setup_mq_tc(dev, mqprio->num_tc); } #ifdef CONFIG_RFS_ACCEL diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 13199317c8e0..d80b20d695e0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2908,7 +2908,7 @@ static int cxgb_setup_tc_cls_u32(struct net_device *dev, } static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { struct port_info *pi = netdev2pinfo(dev); struct adapter *adap = netdev2adap(dev); @@ -2922,7 +2922,7 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type, switch (type) { case TC_SETUP_CLSU32: - return cxgb_setup_tc_cls_u32(dev, tc->cls_u32); + return cxgb_setup_tc_cls_u32(dev, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index bfb44c95a7ec..733d54caabb6 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -343,17 +343,18 @@ static void dpaa_get_stats64(struct net_device *net_dev, } static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { struct dpaa_priv *priv = netdev_priv(net_dev); + struct tc_mqprio_qopt *mqprio = type_data; u8 num_tc; int i; if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - num_tc = tc->mqprio->num_tc; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + num_tc = mqprio->num_tc; if (num_tc == priv->num_tc) return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index aa43ebda9a00..069ae426aa24 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1220,12 +1220,14 @@ static int hns3_setup_tc(struct net_device *netdev, u8 tc) } static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { + struct tc_mqprio_qopt *mqprio = type_data; + if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - return hns3_setup_tc(dev, tc->mqprio->num_tc); + return hns3_setup_tc(dev, mqprio->num_tc); } static int hns3_vlan_rx_add_vid(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c 
b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 70888129200b..e69d49d91d67 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1266,14 +1266,16 @@ err_queueing_scheme: } static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { + struct tc_mqprio_qopt *mqprio = type_data; + if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return fm10k_setup_tc(dev, tc->mqprio->num_tc); + return fm10k_setup_tc(dev, mqprio->num_tc); } static void fm10k_assign_l2_accel(struct fm10k_intfc *interface, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1f4633830c79..a7e5a76703e7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5657,14 +5657,16 @@ exit: } static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { + struct tc_mqprio_qopt *mqprio = type_data; + if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return i40e_setup_tc(netdev, tc->mqprio->num_tc); + return i40e_setup_tc(netdev, mqprio->num_tc); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0a350314d76b..c6b132476de4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9258,13 +9258,13 @@ static int ixgbe_setup_tc_mqprio(struct net_device *dev, } static int __ixgbe_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { switch (type) { case TC_SETUP_CLSU32: - return ixgbe_setup_tc_cls_u32(dev, tc->cls_u32); + return ixgbe_setup_tc_cls_u32(dev, type_data); case TC_SETUP_MQPRIO: - return ixgbe_setup_tc_mqprio(dev, tc->mqprio); + return ixgbe_setup_tc_mqprio(dev, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 1667e86ac05d..6e67ca7aa7f5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -131,17 +131,19 @@ out: } static int __mlx4_en_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { + struct tc_mqprio_qopt *mqprio = type_data; + if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - if (tc->mqprio->num_tc && tc->mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) + if (mqprio->num_tc && mqprio->num_tc != MLX4_EN_NUM_UP_HIGH) return -EINVAL; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - return mlx4_en_alloc_tx_queue_per_tc(dev, tc->mqprio->num_tc); + return mlx4_en_alloc_tx_queue_per_tc(dev, mqprio->num_tc); } #ifdef CONFIG_RFS_ACCEL diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 15f2a942962a..ae0916238b7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3053,13 +3053,13 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev, } static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { switch (type) { case 
TC_SETUP_CLSFLOWER: - return mlx5e_setup_tc_cls_flower(dev, tc->cls_flower); + return mlx5e_setup_tc_cls_flower(dev, type_data); case TC_SETUP_MQPRIO: - return mlx5e_setup_tc_mqprio(dev, tc->mqprio); + return mlx5e_setup_tc_mqprio(dev, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index e5cf2e7ae052..3df994d1e173 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -651,10 +651,10 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, return 0; } -static int mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, - struct tc_to_netdev *tc) +static int +mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, + struct tc_cls_flower_offload *cls_flower) { - struct tc_cls_flower_offload *cls_flower = tc->cls_flower; struct mlx5e_priv *priv = netdev_priv(dev); if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || @@ -666,7 +666,7 @@ static int mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, dev = mlx5_eswitch_get_uplink_netdev(esw); return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, - tc); + cls_flower); } switch (cls_flower->command) { @@ -682,11 +682,11 @@ static int mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, } static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_rep_setup_tc_cls_flower(dev, tc); + return mlx5e_rep_setup_tc_cls_flower(dev, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 5a0f4a487855..2917d964ffc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -33,6 +33,8 @@ #ifndef __MLX5_EN_TC_H__ #define __MLX5_EN_TC_H__ +#include + #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff int mlx5e_tc_init(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1ca3204f5543..eb7c4549f464 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1736,17 +1736,15 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, } static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); switch (type) { case TC_SETUP_CLSMATCHALL: - return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, - tc->cls_mall); + return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data); case TC_SETUP_CLSFLOWER: - return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, - tc->cls_flower); + return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 152a7abb58ed..f981f60ec306 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -121,10 +121,9 @@ static void nfp_bpf_vnic_clean(struct nfp_app *app, struct nfp_net *nn) } static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, - struct tc_to_netdev *tc) + enum tc_setup_type type, void *type_data) { - struct tc_cls_bpf_offload *cls_bpf = 
tc->cls_bpf; + struct tc_cls_bpf_offload *cls_bpf = type_data; struct nfp_net *nn = netdev_priv(netdev); if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) || diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index eb94d08e35cf..71e4f4f4e9ba 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -38,8 +38,8 @@ #include #include #include +#include -struct tc_to_netdev; struct net_device; struct nfp_app; @@ -135,7 +135,7 @@ int nfp_flower_metadata_init(struct nfp_app *app); void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, struct tc_to_netdev *tc); + enum tc_setup_type type, void *type_data); int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 8197836c650d..01767c7376d5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -385,9 +385,9 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, } int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, struct tc_to_netdev *tc) + enum tc_setup_type type, void *type_data) { - struct tc_cls_flower_offload *cls_flower = tc->cls_flower; + struct tc_cls_flower_offload *cls_flower = type_data; if (type != TC_SETUP_CLSFLOWER || TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 7a2f950b149c..f34e8778fae2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -42,7 +42,6 @@ struct bpf_prog; struct net_device; struct pci_dev; struct sk_buff; -struct tc_to_netdev; struct sk_buff; struct nfp_app; struct nfp_cpp; @@ -109,7 +108,7 @@ struct nfp_app_type { void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb); int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, struct tc_to_netdev *tc); + enum tc_setup_type type, void *type_data); bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, struct bpf_prog *prog); @@ -238,12 +237,11 @@ static inline bool nfp_app_tc_busy(struct nfp_app *app, struct nfp_net *nn) static inline int nfp_app_setup_tc(struct nfp_app *app, struct net_device *netdev, - enum tc_setup_type type, - struct tc_to_netdev *tc) + enum tc_setup_type type, void *type_data) { if (!app || !app->type->setup_tc) return -EOPNOTSUPP; - return app->type->setup_tc(app, netdev, type, tc); + return app->type->setup_tc(app, netdev, type, type_data); } static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index e8abab2b912e..0cf65e57addb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -89,7 +89,7 @@ const struct switchdev_ops nfp_port_switchdev_ops = { }; int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { struct nfp_port *port; @@ -97,7 +97,7 @@ int 
nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, if (!port) return -EOPNOTSUPP; - return nfp_app_setup_tc(port->app, netdev, type, tc); + return nfp_app_setup_tc(port->app, netdev, type, type_data); } struct nfp_port * diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 252f06d4307f..c88e376dcf0f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -36,7 +36,6 @@ #include -struct tc_to_netdev; struct net_device; struct nfp_app; struct nfp_pf; @@ -110,7 +109,7 @@ struct nfp_port { extern const struct switchdev_ops nfp_port_switchdev_ops; int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, - struct tc_to_netdev *tc); + void *type_data); struct nfp_port *nfp_port_from_netdev(struct net_device *netdev); struct nfp_port * diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index b0c6004db138..d407adf59610 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -33,7 +33,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - struct tc_to_netdev *tc); + void *type_data); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; extern bool efx_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/falcon/efx.h b/drivers/net/ethernet/sfc/falcon/efx.h index 4497511fc914..4f3bb30661ea 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.h +++ b/drivers/net/ethernet/sfc/falcon/efx.h @@ -33,7 +33,7 @@ netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb, netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb); void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index); int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - struct tc_to_netdev *tc); + void *type_data); unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx); extern bool ef4_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 0f125e15143a..6a75f4140a4b 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -426,9 +426,10 @@ void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue) } int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, - struct tc_to_netdev *ntc) + void *type_data) { struct ef4_nic *efx = netdev_priv(net_dev); + struct tc_mqprio_qopt *mqprio = type_data; struct ef4_channel *channel; struct ef4_tx_queue *tx_queue; unsigned tc, num_tc; @@ -437,12 +438,12 @@ int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type, if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - num_tc = ntc->mqprio->num_tc; + num_tc = mqprio->num_tc; if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC) return -EINVAL; - ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; if (num_tc == net_dev->num_tc) return 0; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 53ba30c3eb7b..32bf1fecf864 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -654,9 +654,10 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) } int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, 
- struct tc_to_netdev *ntc) + void *type_data) { struct efx_nic *efx = netdev_priv(net_dev); + struct tc_mqprio_qopt *mqprio = type_data; struct efx_channel *channel; struct efx_tx_queue *tx_queue; unsigned tc, num_tc; @@ -665,12 +666,12 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - num_tc = ntc->mqprio->num_tc; + num_tc = mqprio->num_tc; if (num_tc > EFX_MAX_TX_TC) return -EINVAL; - ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; if (num_tc == net_dev->num_tc) return 0; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index caba0abc0158..eb96a6913235 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1878,8 +1878,9 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, } static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { + struct tc_mqprio_qopt *mqprio = type_data; u8 num_tc; int i; @@ -1889,8 +1890,8 @@ static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type, if (type != TC_SETUP_MQPRIO) return -EOPNOTSUPP; - tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - num_tc = tc->mqprio->num_tc; + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + num_tc = mqprio->num_tc; /* Sanity-check the number of traffic classes requested */ if ((dev->real_num_tx_queues <= 1) || diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6e2f7e38cf8e..1d238d54c484 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -771,9 +771,6 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, typedef u16 (*select_queue_fallback_t)(struct net_device *dev, struct sk_buff *skb); -/* These structures hold the attributes of qdisc and classifiers - * that are being passed to the netdevice through the setup_tc op. - */ enum tc_setup_type { TC_SETUP_MQPRIO, TC_SETUP_CLSU32, @@ -782,18 +779,6 @@ enum tc_setup_type { TC_SETUP_CLSBPF, }; -struct tc_cls_u32_offload; - -struct tc_to_netdev { - union { - struct tc_cls_u32_offload *cls_u32; - struct tc_cls_flower_offload *cls_flower; - struct tc_cls_matchall_offload *cls_mall; - struct tc_cls_bpf_offload *cls_bpf; - struct tc_mqprio_qopt *mqprio; - }; -}; - /* These structures hold the attributes of xdp state that are being passed * to the netdevice through the xdp op. */ @@ -977,7 +962,7 @@ struct xfrmdev_ops { * int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting); * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); * int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, - * struct tc_to_netdev *tc); + * void *type_data); * Called to setup any 'tc' scheduler, classifier or action on @dev. * This is always called from the stack with the rtnl lock held and netif * tx queues stopped. 
This allows the netdevice to perform queue @@ -1226,7 +1211,7 @@ struct net_device_ops { int vf, bool setting); int (*ndo_setup_tc)(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc); + void *type_data); #if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); int (*ndo_fcoe_disable)(struct net_device *dev); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 5e01e9271619..c6b5de2fe413 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -883,11 +883,11 @@ static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev, } static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, - struct tc_to_netdev *tc) + void *type_data) { switch (type) { case TC_SETUP_CLSMATCHALL: - return dsa_slave_setup_tc_cls_matchall(dev, tc->cls_mall); + return dsa_slave_setup_tc_cls_matchall(dev, type_data); default: return -EOPNOTSUPP; } diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index dde8efdcee3b..2d4d06e41cd9 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -147,21 +147,18 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog, enum tc_clsbpf_command cmd) { struct net_device *dev = tp->q->dev_queue->dev; - struct tc_cls_bpf_offload bpf_offload = {}; - struct tc_to_netdev offload; + struct tc_cls_bpf_offload cls_bpf = {}; int err; - offload.cls_bpf = &bpf_offload; + tc_cls_common_offload_init(&cls_bpf.common, tp); + cls_bpf.command = cmd; + cls_bpf.exts = &prog->exts; + cls_bpf.prog = prog->filter; + cls_bpf.name = prog->bpf_name; + cls_bpf.exts_integrated = prog->exts_integrated; + cls_bpf.gen_flags = prog->gen_flags; - tc_cls_common_offload_init(&bpf_offload.common, tp); - bpf_offload.command = cmd; - bpf_offload.exts = &prog->exts; - bpf_offload.prog = prog->filter; - bpf_offload.name = prog->bpf_name; - bpf_offload.exts_integrated = prog->exts_integrated; - bpf_offload.gen_flags = prog->gen_flags; - - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSBPF, &cls_bpf); if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE)) prog->gen_flags |= TCA_CLS_FLAGS_IN_HW; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index ccdf2f5014ca..1474bacf4df4 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -88,7 +88,6 @@ struct cls_fl_filter { u32 handle; u32 flags; struct rcu_head rcu; - struct tc_to_netdev tc; struct net_device *hw_dev; }; @@ -225,20 +224,17 @@ static void fl_destroy_filter(struct rcu_head *head) static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) { - struct tc_cls_flower_offload offload = {0}; + struct tc_cls_flower_offload cls_flower = {}; struct net_device *dev = f->hw_dev; - struct tc_to_netdev *tc = &f->tc; if (!tc_can_offload(dev, tp)) return; - tc_cls_common_offload_init(&offload.common, tp); - offload.command = TC_CLSFLOWER_DESTROY; - offload.cookie = (unsigned long)f; + tc_cls_common_offload_init(&cls_flower.common, tp); + cls_flower.command = TC_CLSFLOWER_DESTROY; + cls_flower.cookie = (unsigned long) f; - tc->cls_flower = &offload; - - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, tc); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower); } static int fl_hw_replace_filter(struct tcf_proto *tp, @@ -247,8 +243,7 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, struct cls_fl_filter *f) { struct net_device *dev = tp->q->dev_queue->dev; - struct tc_cls_flower_offload offload = {0}; - struct tc_to_netdev *tc = 
&f->tc; + struct tc_cls_flower_offload cls_flower = {}; int err; if (!tc_can_offload(dev, tp)) { @@ -258,22 +253,21 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, return tc_skip_sw(f->flags) ? -EINVAL : 0; } dev = f->hw_dev; - offload.egress_dev = true; + cls_flower.egress_dev = true; } else { f->hw_dev = dev; } - tc_cls_common_offload_init(&offload.common, tp); - offload.command = TC_CLSFLOWER_REPLACE; - offload.cookie = (unsigned long)f; - offload.dissector = dissector; - offload.mask = mask; - offload.key = &f->mkey; - offload.exts = &f->exts; - - tc->cls_flower = &offload; + tc_cls_common_offload_init(&cls_flower.common, tp); + cls_flower.command = TC_CLSFLOWER_REPLACE; + cls_flower.cookie = (unsigned long) f; + cls_flower.dissector = dissector; + cls_flower.mask = mask; + cls_flower.key = &f->mkey; + cls_flower.exts = &f->exts; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, tc); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, + &cls_flower); if (!err) f->flags |= TCA_CLS_FLAGS_IN_HW; @@ -284,21 +278,19 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) { - struct tc_cls_flower_offload offload = {0}; + struct tc_cls_flower_offload cls_flower = {}; struct net_device *dev = f->hw_dev; - struct tc_to_netdev *tc = &f->tc; if (!tc_can_offload(dev, tp)) return; - tc_cls_common_offload_init(&offload.common, tp); - offload.command = TC_CLSFLOWER_STATS; - offload.cookie = (unsigned long)f; - offload.exts = &f->exts; - - tc->cls_flower = &offload; + tc_cls_common_offload_init(&cls_flower.common, tp); + cls_flower.command = TC_CLSFLOWER_STATS; + cls_flower.cookie = (unsigned long) f; + cls_flower.exts = &f->exts; - dev->netdev_ops->ndo_setup_tc(dev, TC_CLSFLOWER_STATS, tc); + dev->netdev_ops->ndo_setup_tc(dev, TC_CLSFLOWER_STATS, + &cls_flower); } static void __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f) diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index 174c700160ca..c9f6500b8080 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -54,18 +54,16 @@ static int mall_replace_hw_filter(struct tcf_proto *tp, unsigned long cookie) { struct net_device *dev = tp->q->dev_queue->dev; - struct tc_to_netdev offload; - struct tc_cls_matchall_offload mall_offload = {0}; + struct tc_cls_matchall_offload cls_mall = {}; int err; - tc_cls_common_offload_init(&mall_offload.common, tp); - offload.cls_mall = &mall_offload; - offload.cls_mall->command = TC_CLSMATCHALL_REPLACE; - offload.cls_mall->exts = &head->exts; - offload.cls_mall->cookie = cookie; + tc_cls_common_offload_init(&cls_mall.common, tp); + cls_mall.command = TC_CLSMATCHALL_REPLACE; + cls_mall.exts = &head->exts; + cls_mall.cookie = cookie; err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, - &offload); + &cls_mall); if (!err) head->flags |= TCA_CLS_FLAGS_IN_HW; @@ -77,16 +75,13 @@ static void mall_destroy_hw_filter(struct tcf_proto *tp, unsigned long cookie) { struct net_device *dev = tp->q->dev_queue->dev; - struct tc_to_netdev offload; - struct tc_cls_matchall_offload mall_offload = {0}; + struct tc_cls_matchall_offload cls_mall = {}; - tc_cls_common_offload_init(&mall_offload.common, tp); - offload.cls_mall = &mall_offload; - offload.cls_mall->command = TC_CLSMATCHALL_DESTROY; - offload.cls_mall->exts = NULL; - offload.cls_mall->cookie = cookie; + tc_cls_common_offload_init(&cls_mall.common, tp); + cls_mall.command = TC_CLSMATCHALL_DESTROY; + cls_mall.cookie = 
cookie; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, &offload); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSMATCHALL, &cls_mall); } static void mall_destroy(struct tcf_proto *tp) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index c0f59c471523..4ed51d347d0a 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -431,39 +431,35 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) { struct net_device *dev = tp->q->dev_queue->dev; - struct tc_cls_u32_offload u32_offload = {0}; - struct tc_to_netdev offload; + struct tc_cls_u32_offload cls_u32 = {}; - offload.cls_u32 = &u32_offload; + if (!tc_should_offload(dev, tp, 0)) + return; - if (tc_should_offload(dev, tp, 0)) { - tc_cls_common_offload_init(&u32_offload.common, tp); - offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; - offload.cls_u32->knode.handle = handle; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload); - } + tc_cls_common_offload_init(&cls_u32.common, tp); + cls_u32.command = TC_CLSU32_DELETE_KNODE; + cls_u32.knode.handle = handle; + + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32); } static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, u32 flags) { struct net_device *dev = tp->q->dev_queue->dev; - struct tc_cls_u32_offload u32_offload = {0}; - struct tc_to_netdev offload; + struct tc_cls_u32_offload cls_u32 = {}; int err; if (!tc_should_offload(dev, tp, flags)) return tc_skip_sw(flags) ? -EINVAL : 0; - offload.cls_u32 = &u32_offload; - - tc_cls_common_offload_init(&u32_offload.common, tp); - offload.cls_u32->command = TC_CLSU32_NEW_HNODE; - offload.cls_u32->hnode.divisor = h->divisor; - offload.cls_u32->hnode.handle = h->handle; - offload.cls_u32->hnode.prio = h->prio; + tc_cls_common_offload_init(&cls_u32.common, tp); + cls_u32.command = TC_CLSU32_NEW_HNODE; + cls_u32.hnode.divisor = h->divisor; + cls_u32.hnode.handle = h->handle; + cls_u32.hnode.prio = h->prio; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32); if (tc_skip_sw(flags)) return err; @@ -473,52 +469,47 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) { struct net_device *dev = tp->q->dev_queue->dev; - struct tc_cls_u32_offload u32_offload = {0}; - struct tc_to_netdev offload; + struct tc_cls_u32_offload cls_u32 = {}; - offload.cls_u32 = &u32_offload; + if (!tc_should_offload(dev, tp, 0)) + return; - if (tc_should_offload(dev, tp, 0)) { - tc_cls_common_offload_init(&u32_offload.common, tp); - offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; - offload.cls_u32->hnode.divisor = h->divisor; - offload.cls_u32->hnode.handle = h->handle; - offload.cls_u32->hnode.prio = h->prio; + tc_cls_common_offload_init(&cls_u32.common, tp); + cls_u32.command = TC_CLSU32_DELETE_HNODE; + cls_u32.hnode.divisor = h->divisor; + cls_u32.hnode.handle = h->handle; + cls_u32.hnode.prio = h->prio; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload); - } + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32); } static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, u32 flags) { struct net_device *dev = tp->q->dev_queue->dev; - struct tc_cls_u32_offload u32_offload = {0}; - struct tc_to_netdev offload; + struct tc_cls_u32_offload cls_u32 = {}; int err; - offload.cls_u32 = &u32_offload; - if 
(!tc_should_offload(dev, tp, flags)) return tc_skip_sw(flags) ? -EINVAL : 0; - tc_cls_common_offload_init(&u32_offload.common, tp); - offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; - offload.cls_u32->knode.handle = n->handle; - offload.cls_u32->knode.fshift = n->fshift; + tc_cls_common_offload_init(&cls_u32.common, tp); + cls_u32.command = TC_CLSU32_REPLACE_KNODE; + cls_u32.knode.handle = n->handle; + cls_u32.knode.fshift = n->fshift; #ifdef CONFIG_CLS_U32_MARK - offload.cls_u32->knode.val = n->val; - offload.cls_u32->knode.mask = n->mask; + cls_u32.knode.val = n->val; + cls_u32.knode.mask = n->mask; #else - offload.cls_u32->knode.val = 0; - offload.cls_u32->knode.mask = 0; + cls_u32.knode.val = 0; + cls_u32.knode.mask = 0; #endif - offload.cls_u32->knode.sel = &n->sel; - offload.cls_u32->knode.exts = &n->exts; + cls_u32.knode.sel = &n->sel; + cls_u32.knode.exts = &n->exts; if (n->ht_down) - offload.cls_u32->knode.link_handle = n->ht_down->handle; + cls_u32.knode.link_handle = n->ht_down->handle; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &offload); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSU32, &cls_u32); if (!err) n->flags |= TCA_CLS_FLAGS_IN_HW; diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 09b577dde49c..2165a05994b7 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -39,10 +39,9 @@ static void mqprio_destroy(struct Qdisc *sch) } if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) { - struct tc_mqprio_qopt offload = { 0 }; - struct tc_to_netdev tc = { { .mqprio = &offload } }; + struct tc_mqprio_qopt mqprio = {}; - dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &tc); + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &mqprio); } else { netdev_set_num_tc(dev, 0); } @@ -147,14 +146,14 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) * supplied and verified mapping */ if (qopt->hw) { - struct tc_mqprio_qopt offload = *qopt; - struct tc_to_netdev tc = { { .mqprio = &offload } }; + struct tc_mqprio_qopt mqprio = *qopt; - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, &tc); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_MQPRIO, + &mqprio); if (err) return err; - priv->hw_offload = offload.hw; + priv->hw_offload = mqprio.hw; } else { netdev_set_num_tc(dev, qopt->num_tc); for (i = 0; i < qopt->num_tc; i++) -- cgit v1.2.3-55-g7522 From c7ab7330f040a6f792e384c381629072ceb82766 Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Mon, 7 Aug 2017 19:35:49 +0200 Subject: Bluetooth: bluecard: blink LED during continuous activity Currently the activity LED is solid on during continuous activity. Blink the LED during continuous activity to match Windows driver behavior. 
Cards with activity LED: power LED = solid on when up, off when down activity LED = blinking during activity, off when idle Cards without activity LED: power LED = solid on when up, off when down, blinking during activity (don't have such a card so I don't know if Windows driver does the same thing) Signed-off-by: Ondrej Zary Signed-off-by: Marcel Holtmann --- drivers/bluetooth/bluecard_cs.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index 61ac48e1aa55..b07ca9565291 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -93,6 +93,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev); /* Hardware states */ #define CARD_READY 1 +#define CARD_ACTIVITY 2 #define CARD_HAS_PCCARD_ID 4 #define CARD_HAS_POWER_LED 5 #define CARD_HAS_ACTIVITY_LED 6 @@ -160,13 +161,14 @@ static void bluecard_activity_led_timeout(u_long arg) struct bluecard_info *info = (struct bluecard_info *)arg; unsigned int iobase = info->p_dev->resource[0]->start; - if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { - /* Disable activity LED, keep power LED enabled */ - outb(0x08 | 0x20, iobase + 0x30); - } else { - /* Disable power LED */ - outb(0x00, iobase + 0x30); + if (test_bit(CARD_ACTIVITY, &(info->hw_state))) { + /* leave LED in inactive state for HZ/10 for blink effect */ + clear_bit(CARD_ACTIVITY, &(info->hw_state)); + mod_timer(&(info->timer), jiffies + HZ / 10); } + + /* Disable activity LED, enable power LED */ + outb(0x08 | 0x20, iobase + 0x30); } @@ -174,19 +176,22 @@ static void bluecard_enable_activity_led(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; + /* don't disturb running blink timer */ + if (timer_pending(&(info->timer))) + return; + + set_bit(CARD_ACTIVITY, &(info->hw_state)); + if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { /* Enable activity LED, keep power LED enabled */ outb(0x18 | 0x60, iobase + 0x30); - - /* Stop the LED after HZ/4 */ - mod_timer(&(info->timer), jiffies + HZ / 4); } else { - /* Enable power LED */ - outb(0x08 | 0x20, iobase + 0x30); - - /* Stop the LED after HZ/2 */ - mod_timer(&(info->timer), jiffies + HZ / 2); + /* Disable power LED */ + outb(0x00, iobase + 0x30); } + + /* Stop the LED after HZ/10 */ + mod_timer(&(info->timer), jiffies + HZ / 10); } -- cgit v1.2.3-55-g7522 From 2a32ca138e24836d01918797018b0fd0302c6b3f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 7 Aug 2017 12:41:53 +0200 Subject: hns3: fix unused function warning Without CONFIG_PCI_IOV, we get a harmless warning about an unused function: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c:2273:13: error: 'hclge_disable_sriov' defined but not used [-Werror=unused-function] The #ifdefs in this driver are obviously wrong, so this just removes them and uses an IS_ENABLED() check that does the same thing correctly in a more readable way. Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support") Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 27 ++++++++++------------ 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 3611991689bc..7440e85b607c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -2272,19 +2272,17 @@ static void hclge_service_task(struct work_struct *work) static void hclge_disable_sriov(struct hclge_dev *hdev) { -#ifdef CONFIG_PCI_IOV - /* If our VFs are assigned we cannot shut down SR-IOV - * without causing issues, so just leave the hardware - * available but disabled - */ - if (pci_vfs_assigned(hdev->pdev)) { - dev_warn(&hdev->pdev->dev, - "disabling driver while VFs are assigned\n"); - return; - } + /* If our VFs are assigned we cannot shut down SR-IOV + * without causing issues, so just leave the hardware + * available but disabled + */ + if (pci_vfs_assigned(hdev->pdev)) { + dev_warn(&hdev->pdev->dev, + "disabling driver while VFs are assigned\n"); + return; + } - pci_disable_sriov(hdev->pdev); -#endif + pci_disable_sriov(hdev->pdev); } struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle) @@ -4182,9 +4180,8 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) set_bit(HCLGE_STATE_DOWN, &hdev->state); -#ifdef CONFIG_PCI_IOV - hclge_disable_sriov(hdev); -#endif + if (IS_ENABLED(CONFIG_PCI_IOV)) + hclge_disable_sriov(hdev); if (hdev->service_timer.data) del_timer_sync(&hdev->service_timer); -- cgit v1.2.3-55-g7522 From fb74c27735f0a34e76dbf1972084e984ad2ea145 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 7 Aug 2017 08:44:16 -0700 Subject: net: ipv4: add second dif to udp socket lookups Add a second device index, sdif, to udp socket lookups. sdif is the index for ingress devices enslaved to an l3mdev. It allows the lookups to consider the enslaved device as well as the L3 domain when searching for a socket. Early demux lookups are handled in the next patch as part of INET_MATCH changes. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- include/net/ip.h | 10 +++++++++ include/net/udp.h | 2 +- net/ipv4/udp.c | 58 +++++++++++++++++++++++++++++++---------------------- net/ipv4/udp_diag.c | 6 +++--- 4 files changed, 48 insertions(+), 28 deletions(-) diff --git a/include/net/ip.h b/include/net/ip.h index 9e59dcf1787a..39db596eb89f 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -78,6 +78,16 @@ struct ipcm_cookie { #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) #define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb)) +/* return enslaved device index if relevant */ +static inline int inet_sdif(struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (skb && ipv4_l3mdev_skb(IPCB(skb)->flags)) + return IPCB(skb)->iif; +#endif + return 0; +} + struct ip_ra_chain { struct ip_ra_chain __rcu *next; struct sock *sk; diff --git a/include/net/udp.h b/include/net/udp.h index cc8036987dcb..826c713d5a48 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -287,7 +287,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif); struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, - __be32 daddr, __be16 dport, int dif, + __be32 daddr, __be16 dport, int dif, int sdif, struct udp_table *tbl, struct sk_buff *skb); struct sock *udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 38bca2c4897d..fe14429e4a6c 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -380,8 +380,8 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum) static int compute_score(struct sock *sk, struct net *net, __be32 saddr, __be16 sport, - __be32 daddr, unsigned short hnum, int dif, - bool exact_dif) + __be32 daddr, unsigned short hnum, + int dif, int sdif, bool exact_dif) { int score; struct inet_sock *inet; @@ -413,10 +413,15 @@ static int compute_score(struct sock *sk, struct net *net, } if (sk->sk_bound_dev_if || exact_dif) { - if (sk->sk_bound_dev_if != dif) + bool dev_match = (sk->sk_bound_dev_if == dif || + sk->sk_bound_dev_if == sdif); + + if (exact_dif && !dev_match) return -1; - score += 4; + if (sk->sk_bound_dev_if && dev_match) + score += 4; } + if (sk->sk_incoming_cpu == raw_smp_processor_id()) score++; return score; @@ -436,10 +441,11 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr, /* called with rcu_read_lock() */ static struct sock *udp4_lib_lookup2(struct net *net, - __be32 saddr, __be16 sport, - __be32 daddr, unsigned int hnum, int dif, bool exact_dif, - struct udp_hslot *hslot2, - struct sk_buff *skb) + __be32 saddr, __be16 sport, + __be32 daddr, unsigned int hnum, + int dif, int sdif, bool exact_dif, + struct udp_hslot *hslot2, + struct sk_buff *skb) { struct sock *sk, *result; int score, badness, matches = 0, reuseport = 0; @@ -449,7 +455,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, badness = 0; udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { score = compute_score(sk, net, saddr, sport, - daddr, hnum, dif, exact_dif); + daddr, hnum, dif, sdif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { @@ -477,8 +483,8 @@ static struct sock *udp4_lib_lookup2(struct net *net, * harder than this. 
-DaveM */ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, - __be16 sport, __be32 daddr, __be16 dport, - int dif, struct udp_table *udptable, struct sk_buff *skb) + __be16 sport, __be32 daddr, __be16 dport, int dif, + int sdif, struct udp_table *udptable, struct sk_buff *skb) { struct sock *sk, *result; unsigned short hnum = ntohs(dport); @@ -496,7 +502,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, goto begin; result = udp4_lib_lookup2(net, saddr, sport, - daddr, hnum, dif, + daddr, hnum, dif, sdif, exact_dif, hslot2, skb); if (!result) { unsigned int old_slot2 = slot2; @@ -511,7 +517,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, goto begin; result = udp4_lib_lookup2(net, saddr, sport, - daddr, hnum, dif, + daddr, hnum, dif, sdif, exact_dif, hslot2, skb); } return result; @@ -521,7 +527,7 @@ begin: badness = 0; sk_for_each_rcu(sk, &hslot->head) { score = compute_score(sk, net, saddr, sport, - daddr, hnum, dif, exact_dif); + daddr, hnum, dif, sdif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { @@ -554,7 +560,7 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport, iph->daddr, dport, inet_iif(skb), - udptable, skb); + inet_sdif(skb), udptable, skb); } struct sock *udp4_lib_lookup_skb(struct sk_buff *skb, @@ -576,7 +582,7 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, struct sock *sk; sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport, - dif, &udp_table, NULL); + dif, 0, &udp_table, NULL); if (sk && !refcount_inc_not_zero(&sk->sk_refcnt)) sk = NULL; return sk; @@ -587,7 +593,7 @@ EXPORT_SYMBOL_GPL(udp4_lib_lookup); static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, - int dif, unsigned short hnum) + int dif, int sdif, unsigned short hnum) { struct inet_sock *inet = inet_sk(sk); @@ -597,7 +603,8 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, (inet->inet_dport != rmt_port && inet->inet_dport) || (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) || ipv6_only_sock(sk) || - (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) + (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif && + sk->sk_bound_dev_if != sdif)) return false; if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif)) return false; @@ -628,8 +635,8 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) struct net *net = dev_net(skb->dev); sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, - iph->saddr, uh->source, skb->dev->ifindex, udptable, - NULL); + iph->saddr, uh->source, skb->dev->ifindex, 0, + udptable, NULL); if (!sk) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; /* No socket for error */ @@ -1953,6 +1960,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); unsigned int offset = offsetof(typeof(*sk), sk_node); int dif = skb->dev->ifindex; + int sdif = inet_sdif(skb); struct hlist_node *node; struct sk_buff *nskb; @@ -1967,7 +1975,7 @@ start_lookup: sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) { if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr, - uh->source, saddr, dif, hnum)) + uh->source, saddr, dif, sdif, hnum)) continue; if (!first) { @@ -2157,7 +2165,7 @@ drop: static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net, __be16 loc_port, __be32 
loc_addr, __be16 rmt_port, __be32 rmt_addr, - int dif) + int dif, int sdif) { struct sock *sk, *result; unsigned short hnum = ntohs(loc_port); @@ -2171,7 +2179,7 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net, result = NULL; sk_for_each_rcu(sk, &hslot->head) { if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr, - rmt_port, rmt_addr, dif, hnum)) { + rmt_port, rmt_addr, dif, sdif, hnum)) { if (result) return NULL; result = sk; @@ -2216,6 +2224,7 @@ void udp_v4_early_demux(struct sk_buff *skb) struct sock *sk = NULL; struct dst_entry *dst; int dif = skb->dev->ifindex; + int sdif = inet_sdif(skb); int ours; /* validate the packet */ @@ -2241,7 +2250,8 @@ void udp_v4_early_demux(struct sk_buff *skb) } sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, - uh->source, iph->saddr, dif); + uh->source, iph->saddr, + dif, sdif); } else if (skb->pkt_type == PACKET_HOST) { sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 4515836d2a3a..1f07fe109535 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -45,7 +45,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, sk = __udp4_lib_lookup(net, req->id.idiag_src[0], req->id.idiag_sport, req->id.idiag_dst[0], req->id.idiag_dport, - req->id.idiag_if, tbl, NULL); + req->id.idiag_if, 0, tbl, NULL); #if IS_ENABLED(CONFIG_IPV6) else if (req->sdiag_family == AF_INET6) sk = __udp6_lib_lookup(net, @@ -182,7 +182,7 @@ static int __udp_diag_destroy(struct sk_buff *in_skb, sk = __udp4_lib_lookup(net, req->id.idiag_dst[0], req->id.idiag_dport, req->id.idiag_src[0], req->id.idiag_sport, - req->id.idiag_if, tbl, NULL); + req->id.idiag_if, 0, tbl, NULL); #if IS_ENABLED(CONFIG_IPV6) else if (req->sdiag_family == AF_INET6) { if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) && @@ -190,7 +190,7 @@ static int __udp_diag_destroy(struct sk_buff *in_skb, sk = __udp4_lib_lookup(net, req->id.idiag_dst[3], req->id.idiag_dport, req->id.idiag_src[3], req->id.idiag_sport, - req->id.idiag_if, tbl, NULL); + req->id.idiag_if, 0, tbl, NULL); else sk = __udp6_lib_lookup(net, -- cgit v1.2.3-55-g7522 From 3fa6f616a7a4d0bdf4d877d530456d8a5c3b109b Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 7 Aug 2017 08:44:17 -0700 Subject: net: ipv4: add second dif to inet socket lookups Add a second device index, sdif, to inet socket lookups. sdif is the index for ingress devices enslaved to an l3mdev. It allows the lookups to consider the enslaved device as well as the L3 domain when searching for a socket. TCP moves the data in the cb. Prior to tcp_v4_rcv (e.g., early demux) the ingress index is obtained from IPCB using inet_sdif and after the cb move in tcp_v4_rcv the tcp_v4_sdif helper is used. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- include/net/inet_hashtables.h | 31 +++++++++++++++++-------------- include/net/tcp.h | 10 ++++++++++ net/dccp/ipv4.c | 4 ++-- net/ipv4/inet_hashtables.c | 27 +++++++++++++++++---------- net/ipv4/tcp_ipv4.c | 13 ++++++++----- net/ipv4/udp.c | 6 +++--- net/netfilter/xt_TPROXY.c | 2 +- 7 files changed, 58 insertions(+), 35 deletions(-) diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 5026b1f08bb8..2dbbbff5e1e3 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -221,16 +221,16 @@ struct sock *__inet_lookup_listener(struct net *net, const __be32 saddr, const __be16 sport, const __be32 daddr, const unsigned short hnum, - const int dif); + const int dif, const int sdif); static inline struct sock *inet_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, __be32 saddr, __be16 sport, - __be32 daddr, __be16 dport, int dif) + __be32 daddr, __be16 dport, int dif, int sdif) { return __inet_lookup_listener(net, hashinfo, skb, doff, saddr, sport, - daddr, ntohs(dport), dif); + daddr, ntohs(dport), dif, sdif); } /* Socket demux engine toys. */ @@ -262,22 +262,24 @@ static inline struct sock *inet_lookup_listener(struct net *net, (((__force __u64)(__be32)(__daddr)) << 32) | \ ((__force __u64)(__be32)(__saddr))) #endif /* __BIG_ENDIAN */ -#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \ +#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \ (((__sk)->sk_portpair == (__ports)) && \ ((__sk)->sk_addrpair == (__cookie)) && \ (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ + ((__sk)->sk_bound_dev_if == (__dif)) || \ + ((__sk)->sk_bound_dev_if == (__sdif))) && \ net_eq(sock_net(__sk), (__net))) #else /* 32-bit arch */ #define INET_ADDR_COOKIE(__name, __saddr, __daddr) \ const int __name __deprecated __attribute__((unused)) -#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif) \ +#define INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif, __sdif) \ (((__sk)->sk_portpair == (__ports)) && \ ((__sk)->sk_daddr == (__saddr)) && \ ((__sk)->sk_rcv_saddr == (__daddr)) && \ (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ + ((__sk)->sk_bound_dev_if == (__dif)) || \ + ((__sk)->sk_bound_dev_if == (__sdif))) && \ net_eq(sock_net(__sk), (__net))) #endif /* 64-bit arch */ @@ -288,7 +290,7 @@ struct sock *__inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, const __be32 saddr, const __be16 sport, const __be32 daddr, const u16 hnum, - const int dif); + const int dif, const int sdif); static inline struct sock * inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, @@ -297,7 +299,7 @@ static inline struct sock * const int dif) { return __inet_lookup_established(net, hashinfo, saddr, sport, daddr, - ntohs(dport), dif); + ntohs(dport), dif, 0); } static inline struct sock *__inet_lookup(struct net *net, @@ -305,20 +307,20 @@ static inline struct sock *__inet_lookup(struct net *net, struct sk_buff *skb, int doff, const __be32 saddr, const __be16 sport, const __be32 daddr, const __be16 dport, - const int dif, + const int dif, const int sdif, bool *refcounted) { u16 hnum = ntohs(dport); struct sock *sk; sk = __inet_lookup_established(net, hashinfo, saddr, sport, - daddr, hnum, dif); + daddr, hnum, dif, sdif); *refcounted = true; if (sk) return sk; *refcounted = false; return __inet_lookup_listener(net, hashinfo, skb, doff, 
saddr, - sport, daddr, hnum, dif); + sport, daddr, hnum, dif, sdif); } static inline struct sock *inet_lookup(struct net *net, @@ -332,7 +334,7 @@ static inline struct sock *inet_lookup(struct net *net, bool refcounted; sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr, - dport, dif, &refcounted); + dport, dif, 0, &refcounted); if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt)) sk = NULL; @@ -344,6 +346,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, int doff, const __be16 sport, const __be16 dport, + const int sdif, bool *refcounted) { struct sock *sk = skb_steal_sock(skb); @@ -355,7 +358,7 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, doff, iph->saddr, sport, - iph->daddr, dport, inet_iif(skb), + iph->daddr, dport, inet_iif(skb), sdif, refcounted); } diff --git a/include/net/tcp.h b/include/net/tcp.h index 5173fecde495..2b89f1ab8552 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -840,6 +840,16 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) return false; } +/* TCP_SKB_CB reference means this can not be used from early demux */ +static inline int tcp_v4_sdif(struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) + return TCP_SKB_CB(skb)->header.h4.iif; +#endif + return 0; +} + /* Due to TSO, an SKB can be composed of multiple actual * packets. To keep these tracked properly, we use this. */ diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 1b202f16531f..001c08696334 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -256,7 +256,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) sk = __inet_lookup_established(net, &dccp_hashinfo, iph->daddr, dh->dccph_dport, iph->saddr, ntohs(dh->dccph_sport), - inet_iif(skb)); + inet_iif(skb), 0); if (!sk) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; @@ -804,7 +804,7 @@ static int dccp_v4_rcv(struct sk_buff *skb) lookup: sk = __inet_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh), - dh->dccph_sport, dh->dccph_dport, &refcounted); + dh->dccph_sport, dh->dccph_dport, 0, &refcounted); if (!sk) { dccp_pr_debug("failed to look up flow ID in table and " "get corresponding socket\n"); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 2e3389d614d1..597bb4cfe805 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -170,7 +170,7 @@ EXPORT_SYMBOL_GPL(__inet_inherit_port); static inline int compute_score(struct sock *sk, struct net *net, const unsigned short hnum, const __be32 daddr, - const int dif, bool exact_dif) + const int dif, const int sdif, bool exact_dif) { int score = -1; struct inet_sock *inet = inet_sk(sk); @@ -185,9 +185,13 @@ static inline int compute_score(struct sock *sk, struct net *net, score += 4; } if (sk->sk_bound_dev_if || exact_dif) { - if (sk->sk_bound_dev_if != dif) + bool dev_match = (sk->sk_bound_dev_if == dif || + sk->sk_bound_dev_if == sdif); + + if (exact_dif && !dev_match) return -1; - score += 4; + if (sk->sk_bound_dev_if && dev_match) + score += 4; } if (sk->sk_incoming_cpu == raw_smp_processor_id()) score++; @@ -208,7 +212,7 @@ struct sock *__inet_lookup_listener(struct net *net, struct sk_buff *skb, int doff, const __be32 saddr, __be16 sport, const __be32 daddr, const unsigned short hnum, - const int dif) + const int dif, const int sdif) { unsigned int hash = inet_lhashfn(net, hnum); 
struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; @@ -218,7 +222,8 @@ struct sock *__inet_lookup_listener(struct net *net, u32 phash = 0; sk_for_each_rcu(sk, &ilb->head) { - score = compute_score(sk, net, hnum, daddr, dif, exact_dif); + score = compute_score(sk, net, hnum, daddr, + dif, sdif, exact_dif); if (score > hiscore) { reuseport = sk->sk_reuseport; if (reuseport) { @@ -268,7 +273,7 @@ struct sock *__inet_lookup_established(struct net *net, struct inet_hashinfo *hashinfo, const __be32 saddr, const __be16 sport, const __be32 daddr, const u16 hnum, - const int dif) + const int dif, const int sdif) { INET_ADDR_COOKIE(acookie, saddr, daddr); const __portpair ports = INET_COMBINED_PORTS(sport, hnum); @@ -286,11 +291,12 @@ begin: if (sk->sk_hash != hash) continue; if (likely(INET_MATCH(sk, net, acookie, - saddr, daddr, ports, dif))) { + saddr, daddr, ports, dif, sdif))) { if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) goto out; if (unlikely(!INET_MATCH(sk, net, acookie, - saddr, daddr, ports, dif))) { + saddr, daddr, ports, + dif, sdif))) { sock_gen_put(sk); goto begin; } @@ -321,9 +327,10 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, __be32 daddr = inet->inet_rcv_saddr; __be32 saddr = inet->inet_daddr; int dif = sk->sk_bound_dev_if; + struct net *net = sock_net(sk); + int sdif = l3mdev_master_ifindex_by_index(net, dif); INET_ADDR_COOKIE(acookie, saddr, daddr); const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); - struct net *net = sock_net(sk); unsigned int hash = inet_ehashfn(net, daddr, lport, saddr, inet->inet_dport); struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); @@ -339,7 +346,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, continue; if (likely(INET_MATCH(sk2, net, acookie, - saddr, daddr, ports, dif))) { + saddr, daddr, ports, dif, sdif))) { if (sk2->sk_state == TCP_TIME_WAIT) { tw = inet_twsk(sk2); if (twsk_unique(sk, sk2, twp)) diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5f708c85110e..c8784ab37852 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -383,7 +383,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr, th->dest, iph->saddr, ntohs(th->source), - inet_iif(icmp_skb)); + inet_iif(icmp_skb), 0); if (!sk) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; @@ -659,7 +659,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0, ip_hdr(skb)->saddr, th->source, ip_hdr(skb)->daddr, - ntohs(th->source), inet_iif(skb)); + ntohs(th->source), inet_iif(skb), + tcp_v4_sdif(skb)); /* don't send rst if it can't find key */ if (!sk1) goto out; @@ -1523,7 +1524,7 @@ void tcp_v4_early_demux(struct sk_buff *skb) sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, iph->saddr, th->source, iph->daddr, ntohs(th->dest), - skb->skb_iif); + skb->skb_iif, inet_sdif(skb)); if (sk) { skb->sk = sk; skb->destructor = sock_edemux; @@ -1588,6 +1589,7 @@ EXPORT_SYMBOL(tcp_filter); int tcp_v4_rcv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); + int sdif = inet_sdif(skb); const struct iphdr *iph; const struct tcphdr *th; bool refcounted; @@ -1638,7 +1640,7 @@ int tcp_v4_rcv(struct sk_buff *skb) lookup: sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, - th->dest, &refcounted); + th->dest, sdif, &refcounted); if (!sk) goto no_tcp_socket; @@ -1766,7 +1768,8 
@@ do_time_wait: __tcp_hdrlen(th), iph->saddr, th->source, iph->daddr, th->dest, - inet_iif(skb)); + inet_iif(skb), + sdif); if (sk2) { inet_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index fe14429e4a6c..99f25bfec606 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -2196,7 +2196,7 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net, static struct sock *__udp4_lib_demux_lookup(struct net *net, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, - int dif) + int dif, int sdif) { unsigned short hnum = ntohs(loc_port); unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum); @@ -2208,7 +2208,7 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net, udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { if (INET_MATCH(sk, net, acookie, rmt_addr, - loc_addr, ports, dif)) + loc_addr, ports, dif, sdif)) return sk; /* Only check first socket in chain */ break; @@ -2254,7 +2254,7 @@ void udp_v4_early_demux(struct sk_buff *skb) dif, sdif); } else if (skb->pkt_type == PACKET_HOST) { sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, - uh->source, iph->saddr, dif); + uh->source, iph->saddr, dif, sdif); } if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index d767e35fff6b..94fb0fd0c667 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -125,7 +125,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp, __tcp_hdrlen(tcph), saddr, sport, daddr, dport, - in->ifindex); + in->ifindex, 0); if (sk && !refcount_inc_not_zero(&sk->sk_refcnt)) sk = NULL; -- cgit v1.2.3-55-g7522 From 67359930e185c491b47cb958d5f1d6c1af4598a2 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 7 Aug 2017 08:44:18 -0700 Subject: net: ipv4: add second dif to raw socket lookups Add a second device index, sdif, to raw socket lookups. sdif is the index for ingress devices enslaved to an l3mdev. It allows the lookups to consider the enslaved device as well as the L3 domain when searching for a socket. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- include/net/raw.h | 2 +- net/ipv4/raw.c | 16 +++++++++++----- net/ipv4/raw_diag.c | 2 +- 3 files changed, 13 insertions(+), 7 deletions(-) diff --git a/include/net/raw.h b/include/net/raw.h index 57c33dd22ec4..99d26d0c4a19 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -26,7 +26,7 @@ extern struct proto raw_prot; extern struct raw_hashinfo raw_v4_hashinfo; struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, unsigned short num, __be32 raddr, - __be32 laddr, int dif); + __be32 laddr, int dif, int sdif); int raw_abort(struct sock *sk, int err); void raw_icmp_error(struct sk_buff *, int, u32); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index b0bb5d0a30bd..2726aecf224b 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -122,7 +122,8 @@ void raw_unhash_sk(struct sock *sk) EXPORT_SYMBOL_GPL(raw_unhash_sk); struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, - unsigned short num, __be32 raddr, __be32 laddr, int dif) + unsigned short num, __be32 raddr, __be32 laddr, + int dif, int sdif) { sk_for_each_from(sk) { struct inet_sock *inet = inet_sk(sk); @@ -130,7 +131,8 @@ struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, if (net_eq(sock_net(sk), net) && inet->inet_num == num && !(inet->inet_daddr && inet->inet_daddr != raddr) && !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) && - !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) + !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif && + sk->sk_bound_dev_if != sdif)) goto found; /* gotcha */ } sk = NULL; @@ -171,6 +173,7 @@ static int icmp_filter(const struct sock *sk, const struct sk_buff *skb) */ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash) { + int sdif = inet_sdif(skb); struct sock *sk; struct hlist_head *head; int delivered = 0; @@ -184,7 +187,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash) net = dev_net(skb->dev); sk = __raw_v4_lookup(net, __sk_head(head), iph->protocol, iph->saddr, iph->daddr, - skb->dev->ifindex); + skb->dev->ifindex, sdif); while (sk) { delivered = 1; @@ -199,7 +202,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash) } sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol, iph->saddr, iph->daddr, - skb->dev->ifindex); + skb->dev->ifindex, sdif); } out: read_unlock(&raw_v4_hashinfo.lock); @@ -297,12 +300,15 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info) read_lock(&raw_v4_hashinfo.lock); raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); if (raw_sk) { + int dif = skb->dev->ifindex; + int sdif = inet_sdif(skb); + iph = (const struct iphdr *)skb->data; net = dev_net(skb->dev); while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol, iph->daddr, iph->saddr, - skb->dev->ifindex)) != NULL) { + dif, sdif)) != NULL) { raw_err(raw_sk, skb, info); raw_sk = sk_next(raw_sk); iph = (const struct iphdr *)skb->data; diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c index e1a51ca68d23..c600d3c71d4d 100644 --- a/net/ipv4/raw_diag.c +++ b/net/ipv4/raw_diag.c @@ -46,7 +46,7 @@ static struct sock *raw_lookup(struct net *net, struct sock *from, sk = __raw_v4_lookup(net, from, r->sdiag_raw_protocol, r->id.idiag_dst[0], r->id.idiag_src[0], - r->id.idiag_if); + r->id.idiag_if, 0); #if IS_ENABLED(CONFIG_IPV6) else sk = __raw_v6_lookup(net, from, r->sdiag_raw_protocol, -- cgit v1.2.3-55-g7522 From 60d9b03141243589dacd3136f3fcb4e6976df954 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 7 Aug 2017 08:44:19 -0700 Subject: net: ipv4: add second dif to 
multicast source filter Signed-off-by: David Ahern Signed-off-by: David S. Miller --- include/linux/igmp.h | 3 ++- net/ipv4/igmp.c | 6 ++++-- net/ipv4/raw.c | 2 +- net/ipv4/udp.c | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 97caf1821de8..f8231854b5d6 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -118,7 +118,8 @@ extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf, struct ip_msfilter __user *optval, int __user *optlen); extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf, struct group_filter __user *optval, int __user *optlen); -extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, int dif); +extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt, + int dif, int sdif); extern void ip_mc_init_dev(struct in_device *); extern void ip_mc_destroy_dev(struct in_device *); extern void ip_mc_up(struct in_device *); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 28f14afd0dd3..5bc8570c2ec3 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2549,7 +2549,8 @@ done: /* * check if a multicast source filter allows delivery for a given */ -int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif) +int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, + int dif, int sdif) { struct inet_sock *inet = inet_sk(sk); struct ip_mc_socklist *pmc; @@ -2564,7 +2565,8 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif) rcu_read_lock(); for_each_pmc_rcu(inet, pmc) { if (pmc->multi.imr_multiaddr.s_addr == loc_addr && - pmc->multi.imr_ifindex == dif) + (pmc->multi.imr_ifindex == dif || + (sdif && pmc->multi.imr_ifindex == sdif))) break; } ret = inet->mc_all; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 2726aecf224b..33b70bfd1122 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -193,7 +193,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash) delivered = 1; if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) && ip_mc_sf_allow(sk, iph->daddr, iph->saddr, - skb->dev->ifindex)) { + skb->dev->ifindex, sdif)) { struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC); /* Not releasing hash table! */ diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 99f25bfec606..cac59d7420cd 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -606,7 +606,7 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif && sk->sk_bound_dev_if != sdif)) return false; - if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif)) + if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif)) return false; return true; } -- cgit v1.2.3-55-g7522 From 1801b570dd2ae50b90231f283e79a9a94fbe7875 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 7 Aug 2017 08:44:20 -0700 Subject: net: ipv6: add second dif to udp socket lookups Add a second device index, sdif, to udp socket lookups. sdif is the index for ingress devices enslaved to an l3mdev. It allows the lookups to consider the enslaved device as well as the L3 domain when searching for a socket. Early demux lookups are handled in the next patch as part of INET_MATCH changes. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- include/linux/ipv6.h | 10 ++++++++++ include/net/udp.h | 2 +- net/ipv4/udp_diag.c | 4 ++-- net/ipv6/udp.c | 40 ++++++++++++++++++++++------------------ 4 files changed, 35 insertions(+), 21 deletions(-) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 474d6bbc158c..ac2da4e11d5e 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -158,6 +158,16 @@ static inline bool inet6_is_jumbogram(const struct sk_buff *skb) return !!(IP6CB(skb)->flags & IP6SKB_JUMBOGRAM); } +/* can not be used in TCP layer after tcp_v6_fill_cb */ +static inline int inet6_sdif(const struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) + return IP6CB(skb)->iif; +#endif + return 0; +} + /* can not be used in TCP layer after tcp_v6_fill_cb */ static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb) { diff --git a/include/net/udp.h b/include/net/udp.h index 826c713d5a48..20dcdca4e85c 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -298,7 +298,7 @@ struct sock *udp6_lib_lookup(struct net *net, struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, - int dif, struct udp_table *tbl, + int dif, int sdif, struct udp_table *tbl, struct sk_buff *skb); struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport); diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 1f07fe109535..d0390d844ac8 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -53,7 +53,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, req->id.idiag_sport, (struct in6_addr *)req->id.idiag_dst, req->id.idiag_dport, - req->id.idiag_if, tbl, NULL); + req->id.idiag_if, 0, tbl, NULL); #endif if (sk && !refcount_inc_not_zero(&sk->sk_refcnt)) sk = NULL; @@ -198,7 +198,7 @@ static int __udp_diag_destroy(struct sk_buff *in_skb, req->id.idiag_dport, (struct in6_addr *)req->id.idiag_src, req->id.idiag_sport, - req->id.idiag_if, tbl, NULL); + req->id.idiag_if, 0, tbl, NULL); } #endif else { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 578142b7ca3e..d96a877798a7 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -129,7 +129,7 @@ static void udp_v6_rehash(struct sock *sk) static int compute_score(struct sock *sk, struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned short hnum, - int dif, bool exact_dif) + int dif, int sdif, bool exact_dif) { int score; struct inet_sock *inet; @@ -161,9 +161,13 @@ static int compute_score(struct sock *sk, struct net *net, } if (sk->sk_bound_dev_if || exact_dif) { - if (sk->sk_bound_dev_if != dif) + bool dev_match = (sk->sk_bound_dev_if == dif || + sk->sk_bound_dev_if == sdif); + + if (exact_dif && !dev_match) return -1; - score++; + if (sk->sk_bound_dev_if && dev_match) + score++; } if (sk->sk_incoming_cpu == raw_smp_processor_id()) @@ -175,9 +179,9 @@ static int compute_score(struct sock *sk, struct net *net, /* called with rcu_read_lock() */ static struct sock *udp6_lib_lookup2(struct net *net, const struct in6_addr *saddr, __be16 sport, - const struct in6_addr *daddr, unsigned int hnum, int dif, - bool exact_dif, struct udp_hslot *hslot2, - struct sk_buff *skb) + const struct in6_addr *daddr, unsigned int hnum, + int dif, int sdif, bool exact_dif, + struct udp_hslot *hslot2, struct sk_buff *skb) { struct sock *sk, *result; int score, badness, matches = 0, reuseport = 0; @@ -187,7 +191,7 @@ static struct sock 
*udp6_lib_lookup2(struct net *net, badness = -1; udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { score = compute_score(sk, net, saddr, sport, - daddr, hnum, dif, exact_dif); + daddr, hnum, dif, sdif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { @@ -214,10 +218,10 @@ static struct sock *udp6_lib_lookup2(struct net *net, /* rcu_read_lock() must be held */ struct sock *__udp6_lib_lookup(struct net *net, - const struct in6_addr *saddr, __be16 sport, - const struct in6_addr *daddr, __be16 dport, - int dif, struct udp_table *udptable, - struct sk_buff *skb) + const struct in6_addr *saddr, __be16 sport, + const struct in6_addr *daddr, __be16 dport, + int dif, int sdif, struct udp_table *udptable, + struct sk_buff *skb) { struct sock *sk, *result; unsigned short hnum = ntohs(dport); @@ -235,7 +239,7 @@ struct sock *__udp6_lib_lookup(struct net *net, goto begin; result = udp6_lib_lookup2(net, saddr, sport, - daddr, hnum, dif, exact_dif, + daddr, hnum, dif, sdif, exact_dif, hslot2, skb); if (!result) { unsigned int old_slot2 = slot2; @@ -250,7 +254,7 @@ struct sock *__udp6_lib_lookup(struct net *net, goto begin; result = udp6_lib_lookup2(net, saddr, sport, - daddr, hnum, dif, + daddr, hnum, dif, sdif, exact_dif, hslot2, skb); } @@ -261,7 +265,7 @@ begin: badness = -1; sk_for_each_rcu(sk, &hslot->head) { score = compute_score(sk, net, saddr, sport, daddr, hnum, dif, - exact_dif); + sdif, exact_dif); if (score > badness) { reuseport = sk->sk_reuseport; if (reuseport) { @@ -294,7 +298,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), - udptable, skb); + inet6_sdif(skb), udptable, skb); } struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, @@ -304,7 +308,7 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb, return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), - &udp_table, skb); + inet6_sdif(skb), &udp_table, skb); } EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb); @@ -320,7 +324,7 @@ struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be struct sock *sk; sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport, - dif, &udp_table, NULL); + dif, 0, &udp_table, NULL); if (sk && !refcount_inc_not_zero(&sk->sk_refcnt)) sk = NULL; return sk; @@ -501,7 +505,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, struct net *net = dev_net(skb->dev); sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source, - inet6_iif(skb), udptable, skb); + inet6_iif(skb), 0, udptable, skb); if (!sk) { __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); -- cgit v1.2.3-55-g7522 From 4297a0ef085729af98adab9131d128c576ed3044 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 7 Aug 2017 08:44:21 -0700 Subject: net: ipv6: add second dif to inet6 socket lookups Add a second device index, sdif, to inet6 socket lookups. sdif is the index for ingress devices enslaved to an l3mdev. It allows the lookups to consider the enslaved device as well as the L3 domain when searching for a socket. TCP moves the data in the cb. Prior to tcp_v4_rcv (e.g., early demux) the ingress index is obtained from IPCB using inet_sdif and after tcp_v4_rcv tcp_v4_sdif is used. Signed-off-by: David Ahern Signed-off-by: David S. 
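The established-socket side boils down to a wider device test: INET6_MATCH() now accepts either index, roughly

	/* condensed device clause of INET6_MATCH() after this change:
	 * unbound sockets match anything; bound sockets match the
	 * ingress device or its L3 master
	 */
	bool dev_match = !sk->sk_bound_dev_if ||
			 sk->sk_bound_dev_if == dif ||
			 sk->sk_bound_dev_if == sdif;

while listener lookups go through the same dif/sdif scoring as in the UDP patch above.
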
Miller --- include/net/inet6_hashtables.h | 22 +++++++++++++--------- include/net/tcp.h | 10 ++++++++++ net/dccp/ipv6.c | 4 ++-- net/ipv6/inet6_hashtables.c | 28 +++++++++++++++++----------- net/ipv6/tcp_ipv6.c | 13 ++++++++----- net/ipv6/udp.c | 7 ++++--- net/netfilter/xt_TPROXY.c | 4 ++-- 7 files changed, 56 insertions(+), 32 deletions(-) diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h index b87becacd9d3..6e91e38a31da 100644 --- a/include/net/inet6_hashtables.h +++ b/include/net/inet6_hashtables.h @@ -49,7 +49,8 @@ struct sock *__inet6_lookup_established(struct net *net, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, - const u16 hnum, const int dif); + const u16 hnum, const int dif, + const int sdif); struct sock *inet6_lookup_listener(struct net *net, struct inet_hashinfo *hashinfo, @@ -57,7 +58,8 @@ struct sock *inet6_lookup_listener(struct net *net, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, - const unsigned short hnum, const int dif); + const unsigned short hnum, + const int dif, const int sdif); static inline struct sock *__inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, @@ -66,24 +68,25 @@ static inline struct sock *__inet6_lookup(struct net *net, const __be16 sport, const struct in6_addr *daddr, const u16 hnum, - const int dif, + const int dif, const int sdif, bool *refcounted) { struct sock *sk = __inet6_lookup_established(net, hashinfo, saddr, - sport, daddr, hnum, dif); + sport, daddr, hnum, + dif, sdif); *refcounted = true; if (sk) return sk; *refcounted = false; return inet6_lookup_listener(net, hashinfo, skb, doff, saddr, sport, - daddr, hnum, dif); + daddr, hnum, dif, sdif); } static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, struct sk_buff *skb, int doff, const __be16 sport, const __be16 dport, - int iif, + int iif, int sdif, bool *refcounted) { struct sock *sk = skb_steal_sock(skb); @@ -95,7 +98,7 @@ static inline struct sock *__inet6_lookup_skb(struct inet_hashinfo *hashinfo, return __inet6_lookup(dev_net(skb_dst(skb)->dev), hashinfo, skb, doff, &ipv6_hdr(skb)->saddr, sport, &ipv6_hdr(skb)->daddr, ntohs(dport), - iif, refcounted); + iif, sdif, refcounted); } struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, @@ -107,13 +110,14 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, int inet6_hash(struct sock *sk); #endif /* IS_ENABLED(CONFIG_IPV6) */ -#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \ +#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif, __sdif) \ (((__sk)->sk_portpair == (__ports)) && \ ((__sk)->sk_family == AF_INET6) && \ ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \ ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \ (!(__sk)->sk_bound_dev_if || \ - ((__sk)->sk_bound_dev_if == (__dif))) && \ + ((__sk)->sk_bound_dev_if == (__dif)) || \ + ((__sk)->sk_bound_dev_if == (__sdif))) && \ net_eq(sock_net(__sk), (__net))) #endif /* _INET6_HASHTABLES_H */ diff --git a/include/net/tcp.h b/include/net/tcp.h index 2b89f1ab8552..999f3efe572b 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -827,6 +827,16 @@ static inline int tcp_v6_iif(const struct sk_buff *skb) return l3_slave ? 
skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif; } + +/* TCP_SKB_CB reference means this can not be used from early demux */ +static inline int tcp_v6_sdif(const struct sk_buff *skb) +{ +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) + if (skb && ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags)) + return TCP_SKB_CB(skb)->header.h6.iif; +#endif + return 0; +} #endif /* TCP_SKB_CB reference means this can not be used from early demux */ diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 1b58eac8aad3..47a7b59b355e 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -89,7 +89,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, sk = __inet6_lookup_established(net, &dccp_hashinfo, &hdr->daddr, dh->dccph_dport, &hdr->saddr, ntohs(dh->dccph_sport), - inet6_iif(skb)); + inet6_iif(skb), 0); if (!sk) { __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), @@ -687,7 +687,7 @@ static int dccp_v6_rcv(struct sk_buff *skb) lookup: sk = __inet6_lookup_skb(&dccp_hashinfo, skb, __dccp_hdr_len(dh), dh->dccph_sport, dh->dccph_dport, - inet6_iif(skb), &refcounted); + inet6_iif(skb), 0, &refcounted); if (!sk) { dccp_pr_debug("failed to look up flow ID in table and " "get corresponding socket\n"); diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c index b13b8f93079d..b01858f5deb1 100644 --- a/net/ipv6/inet6_hashtables.c +++ b/net/ipv6/inet6_hashtables.c @@ -56,7 +56,7 @@ struct sock *__inet6_lookup_established(struct net *net, const __be16 sport, const struct in6_addr *daddr, const u16 hnum, - const int dif) + const int dif, const int sdif) { struct sock *sk; const struct hlist_nulls_node *node; @@ -73,12 +73,12 @@ begin: sk_nulls_for_each_rcu(sk, node, &head->chain) { if (sk->sk_hash != hash) continue; - if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif)) + if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif, sdif)) continue; if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt))) goto out; - if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) { + if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif, sdif))) { sock_gen_put(sk); goto begin; } @@ -96,7 +96,7 @@ EXPORT_SYMBOL(__inet6_lookup_established); static inline int compute_score(struct sock *sk, struct net *net, const unsigned short hnum, const struct in6_addr *daddr, - const int dif, bool exact_dif) + const int dif, const int sdif, bool exact_dif) { int score = -1; @@ -110,9 +110,13 @@ static inline int compute_score(struct sock *sk, struct net *net, score++; } if (sk->sk_bound_dev_if || exact_dif) { - if (sk->sk_bound_dev_if != dif) + bool dev_match = (sk->sk_bound_dev_if == dif || + sk->sk_bound_dev_if == sdif); + + if (exact_dif && !dev_match) return -1; - score++; + if (sk->sk_bound_dev_if && dev_match) + score++; } if (sk->sk_incoming_cpu == raw_smp_processor_id()) score++; @@ -126,7 +130,7 @@ struct sock *inet6_lookup_listener(struct net *net, struct sk_buff *skb, int doff, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, - const unsigned short hnum, const int dif) + const unsigned short hnum, const int dif, const int sdif) { unsigned int hash = inet_lhashfn(net, hnum); struct inet_listen_hashbucket *ilb = &hashinfo->listening_hash[hash]; @@ -136,7 +140,7 @@ struct sock *inet6_lookup_listener(struct net *net, u32 phash = 0; sk_for_each(sk, &ilb->head) { - score = compute_score(sk, net, hnum, daddr, dif, exact_dif); + score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif); if (score > hiscore) { reuseport = sk->sk_reuseport; if (reuseport) { @@ -171,7 +175,7 
@@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo, bool refcounted; sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr, - ntohs(dport), dif, &refcounted); + ntohs(dport), dif, 0, &refcounted); if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt)) sk = NULL; return sk; @@ -187,8 +191,9 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, const struct in6_addr *daddr = &sk->sk_v6_rcv_saddr; const struct in6_addr *saddr = &sk->sk_v6_daddr; const int dif = sk->sk_bound_dev_if; - const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); struct net *net = sock_net(sk); + const int sdif = l3mdev_master_ifindex_by_index(net, dif); + const __portpair ports = INET_COMBINED_PORTS(inet->inet_dport, lport); const unsigned int hash = inet6_ehashfn(net, daddr, lport, saddr, inet->inet_dport); struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); @@ -203,7 +208,8 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, if (sk2->sk_hash != hash) continue; - if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif))) { + if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, + dif, sdif))) { if (sk2->sk_state == TCP_TIME_WAIT) { tw = inet_twsk(sk2); if (twsk_unique(sk, sk2, twp)) diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index ced5dcf37465..f776ec4ecf6d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -350,7 +350,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, sk = __inet6_lookup_established(net, &tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr, ntohs(th->source), - skb->dev->ifindex); + skb->dev->ifindex, inet6_sdif(skb)); if (!sk) { __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), @@ -918,7 +918,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) &tcp_hashinfo, NULL, 0, &ipv6h->saddr, th->source, &ipv6h->daddr, - ntohs(th->source), tcp_v6_iif(skb)); + ntohs(th->source), tcp_v6_iif(skb), + tcp_v6_sdif(skb)); if (!sk1) goto out; @@ -1397,6 +1398,7 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, static int tcp_v6_rcv(struct sk_buff *skb) { + int sdif = inet6_sdif(skb); const struct tcphdr *th; const struct ipv6hdr *hdr; bool refcounted; @@ -1430,7 +1432,7 @@ static int tcp_v6_rcv(struct sk_buff *skb) lookup: sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), - th->source, th->dest, inet6_iif(skb), + th->source, th->dest, inet6_iif(skb), sdif, &refcounted); if (!sk) goto no_tcp_socket; @@ -1563,7 +1565,8 @@ do_time_wait: skb, __tcp_hdrlen(th), &ipv6_hdr(skb)->saddr, th->source, &ipv6_hdr(skb)->daddr, - ntohs(th->dest), tcp_v6_iif(skb)); + ntohs(th->dest), tcp_v6_iif(skb), + sdif); if (sk2) { struct inet_timewait_sock *tw = inet_twsk(sk); inet_twsk_deschedule_put(tw); @@ -1610,7 +1613,7 @@ static void tcp_v6_early_demux(struct sk_buff *skb) sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo, &hdr->saddr, th->source, &hdr->daddr, ntohs(th->dest), - inet6_iif(skb)); + inet6_iif(skb), inet6_sdif(skb)); if (sk) { skb->sk = sk; skb->destructor = sock_edemux; diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index d96a877798a7..19afcaf4a22e 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -897,7 +897,7 @@ discard: static struct sock *__udp6_lib_demux_lookup(struct net *net, __be16 loc_port, const struct in6_addr *loc_addr, __be16 rmt_port, const struct in6_addr *rmt_addr, - int dif) + int dif, int sdif) { unsigned short hnum = ntohs(loc_port); unsigned 
int hash2 = udp6_portaddr_hash(net, loc_addr, hnum); @@ -908,7 +908,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net, udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) { if (sk->sk_state == TCP_ESTABLISHED && - INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif)) + INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif)) return sk; /* Only check first socket in chain */ break; @@ -923,6 +923,7 @@ static void udp_v6_early_demux(struct sk_buff *skb) struct sock *sk; struct dst_entry *dst; int dif = skb->dev->ifindex; + int sdif = inet6_sdif(skb); if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) @@ -934,7 +935,7 @@ static void udp_v6_early_demux(struct sk_buff *skb) sk = __udp6_lib_demux_lookup(net, uh->dest, &ipv6_hdr(skb)->daddr, uh->source, &ipv6_hdr(skb)->saddr, - dif); + dif, sdif); else return; diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 94fb0fd0c667..ade4c10c28c6 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -195,7 +195,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, thoff + __tcp_hdrlen(tcph), saddr, sport, daddr, ntohs(dport), - in->ifindex); + in->ifindex, 0); if (sk && !refcount_inc_not_zero(&sk->sk_refcnt)) sk = NULL; @@ -208,7 +208,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp, case NFT_LOOKUP_ESTABLISHED: sk = __inet6_lookup_established(net, &tcp_hashinfo, saddr, sport, daddr, ntohs(dport), - in->ifindex); + in->ifindex, 0); break; default: BUG(); -- cgit v1.2.3-55-g7522 From 5108ab4bf446fa9ad2c71f5fc1d839067b72636f Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 7 Aug 2017 08:44:22 -0700 Subject: net: ipv6: add second dif to raw socket lookups Add a second device index, sdif, to raw socket lookups. sdif is the index for ingress devices enslaved to an l3mdev. It allows the lookups to consider the enslaved device as well as the L3 domain when searching for a socket. Signed-off-by: David Ahern Signed-off-by: David S. 
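The raw socket filter is relaxed the same way as on the IPv4 side: a bound socket is skipped only if it is bound to neither the ingress device nor its L3 master. Condensed from the __raw_v6_lookup() hunk below:

		if (sk->sk_bound_dev_if &&
		    sk->sk_bound_dev_if != dif &&
		    sk->sk_bound_dev_if != sdif)
			continue;	/* bound to an unrelated device */
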
Miller --- include/net/rawv6.h | 2 +- net/ipv4/raw_diag.c | 2 +- net/ipv6/raw.c | 13 ++++++++----- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/include/net/rawv6.h b/include/net/rawv6.h index cbe4e9de1894..4addc5c988e0 100644 --- a/include/net/rawv6.h +++ b/include/net/rawv6.h @@ -6,7 +6,7 @@ extern struct raw_hashinfo raw_v6_hashinfo; struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, unsigned short num, const struct in6_addr *loc_addr, - const struct in6_addr *rmt_addr, int dif); + const struct in6_addr *rmt_addr, int dif, int sdif); int raw_abort(struct sock *sk, int err); diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c index c600d3c71d4d..c200065ef9a5 100644 --- a/net/ipv4/raw_diag.c +++ b/net/ipv4/raw_diag.c @@ -52,7 +52,7 @@ static struct sock *raw_lookup(struct net *net, struct sock *from, sk = __raw_v6_lookup(net, from, r->sdiag_raw_protocol, (const struct in6_addr *)r->id.idiag_src, (const struct in6_addr *)r->id.idiag_dst, - r->id.idiag_if); + r->id.idiag_if, 0); #endif return sk; } diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 60be012fe708..e4462b0ff801 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(raw_v6_hashinfo); struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, unsigned short num, const struct in6_addr *loc_addr, - const struct in6_addr *rmt_addr, int dif) + const struct in6_addr *rmt_addr, int dif, int sdif) { bool is_multicast = ipv6_addr_is_multicast(loc_addr); @@ -86,7 +86,9 @@ struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) continue; - if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) + if (sk->sk_bound_dev_if && + sk->sk_bound_dev_if != dif && + sk->sk_bound_dev_if != sdif) continue; if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) { @@ -178,7 +180,8 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) goto out; net = dev_net(skb->dev); - sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, inet6_iif(skb)); + sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, + inet6_iif(skb), inet6_sdif(skb)); while (sk) { int filtered; @@ -222,7 +225,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) } } sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr, - inet6_iif(skb)); + inet6_iif(skb), inet6_sdif(skb)); } out: read_unlock(&raw_v6_hashinfo.lock); @@ -378,7 +381,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr, net = dev_net(skb->dev); while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr, - inet6_iif(skb)))) { + inet6_iif(skb), inet6_iif(skb)))) { rawv6_err(sk, skb, NULL, type, code, inner_offset, info); sk = sk_next(sk); -- cgit v1.2.3-55-g7522 From e1cea2e7396820730ca98d53e986bd5241ef14cc Mon Sep 17 00:00:00 2001 From: John Allen Date: Mon, 7 Aug 2017 15:42:30 -0500 Subject: ibmvnic: Report rx buffer return codes as netdev_dbg Reporting any return code for a receive buffer as an "rx error" only produces alarming noise and the only values that have been observed to be used in this field are not error conditions. Change this to a netdev_dbg with a more descriptive message. Signed-off-by: John Allen Signed-off-by: David S. 
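For reference, the reworked check amounts to the sketch below; because netdev_dbg() goes through dynamic debug, the message stays silent unless the callsite is explicitly enabled (for example via /sys/kernel/debug/dynamic_debug/control), which is the point of the demotion:

	/* rc is a normal status code, not an error: report it at
	 * debug level only
	 */
	if (next->rx_comp.rc)
		netdev_dbg(netdev, "rx buffer returned with rc %x\n",
			   be16_to_cpu(next->rx_comp.rc));
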
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5932160eb815..99576ba4187f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1579,7 +1579,8 @@ restart_poll: rx_comp.correlator); /* do error checking */ if (next->rx_comp.rc) { - netdev_err(netdev, "rx error %x\n", next->rx_comp.rc); + netdev_dbg(netdev, "rx buffer returned with rc %x\n", + be16_to_cpu(next->rx_comp.rc)); /* free the entry */ next->rx_comp.first = 0; remove_buff_from_pool(adapter, rx_buff); -- cgit v1.2.3-55-g7522 From d226a2b84d0528da7e35e7e19e052293889cdd21 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sat, 5 Aug 2017 00:43:43 +0300 Subject: of_mdio: use of_property_read_u32_array() The "fixed-link" prop support predated of_property_read_u32_array(), so basically had to open-code it. Using the modern API saves 24 bytes of the object code (ARM gcc 4.8.5); the only behavior change would be that the prop length check is now less strict (however the strict pre-check done in of_phy_is_fixed_link() is left intact anyway)... Signed-off-by: Sergei Shtylyov Reviewed-by: Andrew Lunn Reviewed-by: Rob Herring Signed-off-by: David S. Miller --- drivers/of/of_mdio.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index a0d27c04e22f..94ca3470e943 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -421,10 +421,10 @@ int of_phy_register_fixed_link(struct device_node *np) { struct fixed_phy_status status = {}; struct device_node *fixed_link_node; - const __be32 *fixed_link_prop; + u32 fixed_link_prop[5]; struct phy_device *phy; const char *managed; - int link_gpio, len; + int link_gpio; if (of_property_read_string(np, "managed", &managed) == 0) { if (strcmp(managed, "in-band-status") == 0) { @@ -459,13 +459,13 @@ int of_phy_register_fixed_link(struct device_node *np) } /* Old binding */ - fixed_link_prop = of_get_property(np, "fixed-link", &len); - if (fixed_link_prop && len == (5 * sizeof(__be32))) { + if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop, + ARRAY_SIZE(fixed_link_prop)) == 0) { status.link = 1; - status.duplex = be32_to_cpu(fixed_link_prop[1]); - status.speed = be32_to_cpu(fixed_link_prop[2]); - status.pause = be32_to_cpu(fixed_link_prop[3]); - status.asym_pause = be32_to_cpu(fixed_link_prop[4]); + status.duplex = fixed_link_prop[1]; + status.speed = fixed_link_prop[2]; + status.pause = fixed_link_prop[3]; + status.asym_pause = fixed_link_prop[4]; phy = fixed_phy_register(PHY_POLL, &status, -1, np); return PTR_ERR_OR_ZERO(phy); } -- cgit v1.2.3-55-g7522 From cf5f5cea270655dd49370760576c64b228583b79 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 4 Aug 2017 16:00:09 -0700 Subject: bpf: add support for sys_enter_* and sys_exit_* tracepoints Currently, bpf programs cannot be attached to sys_enter_* and sys_exit_* style tracepoints. The iovisor/bcc issue #748 (https://github.com/iovisor/bcc/issues/748) documents this issue. 
For example, if you try to attach a bpf program to tracepoints syscalls/sys_enter_newfstat, you will get the following error: # ./tools/trace.py t:syscalls:sys_enter_newfstat Ioctl(PERF_EVENT_IOC_SET_BPF): Invalid argument Failed to attach BPF to tracepoint The main reason is that syscalls/sys_enter_* and syscalls/sys_exit_* tracepoints are treated differently from other tracepoints and there is no bpf hook to it. This patch adds bpf support for these syscalls tracepoints by . permitting bpf attachment in ioctl PERF_EVENT_IOC_SET_BPF . calling bpf programs in perf_syscall_enter and perf_syscall_exit The legality of bpf program ctx access is also checked. Function trace_event_get_offsets returns correct max offset for each specific syscall tracepoint, which is compared against the maximum offset access in bpf program. Signed-off-by: Yonghong Song Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/syscalls.h | 12 ++++++++++ kernel/events/core.c | 10 ++++---- kernel/trace/trace_syscalls.c | 53 +++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 69 insertions(+), 6 deletions(-) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 3cb15ea48aee..c9170218e9e6 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -172,8 +172,20 @@ extern struct trace_event_functions exit_syscall_print_funcs; static struct syscall_metadata __used \ __attribute__((section("__syscalls_metadata"))) \ *__p_syscall_meta_##sname = &__syscall_meta_##sname; + +static inline int is_syscall_trace_event(struct trace_event_call *tp_event) +{ + return tp_event->class == &event_class_syscall_enter || + tp_event->class == &event_class_syscall_exit; +} + #else #define SYSCALL_METADATA(sname, nb, ...) + +static inline int is_syscall_trace_event(struct trace_event_call *tp_event) +{ + return 0; +} #endif #define SYSCALL_DEFINE0(sname) \ diff --git a/kernel/events/core.c b/kernel/events/core.c index 426c2ffba16d..a7a6c1d19a49 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -8050,7 +8050,7 @@ static void perf_event_free_bpf_handler(struct perf_event *event) static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) { - bool is_kprobe, is_tracepoint; + bool is_kprobe, is_tracepoint, is_syscall_tp; struct bpf_prog *prog; if (event->attr.type != PERF_TYPE_TRACEPOINT) @@ -8061,7 +8061,8 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_UKPROBE; is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; - if (!is_kprobe && !is_tracepoint) + is_syscall_tp = is_syscall_trace_event(event->tp_event); + if (!is_kprobe && !is_tracepoint && !is_syscall_tp) /* bpf programs can only be attached to u/kprobe or tracepoint */ return -EINVAL; @@ -8070,13 +8071,14 @@ static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd) return PTR_ERR(prog); if ((is_kprobe && prog->type != BPF_PROG_TYPE_KPROBE) || - (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT)) { + (is_tracepoint && prog->type != BPF_PROG_TYPE_TRACEPOINT) || + (is_syscall_tp && prog->type != BPF_PROG_TYPE_TRACEPOINT)) { /* valid fd, but invalid bpf program type */ bpf_prog_put(prog); return -EINVAL; } - if (is_tracepoint) { + if (is_tracepoint || is_syscall_tp) { int off = trace_event_get_offsets(event->tp_event); if (prog->aux->max_ctx_offset > off) { diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 5e10395da88e..7a1a92036563 100644 --- 
a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -559,11 +559,29 @@ static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls); static int sys_perf_refcount_enter; static int sys_perf_refcount_exit; +static int perf_call_bpf_enter(struct bpf_prog *prog, struct pt_regs *regs, + struct syscall_metadata *sys_data, + struct syscall_trace_enter *rec) { + struct syscall_tp_t { + unsigned long long regs; + unsigned long syscall_nr; + unsigned long args[sys_data->nb_args]; + } param; + int i; + + *(struct pt_regs **)¶m = regs; + param.syscall_nr = rec->nr; + for (i = 0; i < sys_data->nb_args; i++) + param.args[i] = rec->args[i]; + return trace_call_bpf(prog, ¶m); +} + static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) { struct syscall_metadata *sys_data; struct syscall_trace_enter *rec; struct hlist_head *head; + struct bpf_prog *prog; int syscall_nr; int rctx; int size; @@ -578,8 +596,9 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) if (!sys_data) return; + prog = READ_ONCE(sys_data->enter_event->prog); head = this_cpu_ptr(sys_data->enter_event->perf_events); - if (hlist_empty(head)) + if (!prog && hlist_empty(head)) return; /* get the size after alignment with the u32 buffer size field */ @@ -594,6 +613,13 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) rec->nr = syscall_nr; syscall_get_arguments(current, regs, 0, sys_data->nb_args, (unsigned long *)&rec->args); + + if ((prog && !perf_call_bpf_enter(prog, regs, sys_data, rec)) || + hlist_empty(head)) { + perf_swevent_put_recursion_context(rctx); + return; + } + perf_trace_buf_submit(rec, size, rctx, sys_data->enter_event->event.type, 1, regs, head, NULL); @@ -633,11 +659,26 @@ static void perf_sysenter_disable(struct trace_event_call *call) mutex_unlock(&syscall_trace_lock); } +static int perf_call_bpf_exit(struct bpf_prog *prog, struct pt_regs *regs, + struct syscall_trace_exit *rec) { + struct syscall_tp_t { + unsigned long long regs; + unsigned long syscall_nr; + unsigned long ret; + } param; + + *(struct pt_regs **)¶m = regs; + param.syscall_nr = rec->nr; + param.ret = rec->ret; + return trace_call_bpf(prog, ¶m); +} + static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) { struct syscall_metadata *sys_data; struct syscall_trace_exit *rec; struct hlist_head *head; + struct bpf_prog *prog; int syscall_nr; int rctx; int size; @@ -652,8 +693,9 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) if (!sys_data) return; + prog = READ_ONCE(sys_data->exit_event->prog); head = this_cpu_ptr(sys_data->exit_event->perf_events); - if (hlist_empty(head)) + if (!prog && hlist_empty(head)) return; /* We can probably do that at build time */ @@ -666,6 +708,13 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) rec->nr = syscall_nr; rec->ret = syscall_get_return_value(current, regs); + + if ((prog && !perf_call_bpf_exit(prog, regs, rec)) || + hlist_empty(head)) { + perf_swevent_put_recursion_context(rctx); + return; + } + perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type, 1, regs, head, NULL); } -- cgit v1.2.3-55-g7522 From 1da236b6be9632255ab034f22aca5b78d7c3c007 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 4 Aug 2017 16:00:10 -0700 Subject: bpf: add a test case for syscalls/sys_{enter|exit}_* tracepoints Signed-off-by: Yonghong Song Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- samples/bpf/Makefile | 4 +++ samples/bpf/syscall_tp_kern.c | 62 +++++++++++++++++++++++++++++++++++++ samples/bpf/syscall_tp_user.c | 71 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 137 insertions(+) create mode 100644 samples/bpf/syscall_tp_kern.c create mode 100644 samples/bpf/syscall_tp_user.c diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 770d46cdf9f4..f1010fe759fe 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -39,6 +39,7 @@ hostprogs-y += per_socket_stats_example hostprogs-y += load_sock_ops hostprogs-y += xdp_redirect hostprogs-y += xdp_redirect_map +hostprogs-y += syscall_tp # Libbpf dependencies LIBBPF := ../../tools/lib/bpf/bpf.o @@ -82,6 +83,7 @@ test_map_in_map-objs := bpf_load.o $(LIBBPF) test_map_in_map_user.o per_socket_stats_example-objs := $(LIBBPF) cookie_uid_helper_example.o xdp_redirect-objs := bpf_load.o $(LIBBPF) xdp_redirect_user.o xdp_redirect_map-objs := bpf_load.o $(LIBBPF) xdp_redirect_map_user.o +syscall_tp-objs := bpf_load.o $(LIBBPF) syscall_tp_user.o # Tell kbuild to always build the programs always := $(hostprogs-y) @@ -125,6 +127,7 @@ always += tcp_iw_kern.o always += tcp_clamp_kern.o always += xdp_redirect_kern.o always += xdp_redirect_map_kern.o +always += syscall_tp_kern.o HOSTCFLAGS += -I$(objtree)/usr/include HOSTCFLAGS += -I$(srctree)/tools/lib/ @@ -163,6 +166,7 @@ HOSTLOADLIBES_xdp_tx_iptunnel += -lelf HOSTLOADLIBES_test_map_in_map += -lelf HOSTLOADLIBES_xdp_redirect += -lelf HOSTLOADLIBES_xdp_redirect_map += -lelf +HOSTLOADLIBES_syscall_tp += -lelf # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c new file mode 100644 index 000000000000..9149c524d279 --- /dev/null +++ b/samples/bpf/syscall_tp_kern.c @@ -0,0 +1,62 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ +#include +#include "bpf_helpers.h" + +struct syscalls_enter_open_args { + unsigned long long unused; + long syscall_nr; + long filename_ptr; + long flags; + long mode; +}; + +struct syscalls_exit_open_args { + unsigned long long unused; + long syscall_nr; + long ret; +}; + +struct bpf_map_def SEC("maps") enter_open_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(u32), + .max_entries = 1, +}; + +struct bpf_map_def SEC("maps") exit_open_map = { + .type = BPF_MAP_TYPE_ARRAY, + .key_size = sizeof(u32), + .value_size = sizeof(u32), + .max_entries = 1, +}; + +static __always_inline void count(void *map) +{ + u32 key = 0; + u32 *value, init_val = 1; + + value = bpf_map_lookup_elem(map, &key); + if (value) + *value += 1; + else + bpf_map_update_elem(map, &key, &init_val, BPF_NOEXIST); +} + +SEC("tracepoint/syscalls/sys_enter_open") +int trace_enter_open(struct syscalls_enter_open_args *ctx) +{ + count((void *)&enter_open_map); + return 0; +} + +SEC("tracepoint/syscalls/sys_exit_open") +int trace_enter_exit(struct syscalls_exit_open_args *ctx) +{ + count((void *)&exit_open_map); + return 0; +} diff --git a/samples/bpf/syscall_tp_user.c b/samples/bpf/syscall_tp_user.c new file mode 100644 index 000000000000..a3cb91ebf4e7 --- /dev/null +++ b/samples/bpf/syscall_tp_user.c @@ -0,0 +1,71 @@ +/* Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "libbpf.h" +#include "bpf_load.h" + +/* This program verifies bpf attachment to tracepoint sys_enter_* and sys_exit_*. + * This requires kernel CONFIG_FTRACE_SYSCALLS to be set. + */ + +static void verify_map(int map_id) +{ + __u32 key = 0; + __u32 val; + + if (bpf_map_lookup_elem(map_id, &key, &val) != 0) { + fprintf(stderr, "map_lookup failed: %s\n", strerror(errno)); + return; + } + if (val == 0) + fprintf(stderr, "failed: map #%d returns value 0\n", map_id); +} + +int main(int argc, char **argv) +{ + struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; + char filename[256]; + int fd; + + setrlimit(RLIMIT_MEMLOCK, &r); + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + + if (load_bpf_file(filename)) { + fprintf(stderr, "%s", bpf_log_buf); + return 1; + } + + /* current load_bpf_file has perf_event_open default pid = -1 + * and cpu = 0, which permits attached bpf execution on + * all cpus for all pid's. bpf program execution ignores + * cpu affinity. + */ + /* trigger some "open" operations */ + fd = open(filename, O_RDONLY); + if (fd < 0) { + fprintf(stderr, "open failed: %s\n", strerror(errno)); + return 1; + } + close(fd); + + /* verify the map */ + verify_map(map_fd[0]); + verify_map(map_fd[1]); + + return 0; +} -- cgit v1.2.3-55-g7522 From 08bd10ffb44d11000c6b2781e8d9066567e50e27 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Fri, 4 Aug 2017 18:19:18 -0700 Subject: lwtunnel: replace EXPORT_SYMBOL with EXPORT_SYMBOL_GPL Signed-off-by: Roopa Prabhu Signed-off-by: David S. 
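The change is mechanical, but the effect is worth spelling out: a GPL-only export can only be resolved by modules whose MODULE_LICENSE() is GPL-compatible. For one of the affected symbols the before/after is simply:

	/* before: usable by any module */
	EXPORT_SYMBOL(lwtunnel_output);

	/* after: restricted to GPL-compatible modules */
	EXPORT_SYMBOL_GPL(lwtunnel_output);
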
Miller --- net/core/lwtunnel.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index d9cb3532f1dd..435f35f9a61c 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -65,7 +65,7 @@ struct lwtunnel_state *lwtunnel_state_alloc(int encap_len) return lws; } -EXPORT_SYMBOL(lwtunnel_state_alloc); +EXPORT_SYMBOL_GPL(lwtunnel_state_alloc); static const struct lwtunnel_encap_ops __rcu * lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly; @@ -80,7 +80,7 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops, &lwtun_encaps[num], NULL, ops) ? 0 : -1; } -EXPORT_SYMBOL(lwtunnel_encap_add_ops); +EXPORT_SYMBOL_GPL(lwtunnel_encap_add_ops); int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops, unsigned int encap_type) @@ -99,7 +99,7 @@ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops, return ret; } -EXPORT_SYMBOL(lwtunnel_encap_del_ops); +EXPORT_SYMBOL_GPL(lwtunnel_encap_del_ops); int lwtunnel_build_state(u16 encap_type, struct nlattr *encap, unsigned int family, @@ -138,7 +138,7 @@ int lwtunnel_build_state(u16 encap_type, return ret; } -EXPORT_SYMBOL(lwtunnel_build_state); +EXPORT_SYMBOL_GPL(lwtunnel_build_state); int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack) { @@ -175,7 +175,7 @@ int lwtunnel_valid_encap_type(u16 encap_type, struct netlink_ext_ack *extack) return ret; } -EXPORT_SYMBOL(lwtunnel_valid_encap_type); +EXPORT_SYMBOL_GPL(lwtunnel_valid_encap_type); int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining, struct netlink_ext_ack *extack) @@ -205,7 +205,7 @@ int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining, return 0; } -EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr); +EXPORT_SYMBOL_GPL(lwtunnel_valid_encap_type_attr); void lwtstate_free(struct lwtunnel_state *lws) { @@ -219,7 +219,7 @@ void lwtstate_free(struct lwtunnel_state *lws) } module_put(ops->owner); } -EXPORT_SYMBOL(lwtstate_free); +EXPORT_SYMBOL_GPL(lwtstate_free); int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate) { @@ -259,7 +259,7 @@ nla_put_failure: return (ret == -EOPNOTSUPP ? 
0 : ret); } -EXPORT_SYMBOL(lwtunnel_fill_encap); +EXPORT_SYMBOL_GPL(lwtunnel_fill_encap); int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate) { @@ -281,7 +281,7 @@ int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate) return ret; } -EXPORT_SYMBOL(lwtunnel_get_encap_size); +EXPORT_SYMBOL_GPL(lwtunnel_get_encap_size); int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b) { @@ -309,7 +309,7 @@ int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b) return ret; } -EXPORT_SYMBOL(lwtunnel_cmp_encap); +EXPORT_SYMBOL_GPL(lwtunnel_cmp_encap); int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) { @@ -343,7 +343,7 @@ drop: return ret; } -EXPORT_SYMBOL(lwtunnel_output); +EXPORT_SYMBOL_GPL(lwtunnel_output); int lwtunnel_xmit(struct sk_buff *skb) { @@ -378,7 +378,7 @@ drop: return ret; } -EXPORT_SYMBOL(lwtunnel_xmit); +EXPORT_SYMBOL_GPL(lwtunnel_xmit); int lwtunnel_input(struct sk_buff *skb) { @@ -412,4 +412,4 @@ drop: return ret; } -EXPORT_SYMBOL(lwtunnel_input); +EXPORT_SYMBOL_GPL(lwtunnel_input); -- cgit v1.2.3-55-g7522 From 54df2cf819a23dba4bb2c4134ed62659a7d324f5 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Fri, 4 Aug 2017 21:31:42 -0700 Subject: net_sched: refactor notification code for RTM_DELTFILTER It is confusing to use 'unsigned long fh' as both a handle and a pointer, especially commit 9ee7837449b3 ("net sched filters: fix notification of filter delete with proper handle"). This patch introduces tfilter_del_notify() so that we can pass it as a pointer as before, and we don't need to check RTM_DELTFILTER in tcf_fill_node() any more. This prepares for the next patch. Cc: Jamal Hadi Salim Cc: Jiri Pirko Signed-off-by: Cong Wang Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
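The ordering is the point of the helper: the netlink message is filled in while the filter (and its handle) still exists, the delete runs next, and the notification is only sent if the delete succeeded. A condensed view of tfilter_del_notify() from the diff below, with the unicast branch omitted:

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	/* dump the filter while fh is still valid */
	if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
			  n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last);
	if (err) {
		kfree_skb(skb);		/* nothing deleted, drop the message */
		return err;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
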
Miller --- net/sched/cls_api.c | 44 +++++++++++++++++++++++++++++++++++++++----- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index e655221c654e..afd099727aea 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -104,6 +104,10 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, unsigned long fh, int event, bool unicast); +static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, + struct nlmsghdr *n, struct tcf_proto *tp, + unsigned long fh, bool unicast, bool *last); + static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_chain *chain, int event) @@ -595,11 +599,10 @@ replay: } break; case RTM_DELTFILTER: - err = tp->ops->delete(tp, fh, &last); + err = tfilter_del_notify(net, skb, n, tp, fh, false, + &last); if (err) goto errout; - tfilter_notify(net, skb, n, tp, t->tcm_handle, - RTM_DELTFILTER, false); if (last) { tcf_chain_tp_remove(chain, &chain_info, tp); tcf_proto_destroy(tp); @@ -659,9 +662,9 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) goto nla_put_failure; - tcm->tcm_handle = fh; - if (RTM_DELTFILTER != event) { + if (!fh) { tcm->tcm_handle = 0; + } else { if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0) goto nla_put_failure; } @@ -698,6 +701,37 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb, n->nlmsg_flags & NLM_F_ECHO); } +static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, + struct nlmsghdr *n, struct tcf_proto *tp, + unsigned long fh, bool unicast, bool *last) +{ + struct sk_buff *skb; + u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; + int err; + + skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return -ENOBUFS; + + if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, + n->nlmsg_flags, RTM_DELTFILTER) <= 0) { + kfree_skb(skb); + return -EINVAL; + } + + err = tp->ops->delete(tp, fh, last); + if (err) { + kfree_skb(skb); + return err; + } + + if (unicast) + return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); + + return rtnetlink_send(skb, net, portid, RTNLGRP_TC, + n->nlmsg_flags & NLM_F_ECHO); +} + struct tcf_dump_args { struct tcf_walker w; struct sk_buff *skb; -- cgit v1.2.3-55-g7522 From 8113c095672f6504b23eba6edf4a57b5f7f744af Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Fri, 4 Aug 2017 21:31:43 -0700 Subject: net_sched: use void pointer for filter handle Now we use 'unsigned long fh' as a pointer in every place, it is safe to convert it to a void pointer now. This gets rid of many casts to pointer. Cc: Jamal Hadi Salim Cc: Jiri Pirko Signed-off-by: Cong Wang Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
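After the conversion the handle is a plain pointer all the way through the classifier ops, so per-classifier code can drop casts like "(struct fw_filter *)arg". Condensed from the sch_generic.h hunk below, with parameter names added here for readability:

	void *(*get)(struct tcf_proto *, u32 handle);
	int (*change)(struct net *net, struct sk_buff *,
		      struct tcf_proto *, unsigned long base, u32 handle,
		      struct nlattr **, void **arg, bool ovr);
	int (*delete)(struct tcf_proto *, void *arg, bool *last);
	int (*dump)(struct net *, struct tcf_proto *, void *fh,
		    struct sk_buff *skb, struct tcmsg *);
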
Miller --- include/net/pkt_cls.h | 2 +- include/net/sch_generic.h | 8 ++++---- net/sched/cls_api.c | 17 ++++++++--------- net/sched/cls_basic.c | 22 ++++++++++------------ net/sched/cls_bpf.c | 27 ++++++++++++--------------- net/sched/cls_cgroup.c | 12 ++++++------ net/sched/cls_flow.c | 24 ++++++++++++------------ net/sched/cls_flower.c | 22 +++++++++++----------- net/sched/cls_fw.c | 26 +++++++++++++------------- net/sched/cls_matchall.c | 16 ++++++++-------- net/sched/cls_route.c | 26 +++++++++++++------------- net/sched/cls_rsvp.h | 24 ++++++++++++------------ net/sched/cls_tcindex.c | 36 ++++++++++++++++-------------------- net/sched/cls_u32.c | 30 +++++++++++++++--------------- 14 files changed, 141 insertions(+), 151 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 572083af02ac..0f78e6560b2d 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -11,7 +11,7 @@ struct tcf_walker { int stop; int skip; int count; - int (*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *); + int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *); }; int register_tcf_proto_ops(struct tcf_proto_ops *ops); diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 1c123e2b2415..e79f5ad1c5f3 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -213,16 +213,16 @@ struct tcf_proto_ops { int (*init)(struct tcf_proto*); void (*destroy)(struct tcf_proto*); - unsigned long (*get)(struct tcf_proto*, u32 handle); + void* (*get)(struct tcf_proto*, u32 handle); int (*change)(struct net *net, struct sk_buff *, struct tcf_proto*, unsigned long, u32 handle, struct nlattr **, - unsigned long *, bool); - int (*delete)(struct tcf_proto*, unsigned long, bool*); + void **, bool); + int (*delete)(struct tcf_proto*, void *, bool*); void (*walk)(struct tcf_proto*, struct tcf_walker *arg); /* rtnetlink specific */ - int (*dump)(struct net*, struct tcf_proto*, unsigned long, + int (*dump)(struct net*, struct tcf_proto*, void *, struct sk_buff *skb, struct tcmsg*); struct module *owner; diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index afd099727aea..668afb6e9885 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -102,11 +102,11 @@ EXPORT_SYMBOL(unregister_tcf_proto_ops); static int tfilter_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, - unsigned long fh, int event, bool unicast); + void *fh, int event, bool unicast); static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, - unsigned long fh, bool unicast, bool *last); + void *fh, bool unicast, bool *last); static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, @@ -432,7 +432,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, struct tcf_proto *tp; const struct Qdisc_class_ops *cops; unsigned long cl; - unsigned long fh; + void *fh; int err; int tp_created; @@ -571,7 +571,7 @@ replay: fh = tp->ops->get(tp, t->tcm_handle); - if (fh == 0) { + if (!fh) { if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) { tcf_chain_tp_remove(chain, &chain_info, tp); tfilter_notify(net, skb, n, tp, fh, @@ -641,7 +641,7 @@ errout: } static int tcf_fill_node(struct net *net, struct sk_buff *skb, - struct tcf_proto *tp, unsigned long fh, u32 portid, + struct tcf_proto *tp, void *fh, u32 portid, u32 seq, u16 flags, int event) { struct tcmsg *tcm; @@ -679,7 +679,7 @@ nla_put_failure: static int tfilter_notify(struct net *net, 
struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, - unsigned long fh, int event, bool unicast) + void *fh, int event, bool unicast) { struct sk_buff *skb; u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; @@ -703,7 +703,7 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb, static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, struct nlmsghdr *n, struct tcf_proto *tp, - unsigned long fh, bool unicast, bool *last) + void *fh, bool unicast, bool *last) { struct sk_buff *skb; u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; @@ -738,8 +738,7 @@ struct tcf_dump_args { struct netlink_callback *cb; }; -static int tcf_node_dump(struct tcf_proto *tp, unsigned long n, - struct tcf_walker *arg) +static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg) { struct tcf_dump_args *a = (void *)arg; struct net *net = sock_net(a->skb->sk); diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index 7c7a82138f76..73cc7f167a38 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -56,20 +56,18 @@ static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, return -1; } -static unsigned long basic_get(struct tcf_proto *tp, u32 handle) +static void *basic_get(struct tcf_proto *tp, u32 handle) { - unsigned long l = 0UL; struct basic_head *head = rtnl_dereference(tp->root); struct basic_filter *f; list_for_each_entry(f, &head->flist, link) { if (f->handle == handle) { - l = (unsigned long) f; - break; + return f; } } - return l; + return NULL; } static int basic_init(struct tcf_proto *tp) @@ -106,10 +104,10 @@ static void basic_destroy(struct tcf_proto *tp) kfree_rcu(head, rcu); } -static int basic_delete(struct tcf_proto *tp, unsigned long arg, bool *last) +static int basic_delete(struct tcf_proto *tp, void *arg, bool *last) { struct basic_head *head = rtnl_dereference(tp->root); - struct basic_filter *f = (struct basic_filter *) arg; + struct basic_filter *f = arg; list_del_rcu(&f->link); tcf_unbind_filter(tp, &f->res); @@ -149,7 +147,7 @@ static int basic_set_parms(struct net *net, struct tcf_proto *tp, static int basic_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, - struct nlattr **tca, unsigned long *arg, bool ovr) + struct nlattr **tca, void **arg, bool ovr) { int err; struct basic_head *head = rtnl_dereference(tp->root); @@ -202,7 +200,7 @@ static int basic_change(struct net *net, struct sk_buff *in_skb, if (err < 0) goto errout; - *arg = (unsigned long)fnew; + *arg = fnew; if (fold) { list_replace_rcu(&fold->link, &fnew->link); @@ -228,7 +226,7 @@ static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg) if (arg->count < arg->skip) goto skip; - if (arg->fn(tp, (unsigned long) f, arg) < 0) { + if (arg->fn(tp, f, arg) < 0) { arg->stop = 1; break; } @@ -237,10 +235,10 @@ skip: } } -static int basic_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { - struct basic_filter *f = (struct basic_filter *) fh; + struct basic_filter *f = fh; struct nlattr *nest; if (f == NULL) diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 2d4d06e41cd9..db17b68df94e 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -270,11 +270,11 @@ static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog) call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu); } -static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg, 
bool *last) +static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last) { struct cls_bpf_head *head = rtnl_dereference(tp->root); - __cls_bpf_delete(tp, (struct cls_bpf_prog *) arg); + __cls_bpf_delete(tp, arg); *last = list_empty(&head->plist); return 0; } @@ -290,20 +290,17 @@ static void cls_bpf_destroy(struct tcf_proto *tp) kfree_rcu(head, rcu); } -static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle) +static void *cls_bpf_get(struct tcf_proto *tp, u32 handle) { struct cls_bpf_head *head = rtnl_dereference(tp->root); struct cls_bpf_prog *prog; - unsigned long ret = 0UL; list_for_each_entry(prog, &head->plist, link) { - if (prog->handle == handle) { - ret = (unsigned long) prog; - break; - } + if (prog->handle == handle) + return prog; } - return ret; + return NULL; } static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog) @@ -448,10 +445,10 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp, static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, - unsigned long *arg, bool ovr) + void **arg, bool ovr) { struct cls_bpf_head *head = rtnl_dereference(tp->root); - struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg; + struct cls_bpf_prog *oldprog = *arg; struct nlattr *tb[TCA_BPF_MAX + 1]; struct cls_bpf_prog *prog; int ret; @@ -509,7 +506,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, list_add_rcu(&prog->link, &head->plist); } - *arg = (unsigned long) prog; + *arg = prog; return 0; errout: @@ -557,10 +554,10 @@ static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog, return 0; } -static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *tm) { - struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh; + struct cls_bpf_prog *prog = fh; struct nlattr *nest; u32 bpf_flags = 0; int ret; @@ -618,7 +615,7 @@ static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg) list_for_each_entry(prog, &head->plist, link) { if (arg->count < arg->skip) goto skip; - if (arg->fn(tp, (unsigned long) prog, arg) < 0) { + if (arg->fn(tp, prog, arg) < 0) { arg->stop = 1; break; } diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index df7a582775df..d48452f87975 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -43,9 +43,9 @@ static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, return tcf_exts_exec(skb, &head->exts, res); } -static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle) +static void *cls_cgroup_get(struct tcf_proto *tp, u32 handle) { - return 0UL; + return NULL; } static int cls_cgroup_init(struct tcf_proto *tp) @@ -71,7 +71,7 @@ static void cls_cgroup_destroy_rcu(struct rcu_head *root) static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, - unsigned long *arg, bool ovr) + void **arg, bool ovr) { struct nlattr *tb[TCA_CGROUP_MAX + 1]; struct cls_cgroup_head *head = rtnl_dereference(tp->root); @@ -128,7 +128,7 @@ static void cls_cgroup_destroy(struct tcf_proto *tp) call_rcu(&head->rcu, cls_cgroup_destroy_rcu); } -static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg, bool *last) +static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last) { return -EOPNOTSUPP; } @@ -140,7 +140,7 @@ static void 
cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg) if (arg->count < arg->skip) goto skip; - if (arg->fn(tp, (unsigned long) head, arg) < 0) { + if (arg->fn(tp, head, arg) < 0) { arg->stop = 1; return; } @@ -148,7 +148,7 @@ skip: arg->count++; } -static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { struct cls_cgroup_head *head = rtnl_dereference(tp->root); diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 55e281b20140..2a3a60ec5b86 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -382,7 +382,7 @@ static void flow_destroy_filter(struct rcu_head *head) static int flow_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, - unsigned long *arg, bool ovr) + void **arg, bool ovr) { struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *fold, *fnew; @@ -439,7 +439,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (err < 0) goto err2; - fold = (struct flow_filter *)*arg; + fold = *arg; if (fold) { err = -EINVAL; if (fold->handle != handle && handle) @@ -532,12 +532,12 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (perturb_period) mod_timer(&fnew->perturb_timer, jiffies + perturb_period); - if (*arg == 0) + if (!*arg) list_add_tail_rcu(&fnew->list, &head->filters); else list_replace_rcu(&fold->list, &fnew->list); - *arg = (unsigned long)fnew; + *arg = fnew; if (fold) call_rcu(&fold->rcu, flow_destroy_filter); @@ -551,10 +551,10 @@ err1: return err; } -static int flow_delete(struct tcf_proto *tp, unsigned long arg, bool *last) +static int flow_delete(struct tcf_proto *tp, void *arg, bool *last) { struct flow_head *head = rtnl_dereference(tp->root); - struct flow_filter *f = (struct flow_filter *)arg; + struct flow_filter *f = arg; list_del_rcu(&f->list); call_rcu(&f->rcu, flow_destroy_filter); @@ -586,21 +586,21 @@ static void flow_destroy(struct tcf_proto *tp) kfree_rcu(head, rcu); } -static unsigned long flow_get(struct tcf_proto *tp, u32 handle) +static void *flow_get(struct tcf_proto *tp, u32 handle) { struct flow_head *head = rtnl_dereference(tp->root); struct flow_filter *f; list_for_each_entry(f, &head->filters, list) if (f->handle == handle) - return (unsigned long)f; - return 0; + return f; + return NULL; } -static int flow_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { - struct flow_filter *f = (struct flow_filter *)fh; + struct flow_filter *f = fh; struct nlattr *nest; if (f == NULL) @@ -666,7 +666,7 @@ static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg) list_for_each_entry(f, &head->filters, list) { if (arg->count < arg->skip) goto skip; - if (arg->fn(tp, (unsigned long)f, arg) < 0) { + if (arg->fn(tp, f, arg) < 0) { arg->stop = 1; break; } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 1474bacf4df4..d2551a03c542 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -332,15 +332,15 @@ static void fl_destroy(struct tcf_proto *tp) call_rcu(&head->rcu, fl_destroy_rcu); } -static unsigned long fl_get(struct tcf_proto *tp, u32 handle) +static void *fl_get(struct tcf_proto *tp, u32 handle) { struct cls_fl_head *head = rtnl_dereference(tp->root); struct cls_fl_filter *f; list_for_each_entry(f, &head->filters, 
list) if (f->handle == handle) - return (unsigned long) f; - return 0; + return f; + return NULL; } static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { @@ -883,10 +883,10 @@ static u32 fl_grab_new_handle(struct tcf_proto *tp, static int fl_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, - unsigned long *arg, bool ovr) + void **arg, bool ovr) { struct cls_fl_head *head = rtnl_dereference(tp->root); - struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg; + struct cls_fl_filter *fold = *arg; struct cls_fl_filter *fnew; struct nlattr **tb; struct fl_flow_mask mask = {}; @@ -977,7 +977,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, fl_hw_destroy_filter(tp, fold); } - *arg = (unsigned long) fnew; + *arg = fnew; if (fold) { list_replace_rcu(&fold->list, &fnew->list); @@ -998,10 +998,10 @@ errout_tb: return err; } -static int fl_delete(struct tcf_proto *tp, unsigned long arg, bool *last) +static int fl_delete(struct tcf_proto *tp, void *arg, bool *last) { struct cls_fl_head *head = rtnl_dereference(tp->root); - struct cls_fl_filter *f = (struct cls_fl_filter *) arg; + struct cls_fl_filter *f = arg; if (!tc_skip_sw(f->flags)) rhashtable_remove_fast(&head->ht, &f->ht_node, @@ -1019,7 +1019,7 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg) list_for_each_entry_rcu(f, &head->filters, list) { if (arg->count < arg->skip) goto skip; - if (arg->fn(tp, (unsigned long) f, arg) < 0) { + if (arg->fn(tp, f, arg) < 0) { arg->stop = 1; break; } @@ -1154,11 +1154,11 @@ static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) return nla_put(skb, TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask); } -static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { struct cls_fl_head *head = rtnl_dereference(tp->root); - struct cls_fl_filter *f = (struct cls_fl_filter *) fh; + struct cls_fl_filter *f = fh; struct nlattr *nest; struct fl_flow_key *key, *mask; diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 11f178f1b2be..192255ec50bd 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -95,20 +95,20 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, return -1; } -static unsigned long fw_get(struct tcf_proto *tp, u32 handle) +static void *fw_get(struct tcf_proto *tp, u32 handle) { struct fw_head *head = rtnl_dereference(tp->root); struct fw_filter *f; if (head == NULL) - return 0; + return NULL; f = rtnl_dereference(head->ht[fw_hash(handle)]); for (; f; f = rtnl_dereference(f->next)) { if (f->id == handle) - return (unsigned long)f; + return f; } - return 0; + return NULL; } static int fw_init(struct tcf_proto *tp) @@ -147,10 +147,10 @@ static void fw_destroy(struct tcf_proto *tp) kfree_rcu(head, rcu); } -static int fw_delete(struct tcf_proto *tp, unsigned long arg, bool *last) +static int fw_delete(struct tcf_proto *tp, void *arg, bool *last) { struct fw_head *head = rtnl_dereference(tp->root); - struct fw_filter *f = (struct fw_filter *)arg; + struct fw_filter *f = arg; struct fw_filter __rcu **fp; struct fw_filter *pfp; int ret = -EINVAL; @@ -230,11 +230,11 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp, static int fw_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, - u32 handle, struct nlattr **tca, unsigned long *arg, + u32 handle, struct nlattr **tca, 
void **arg, bool ovr) { struct fw_head *head = rtnl_dereference(tp->root); - struct fw_filter *f = (struct fw_filter *) *arg; + struct fw_filter *f = *arg; struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_FW_MAX + 1]; int err; @@ -288,7 +288,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, tcf_unbind_filter(tp, &f->res); call_rcu(&f->rcu, fw_delete_filter); - *arg = (unsigned long)fnew; + *arg = fnew; return err; } @@ -325,7 +325,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]); rcu_assign_pointer(head->ht[fw_hash(handle)], f); - *arg = (unsigned long)f; + *arg = f; return 0; errout: @@ -354,7 +354,7 @@ static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) arg->count++; continue; } - if (arg->fn(tp, (unsigned long)f, arg) < 0) { + if (arg->fn(tp, f, arg) < 0) { arg->stop = 1; return; } @@ -363,11 +363,11 @@ static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg) } } -static int fw_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int fw_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { struct fw_head *head = rtnl_dereference(tp->root); - struct fw_filter *f = (struct fw_filter *)fh; + struct fw_filter *f = fh; struct nlattr *nest; if (f == NULL) diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index c9f6500b8080..d44e26fdae84 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -98,9 +98,9 @@ static void mall_destroy(struct tcf_proto *tp) call_rcu(&head->rcu, mall_destroy_rcu); } -static unsigned long mall_get(struct tcf_proto *tp, u32 handle) +static void *mall_get(struct tcf_proto *tp, u32 handle) { - return 0UL; + return NULL; } static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = { @@ -129,7 +129,7 @@ static int mall_set_parms(struct net *net, struct tcf_proto *tp, static int mall_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, - unsigned long *arg, bool ovr) + void **arg, bool ovr) { struct cls_mall_head *head = rtnl_dereference(tp->root); struct net_device *dev = tp->q->dev_queue->dev; @@ -185,7 +185,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, if (!tc_in_hw(new->flags)) new->flags |= TCA_CLS_FLAGS_NOT_IN_HW; - *arg = (unsigned long) head; + *arg = head; rcu_assign_pointer(tp->root, new); return 0; @@ -197,7 +197,7 @@ err_exts_init: return err; } -static int mall_delete(struct tcf_proto *tp, unsigned long arg, bool *last) +static int mall_delete(struct tcf_proto *tp, void *arg, bool *last) { return -EOPNOTSUPP; } @@ -208,16 +208,16 @@ static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg) if (arg->count < arg->skip) goto skip; - if (arg->fn(tp, (unsigned long) head, arg) < 0) + if (arg->fn(tp, head, arg) < 0) arg->stop = 1; skip: arg->count++; } -static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { - struct cls_mall_head *head = (struct cls_mall_head *) fh; + struct cls_mall_head *head = fh; struct nlattr *nest; if (!head) diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index f1e7d7850b44..3b70982394ce 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -216,7 +216,7 @@ static inline u32 from_hash(u32 id) return 16 + (id & 0xF); } -static unsigned long route4_get(struct tcf_proto *tp, 
u32 handle) +static void *route4_get(struct tcf_proto *tp, u32 handle) { struct route4_head *head = rtnl_dereference(tp->root); struct route4_bucket *b; @@ -225,11 +225,11 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle) h1 = to_hash(handle); if (h1 > 256) - return 0; + return NULL; h2 = from_hash(handle >> 16); if (h2 > 32) - return 0; + return NULL; b = rtnl_dereference(head->table[h1]); if (b) { @@ -237,9 +237,9 @@ static unsigned long route4_get(struct tcf_proto *tp, u32 handle) f; f = rtnl_dereference(f->next)) if (f->handle == handle) - return (unsigned long)f; + return f; } - return 0; + return NULL; } static int route4_init(struct tcf_proto *tp) @@ -294,10 +294,10 @@ static void route4_destroy(struct tcf_proto *tp) kfree_rcu(head, rcu); } -static int route4_delete(struct tcf_proto *tp, unsigned long arg, bool *last) +static int route4_delete(struct tcf_proto *tp, void *arg, bool *last) { struct route4_head *head = rtnl_dereference(tp->root); - struct route4_filter *f = (struct route4_filter *)arg; + struct route4_filter *f = arg; struct route4_filter __rcu **fp; struct route4_filter *nf; struct route4_bucket *b; @@ -448,7 +448,7 @@ static int route4_set_parms(struct net *net, struct tcf_proto *tp, static int route4_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, - struct nlattr **tca, unsigned long *arg, bool ovr) + struct nlattr **tca, void **arg, bool ovr) { struct route4_head *head = rtnl_dereference(tp->root); struct route4_filter __rcu **fp; @@ -467,7 +467,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, if (err < 0) return err; - fold = (struct route4_filter *)*arg; + fold = *arg; if (fold && handle && fold->handle != handle) return -EINVAL; @@ -525,7 +525,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, } route4_reset_fastmap(head); - *arg = (unsigned long)f; + *arg = f; if (fold) { tcf_unbind_filter(tp, &fold->res); call_rcu(&fold->rcu, route4_delete_filter); @@ -564,7 +564,7 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) arg->count++; continue; } - if (arg->fn(tp, (unsigned long)f, arg) < 0) { + if (arg->fn(tp, f, arg) < 0) { arg->stop = 1; return; } @@ -575,10 +575,10 @@ static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg) } } -static int route4_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { - struct route4_filter *f = (struct route4_filter *)fh; + struct route4_filter *f = fh; struct nlattr *nest; u32 id; diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index 4adb67a73491..26203ff817f3 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h @@ -248,7 +248,7 @@ static void rsvp_replace(struct tcf_proto *tp, struct rsvp_filter *n, u32 h) BUG_ON(1); } -static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) +static void *rsvp_get(struct tcf_proto *tp, u32 handle) { struct rsvp_head *head = rtnl_dereference(tp->root); struct rsvp_session *s; @@ -257,17 +257,17 @@ static unsigned long rsvp_get(struct tcf_proto *tp, u32 handle) unsigned int h2 = (handle >> 8) & 0xFF; if (h2 > 16) - return 0; + return NULL; for (s = rtnl_dereference(head->ht[h1]); s; s = rtnl_dereference(s->next)) { for (f = rtnl_dereference(s->ht[h2]); f; f = rtnl_dereference(f->next)) { if (f->handle == handle) - return (unsigned long)f; + return f; } } - return 0; + return NULL; } static int 
rsvp_init(struct tcf_proto *tp) @@ -328,10 +328,10 @@ static void rsvp_destroy(struct tcf_proto *tp) kfree_rcu(data, rcu); } -static int rsvp_delete(struct tcf_proto *tp, unsigned long arg, bool *last) +static int rsvp_delete(struct tcf_proto *tp, void *arg, bool *last) { struct rsvp_head *head = rtnl_dereference(tp->root); - struct rsvp_filter *nfp, *f = (struct rsvp_filter *)arg; + struct rsvp_filter *nfp, *f = arg; struct rsvp_filter __rcu **fp; unsigned int h = f->handle; struct rsvp_session __rcu **sp; @@ -464,7 +464,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, struct nlattr **tca, - unsigned long *arg, bool ovr) + void **arg, bool ovr) { struct rsvp_head *data = rtnl_dereference(tp->root); struct rsvp_filter *f, *nfp; @@ -493,7 +493,7 @@ static int rsvp_change(struct net *net, struct sk_buff *in_skb, if (err < 0) goto errout2; - f = (struct rsvp_filter *)*arg; + f = *arg; if (f) { /* Node exists: adjust only classid */ struct rsvp_filter *n; @@ -604,7 +604,7 @@ insert: RCU_INIT_POINTER(f->next, nfp); rcu_assign_pointer(*fp, f); - *arg = (unsigned long)f; + *arg = f; return 0; } } @@ -663,7 +663,7 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) arg->count++; continue; } - if (arg->fn(tp, (unsigned long)f, arg) < 0) { + if (arg->fn(tp, f, arg) < 0) { arg->stop = 1; return; } @@ -674,10 +674,10 @@ static void rsvp_walk(struct tcf_proto *tp, struct tcf_walker *arg) } } -static int rsvp_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int rsvp_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { - struct rsvp_filter *f = (struct rsvp_filter *)fh; + struct rsvp_filter *f = fh; struct rsvp_session *s; struct nlattr *nest; struct tc_rsvp_pinfo pinfo; diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index d69f828f3fed..fb281b9b2c52 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c @@ -104,16 +104,16 @@ static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp, } -static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle) +static void *tcindex_get(struct tcf_proto *tp, u32 handle) { struct tcindex_data *p = rtnl_dereference(tp->root); struct tcindex_filter_result *r; pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle); if (p->perfect && handle >= p->alloc_hash) - return 0; + return NULL; r = tcindex_lookup(p, handle); - return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL; + return r && tcindex_filter_is_set(r) ? 
r : NULL; } static int tcindex_init(struct tcf_proto *tp) @@ -150,14 +150,14 @@ static void tcindex_destroy_fexts(struct rcu_head *head) kfree(f); } -static int tcindex_delete(struct tcf_proto *tp, unsigned long arg, bool *last) +static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last) { struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg; + struct tcindex_filter_result *r = arg; struct tcindex_filter __rcu **walk; struct tcindex_filter *f = NULL; - pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p\n", tp, arg, p); + pr_debug("tcindex_delete(tp %p,arg %p),p %p\n", tp, arg, p); if (p->perfect) { if (!r->res.class) return -ENOENT; @@ -192,8 +192,7 @@ found: } static int tcindex_destroy_element(struct tcf_proto *tp, - unsigned long arg, - struct tcf_walker *walker) + void *arg, struct tcf_walker *walker) { bool last; @@ -471,17 +470,17 @@ errout: static int tcindex_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, - struct nlattr **tca, unsigned long *arg, bool ovr) + struct nlattr **tca, void **arg, bool ovr) { struct nlattr *opt = tca[TCA_OPTIONS]; struct nlattr *tb[TCA_TCINDEX_MAX + 1]; struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg; + struct tcindex_filter_result *r = *arg; int err; pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p," - "p %p,r %p,*arg 0x%lx\n", - tp, handle, tca, arg, opt, p, r, arg ? *arg : 0L); + "p %p,r %p,*arg %p\n", + tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL); if (!opt) return 0; @@ -506,9 +505,7 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) if (!p->perfect[i].res.class) continue; if (walker->count >= walker->skip) { - if (walker->fn(tp, - (unsigned long) (p->perfect+i), walker) - < 0) { + if (walker->fn(tp, p->perfect + i, walker) < 0) { walker->stop = 1; return; } @@ -522,8 +519,7 @@ static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) for (f = rtnl_dereference(p->h[i]); f; f = next) { next = rtnl_dereference(f->next); if (walker->count >= walker->skip) { - if (walker->fn(tp, (unsigned long) &f->result, - walker) < 0) { + if (walker->fn(tp, &f->result, walker) < 0) { walker->stop = 1; return; } @@ -548,14 +544,14 @@ static void tcindex_destroy(struct tcf_proto *tp) } -static int tcindex_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int tcindex_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { struct tcindex_data *p = rtnl_dereference(tp->root); - struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh; + struct tcindex_filter_result *r = fh; struct nlattr *nest; - pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p\n", + pr_debug("tcindex_dump(tp %p,fh %p,skb %p,t %p),p %p,r %p\n", tp, fh, skb, t, p, r); pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 4ed51d347d0a..5a3f78181526 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -289,7 +289,7 @@ out: } -static unsigned long u32_get(struct tcf_proto *tp, u32 handle) +static void *u32_get(struct tcf_proto *tp, u32 handle) { struct tc_u_hnode *ht; struct tc_u_common *tp_c = tp->data; @@ -300,12 +300,12 @@ static unsigned long u32_get(struct tcf_proto *tp, u32 handle) ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle)); if (!ht) - return 0; + return NULL; if 
(TC_U32_KEY(handle) == 0) - return (unsigned long)ht; + return ht; - return (unsigned long)u32_lookup_key(ht, handle); + return u32_lookup_key(ht, handle); } static u32 gen_new_htid(struct tc_u_common *tp_c) @@ -605,9 +605,9 @@ static void u32_destroy(struct tcf_proto *tp) tp->data = NULL; } -static int u32_delete(struct tcf_proto *tp, unsigned long arg, bool *last) +static int u32_delete(struct tcf_proto *tp, void *arg, bool *last) { - struct tc_u_hnode *ht = (struct tc_u_hnode *)arg; + struct tc_u_hnode *ht = arg; struct tc_u_hnode *root_ht = rtnl_dereference(tp->root); struct tc_u_common *tp_c = tp->data; int ret = 0; @@ -831,7 +831,7 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp, static int u32_change(struct net *net, struct sk_buff *in_skb, struct tcf_proto *tp, unsigned long base, u32 handle, - struct nlattr **tca, unsigned long *arg, bool ovr) + struct nlattr **tca, void **arg, bool ovr) { struct tc_u_common *tp_c = tp->data; struct tc_u_hnode *ht; @@ -858,7 +858,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; } - n = (struct tc_u_knode *)*arg; + n = *arg; if (n) { struct tc_u_knode *new; @@ -925,7 +925,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, RCU_INIT_POINTER(ht->next, tp_c->hlist); rcu_assign_pointer(tp_c->hlist, ht); - *arg = (unsigned long)ht; + *arg = ht; return 0; } @@ -1020,7 +1020,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, RCU_INIT_POINTER(n->next, pins); rcu_assign_pointer(*ins, n); - *arg = (unsigned long)n; + *arg = n; return 0; } @@ -1054,7 +1054,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) if (ht->prio != tp->prio) continue; if (arg->count >= arg->skip) { - if (arg->fn(tp, (unsigned long)ht, arg) < 0) { + if (arg->fn(tp, ht, arg) < 0) { arg->stop = 1; return; } @@ -1068,7 +1068,7 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) arg->count++; continue; } - if (arg->fn(tp, (unsigned long)n, arg) < 0) { + if (arg->fn(tp, n, arg) < 0) { arg->stop = 1; return; } @@ -1078,10 +1078,10 @@ static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg) } } -static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, +static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh, struct sk_buff *skb, struct tcmsg *t) { - struct tc_u_knode *n = (struct tc_u_knode *)fh; + struct tc_u_knode *n = fh; struct tc_u_hnode *ht_up, *ht_down; struct nlattr *nest; @@ -1095,7 +1095,7 @@ static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh, goto nla_put_failure; if (TC_U32_KEY(n->handle) == 0) { - struct tc_u_hnode *ht = (struct tc_u_hnode *)fh; + struct tc_u_hnode *ht = fh; u32 divisor = ht->divisor + 1; if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor)) -- cgit v1.2.3-55-g7522 From 4cc7b9544b9a904add353406ed1bacbf56f75c52 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Fri, 4 Aug 2017 22:02:19 -0700 Subject: bpf: devmap fix mutex in rcu critical section Originally we used a mutex to protect concurrent devmap update and delete operations from racing with netdev unregister notifier callbacks. The notifier hook is needed because we increment the netdev ref count when a dev is added to the devmap. This ensures the netdev reference is valid in the datapath. However, we don't want to block unregister events, hence the initial mutex and notifier handler. The concern was in the notifier hook we search the map for dev entries that hold a refcnt on the net device being torn down. 
But, in order to do this we require two steps, (i) dereference the netdev: dev = rcu_dereference(map[i]) (ii) test ifindex: dev->ifindex == removing_ifindex and then finally we can swap in the NULL dev in the map via an xchg operation, xchg(map[i], NULL) The danger here is a concurrent update could run a different xchg op concurrently leading us to replace the new dev with a NULL dev incorrectly. CPU 1 CPU 2 notifier hook bpf devmap update dev = rcu_dereference(map[i]) dev = rcu_dereference(map[i]) xchg(map[i]), new_dev); rcu_call(dev,...) xchg(map[i], NULL) The above flow would create the incorrect state with the dev reference in the update path being lost. To resolve this the original code used a mutex around the above block. However, updates, deletes, and lookups occur inside rcu critical sections so we can't use a mutex in this context safely. Fortunately, by writing slightly better code we can avoid the mutex altogether. If CPU 1 in the above example uses a cmpxchg and _only_ replaces the dev reference in the map when it is in fact the expected dev the race is removed completely. The two cases being illustrated here, first the race condition, CPU 1 CPU 2 notifier hook bpf devmap update dev = rcu_dereference(map[i]) dev = rcu_dereference(map[i]) xchg(map[i]), new_dev); rcu_call(dev,...) odev = cmpxchg(map[i], dev, NULL) Now we can test the cmpxchg return value, detect odev != dev and abort. Or in the good case, CPU 1 CPU 2 notifier hook bpf devmap update dev = rcu_dereference(map[i]) odev = cmpxchg(map[i], dev, NULL) [...] Now 'odev == dev' and we can do proper cleanup. And viola the original race we tried to solve with a mutex is corrected and the trace noted by Sasha below is resolved due to removal of the mutex. Note: When walking the devmap and removing dev references as needed we depend on the core to fail any calls to dev_get_by_index() using the ifindex of the device being removed. This way we do not race with the user while searching the devmap. Additionally, the mutex was also protecting list add/del/read on the list of maps in-use. This patch converts this to an RCU list and spinlock implementation. This protects the list from concurrent alloc/free operations. The notifier hook walks this list so it uses RCU read semantics. BUG: sleeping function called from invalid context at kernel/locking/mutex.c:747 in_atomic(): 1, irqs_disabled(): 0, pid: 16315, name: syz-executor1 1 lock held by syz-executor1/16315: #0: (rcu_read_lock){......}, at: [] map_delete_elem kernel/bpf/syscall.c:577 [inline] #0: (rcu_read_lock){......}, at: [] SYSC_bpf kernel/bpf/syscall.c:1427 [inline] #0: (rcu_read_lock){......}, at: [] SyS_bpf+0x1d32/0x4ba0 kernel/bpf/syscall.c:1388 Fixes: 2ddf71e23cc2 ("net: add notifier hooks for devmap bpf map") Reported-by: Sasha Levin Signed-off-by: Daniel Borkmann Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- kernel/bpf/devmap.c | 48 +++++++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index d439ee0eadb1..7192fb67d4de 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -40,11 +40,12 @@ * contain a reference to the net device and remove them. This is a two step * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b) * check to see if the ifindex is the same as the net_device being removed. - * Unfortunately, the xchg() operations do not protect against this. 
To avoid - * potentially removing incorrect objects the dev_map_list_mutex protects - * conflicting netdev unregister and BPF syscall operations. Updates and - * deletes from a BPF program (done in rcu critical section) are blocked - * because of this mutex. + * When removing the dev a cmpxchg() is used to ensure the correct dev is + * removed, in the case of a concurrent update or delete operation it is + * possible that the initially referenced dev is no longer in the map. As the + * notifier hook walks the map we know that new dev references can not be + * added by the user because core infrastructure ensures dev_get_by_index() + * calls will fail at this point. */ #include #include @@ -68,7 +69,7 @@ struct bpf_dtab { struct list_head list; }; -static DEFINE_MUTEX(dev_map_list_mutex); +static DEFINE_SPINLOCK(dev_map_lock); static LIST_HEAD(dev_map_list); static struct bpf_map *dev_map_alloc(union bpf_attr *attr) @@ -128,9 +129,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) if (!dtab->netdev_map) goto free_dtab; - mutex_lock(&dev_map_list_mutex); - list_add_tail(&dtab->list, &dev_map_list); - mutex_unlock(&dev_map_list_mutex); + spin_lock(&dev_map_lock); + list_add_tail_rcu(&dtab->list, &dev_map_list); + spin_unlock(&dev_map_lock); return &dtab->map; free_dtab: @@ -169,7 +170,6 @@ static void dev_map_free(struct bpf_map *map) * at this point we we can still race with netdev notifier, hence the * lock. */ - mutex_lock(&dev_map_list_mutex); for (i = 0; i < dtab->map.max_entries; i++) { struct bpf_dtab_netdev *dev; @@ -184,8 +184,9 @@ static void dev_map_free(struct bpf_map *map) /* At this point bpf program is detached and all pending operations * _must_ be complete */ - list_del(&dtab->list); - mutex_unlock(&dev_map_list_mutex); + spin_lock(&dev_map_lock); + list_del_rcu(&dtab->list); + spin_unlock(&dev_map_lock); free_percpu(dtab->flush_needed); bpf_map_area_free(dtab->netdev_map); kfree(dtab); @@ -322,11 +323,9 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key) * the driver tear down ensures all soft irqs are complete before * removing the net device in the case of dev_put equals zero. */ - mutex_lock(&dev_map_list_mutex); old_dev = xchg(&dtab->netdev_map[k], NULL); if (old_dev) call_rcu(&old_dev->rcu, __dev_map_entry_free); - mutex_unlock(&dev_map_list_mutex); return 0; } @@ -369,11 +368,9 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, * Remembering the driver side flush operation will happen before the * net device is removed. */ - mutex_lock(&dev_map_list_mutex); old_dev = xchg(&dtab->netdev_map[i], dev); if (old_dev) call_rcu(&old_dev->rcu, __dev_map_entry_free); - mutex_unlock(&dev_map_list_mutex); return 0; } @@ -396,22 +393,27 @@ static int dev_map_notification(struct notifier_block *notifier, switch (event) { case NETDEV_UNREGISTER: - mutex_lock(&dev_map_list_mutex); - list_for_each_entry(dtab, &dev_map_list, list) { + /* This rcu_read_lock/unlock pair is needed because + * dev_map_list is an RCU list AND to ensure a delete + * operation does not free a netdev_map entry while we + * are comparing it against the netdev being unregistered. 
+ */ + rcu_read_lock(); + list_for_each_entry_rcu(dtab, &dev_map_list, list) { for (i = 0; i < dtab->map.max_entries; i++) { - struct bpf_dtab_netdev *dev; + struct bpf_dtab_netdev *dev, *odev; - dev = dtab->netdev_map[i]; + dev = READ_ONCE(dtab->netdev_map[i]); if (!dev || dev->dev->ifindex != netdev->ifindex) continue; - dev = xchg(&dtab->netdev_map[i], NULL); - if (dev) + odev = cmpxchg(&dtab->netdev_map[i], dev, NULL); + if (dev == odev) call_rcu(&dev->rcu, __dev_map_entry_free); } } - mutex_unlock(&dev_map_list_mutex); + rcu_read_unlock(); break; default: break; -- cgit v1.2.3-55-g7522 From 925615ceda94532ba612e641549c7b1e41ebff10 Mon Sep 17 00:00:00 2001 From: David Lebrun Date: Sat, 5 Aug 2017 12:38:24 +0200 Subject: ipv6: sr: allow SRH insertion with arbitrary segments_left value The seg6_validate_srh() function only allows SRHs whose active segment is the first segment of the path. However, an application may insert an SRH whose active segment is not the first one. Such an application might be for example an SR-aware Virtual Network Function. This patch enables to insert SRHs with an arbitrary active segment. Signed-off-by: David Lebrun Signed-off-by: David S. Miller --- net/ipv6/exthdrs.c | 4 ++-- net/ipv6/seg6.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index 3cec529c6113..95516138e861 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -882,7 +882,7 @@ static void ipv6_push_rthdr4(struct sk_buff *skb, u8 *proto, (hops - 1) * sizeof(struct in6_addr)); sr_phdr->segments[0] = **addr_p; - *addr_p = &sr_ihdr->segments[hops - 1]; + *addr_p = &sr_ihdr->segments[sr_ihdr->segments_left]; #ifdef CONFIG_IPV6_SEG6_HMAC if (sr_has_hmac(sr_phdr)) { @@ -1174,7 +1174,7 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6, { struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)opt->srcrt; - fl6->daddr = srh->segments[srh->first_segment]; + fl6->daddr = srh->segments[srh->segments_left]; break; } default: diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index 15fba55e3da8..81c2339b3285 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -40,7 +40,7 @@ bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len) if (((srh->hdrlen + 1) << 3) != len) return false; - if (srh->segments_left != srh->first_segment) + if (srh->segments_left > srh->first_segment) return false; tlv_offset = sizeof(*srh) + ((srh->first_segment + 1) << 4); -- cgit v1.2.3-55-g7522 From b04c80d3a7e228cfb832cdb1c9ce8151f174669c Mon Sep 17 00:00:00 2001 From: David Lebrun Date: Sat, 5 Aug 2017 12:38:25 +0200 Subject: ipv6: sr: export SRH insertion functions This patch exports the seg6_do_srh_encap() and seg6_do_srh_inline() functions. It also removes the CONFIG_IPV6_SEG6_INLINE knob that enabled the compilation of seg6_do_srh_inline(). This function is now built-in. Signed-off-by: David Lebrun Signed-off-by: David S. 
Miller --- include/net/seg6.h | 2 ++ net/ipv6/Kconfig | 12 ------------ net/ipv6/seg6_iptunnel.c | 12 ++++-------- 3 files changed, 6 insertions(+), 20 deletions(-) diff --git a/include/net/seg6.h b/include/net/seg6.h index 4e0357517d79..a32abb040e1d 100644 --- a/include/net/seg6.h +++ b/include/net/seg6.h @@ -58,5 +58,7 @@ extern int seg6_iptunnel_init(void); extern void seg6_iptunnel_exit(void); extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len); +extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh); +extern int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh); #endif diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 48c452959d2c..50181a96923e 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -315,18 +315,6 @@ config IPV6_SEG6_LWTUNNEL If unsure, say N. -config IPV6_SEG6_INLINE - bool "IPv6: direct Segment Routing Header insertion " - depends on IPV6_SEG6_LWTUNNEL - ---help--- - Support for direct insertion of the Segment Routing Header, - also known as inline mode. Be aware that direct insertion of - extension headers (as opposed to encapsulation) may break - multiple mechanisms such as PMTUD or IPSec AH. Use this feature - only if you know exactly what you are doing. - - If unsure, say N. - config IPV6_SEG6_HMAC bool "IPv6: Segment Routing HMAC support" depends on IPV6 diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 264d772d3c7d..501233040570 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -91,7 +91,7 @@ static void set_tun_src(struct net *net, struct net_device *dev, } /* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */ -static int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) +int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) { struct net *net = dev_net(skb_dst(skb)->dev); struct ipv6hdr *hdr, *inner_hdr; @@ -141,10 +141,10 @@ static int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) return 0; } +EXPORT_SYMBOL_GPL(seg6_do_srh_encap); /* insert an SRH within an IPv6 packet, just after the IPv6 header */ -#ifdef CONFIG_IPV6_SEG6_INLINE -static int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) +int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) { struct ipv6hdr *hdr, *oldhdr; struct ipv6_sr_hdr *isrh; @@ -193,7 +193,7 @@ static int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) return 0; } -#endif +EXPORT_SYMBOL_GPL(seg6_do_srh_inline); static int seg6_do_srh(struct sk_buff *skb) { @@ -209,12 +209,10 @@ static int seg6_do_srh(struct sk_buff *skb) } switch (tinfo->mode) { -#ifdef CONFIG_IPV6_SEG6_INLINE case SEG6_IPTUN_MODE_INLINE: err = seg6_do_srh_inline(skb, tinfo->srh); skb_reset_inner_headers(skb); break; -#endif case SEG6_IPTUN_MODE_ENCAP: err = seg6_do_srh_encap(skb, tinfo->srh); break; @@ -357,10 +355,8 @@ static int seg6_build_state(struct nlattr *nla, return -EINVAL; switch (tuninfo->mode) { -#ifdef CONFIG_IPV6_SEG6_INLINE case SEG6_IPTUN_MODE_INLINE: break; -#endif case SEG6_IPTUN_MODE_ENCAP: break; default: -- cgit v1.2.3-55-g7522 From d1df6fd8a1d22d37cffa0075ab8ad423ce656777 Mon Sep 17 00:00:00 2001 From: David Lebrun Date: Sat, 5 Aug 2017 12:38:26 +0200 Subject: ipv6: sr: define core operations for seg6local lightweight tunnel This patch implements a new type of lightweight tunnel named seg6local. A seg6local lwt is defined by a type of action and a set of parameters. 
The action represents the operation to perform on the packets matching the lwt's route, and is not necessarily an encapsulation. The set of parameters are arguments for the processing function. Each action is defined in a struct seg6_action_desc within seg6_action_table[]. This structure contains the action, mandatory attributes, the processing function, and a static headroom size required by the action. The mandatory attributes are encoded as a bitmask field. The static headroom is set to a non-zero value when the processing function always add a constant number of bytes to the skb (e.g. the header size for encapsulations). To facilitate rtnetlink-related operations such as parsing, fill_encap, and cmp_encap, each type of action parameter is associated to three function pointers, in seg6_action_params[]. All actions defined in seg6_local.h are detailed in [1]. [1] https://tools.ietf.org/html/draft-filsfils-spring-srv6-network-programming-01 Signed-off-by: David Lebrun Signed-off-by: David S. Miller --- include/linux/seg6_local.h | 6 + include/net/seg6.h | 2 + include/uapi/linux/lwtunnel.h | 1 + include/uapi/linux/seg6_local.h | 68 +++++++++ net/core/lwtunnel.c | 2 + net/ipv6/Kconfig | 3 +- net/ipv6/Makefile | 2 +- net/ipv6/seg6.c | 5 + net/ipv6/seg6_local.c | 320 ++++++++++++++++++++++++++++++++++++++++ 9 files changed, 407 insertions(+), 2 deletions(-) create mode 100644 include/linux/seg6_local.h create mode 100644 include/uapi/linux/seg6_local.h create mode 100644 net/ipv6/seg6_local.c diff --git a/include/linux/seg6_local.h b/include/linux/seg6_local.h new file mode 100644 index 000000000000..ee63e76fe0c7 --- /dev/null +++ b/include/linux/seg6_local.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_SEG6_LOCAL_H +#define _LINUX_SEG6_LOCAL_H + +#include + +#endif diff --git a/include/net/seg6.h b/include/net/seg6.h index a32abb040e1d..5379f550f521 100644 --- a/include/net/seg6.h +++ b/include/net/seg6.h @@ -56,6 +56,8 @@ extern int seg6_init(void); extern void seg6_exit(void); extern int seg6_iptunnel_init(void); extern void seg6_iptunnel_exit(void); +extern int seg6_local_init(void); +extern void seg6_local_exit(void); extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len); extern int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh); diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h index 92724cba1eba..7fdd19ca7511 100644 --- a/include/uapi/linux/lwtunnel.h +++ b/include/uapi/linux/lwtunnel.h @@ -11,6 +11,7 @@ enum lwtunnel_encap_types { LWTUNNEL_ENCAP_IP6, LWTUNNEL_ENCAP_SEG6, LWTUNNEL_ENCAP_BPF, + LWTUNNEL_ENCAP_SEG6_LOCAL, __LWTUNNEL_ENCAP_MAX, }; diff --git a/include/uapi/linux/seg6_local.h b/include/uapi/linux/seg6_local.h new file mode 100644 index 000000000000..ef2d8c3e76c1 --- /dev/null +++ b/include/uapi/linux/seg6_local.h @@ -0,0 +1,68 @@ +/* + * SR-IPv6 implementation + * + * Author: + * David Lebrun + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef _UAPI_LINUX_SEG6_LOCAL_H +#define _UAPI_LINUX_SEG6_LOCAL_H + +#include + +enum { + SEG6_LOCAL_UNSPEC, + SEG6_LOCAL_ACTION, + SEG6_LOCAL_SRH, + SEG6_LOCAL_TABLE, + SEG6_LOCAL_NH4, + SEG6_LOCAL_NH6, + SEG6_LOCAL_IIF, + SEG6_LOCAL_OIF, + __SEG6_LOCAL_MAX, +}; +#define SEG6_LOCAL_MAX (__SEG6_LOCAL_MAX - 1) + +enum { + SEG6_LOCAL_ACTION_UNSPEC = 0, + /* node segment */ + SEG6_LOCAL_ACTION_END = 1, + /* adjacency segment (IPv6 cross-connect) */ + SEG6_LOCAL_ACTION_END_X = 2, + /* lookup of next seg NH in table */ + SEG6_LOCAL_ACTION_END_T = 3, + /* decap and L2 cross-connect */ + SEG6_LOCAL_ACTION_END_DX2 = 4, + /* decap and IPv6 cross-connect */ + SEG6_LOCAL_ACTION_END_DX6 = 5, + /* decap and IPv4 cross-connect */ + SEG6_LOCAL_ACTION_END_DX4 = 6, + /* decap and lookup of DA in v6 table */ + SEG6_LOCAL_ACTION_END_DT6 = 7, + /* decap and lookup of DA in v4 table */ + SEG6_LOCAL_ACTION_END_DT4 = 8, + /* binding segment with insertion */ + SEG6_LOCAL_ACTION_END_B6 = 9, + /* binding segment with encapsulation */ + SEG6_LOCAL_ACTION_END_B6_ENCAP = 10, + /* binding segment with MPLS encap */ + SEG6_LOCAL_ACTION_END_BM = 11, + /* lookup last seg in table */ + SEG6_LOCAL_ACTION_END_S = 12, + /* forward to SR-unaware VNF with static proxy */ + SEG6_LOCAL_ACTION_END_AS = 13, + /* forward to SR-unaware VNF with masquerading */ + SEG6_LOCAL_ACTION_END_AM = 14, + + __SEG6_LOCAL_ACTION_MAX, +}; + +#define SEG6_LOCAL_ACTION_MAX (__SEG6_LOCAL_ACTION_MAX - 1) + +#endif diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index 435f35f9a61c..0b171756453c 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -44,6 +44,8 @@ static const char *lwtunnel_encap_str(enum lwtunnel_encap_types encap_type) return "SEG6"; case LWTUNNEL_ENCAP_BPF: return "BPF"; + case LWTUNNEL_ENCAP_SEG6_LOCAL: + return "SEG6LOCAL"; case LWTUNNEL_ENCAP_IP6: case LWTUNNEL_ENCAP_IP: case LWTUNNEL_ENCAP_NONE: diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 50181a96923e..0d722396dce6 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -311,7 +311,8 @@ config IPV6_SEG6_LWTUNNEL ---help--- Support for encapsulation of packets within an outer IPv6 header and a Segment Routing Header using the lightweight - tunnels mechanism. + tunnels mechanism. Also enable support for advanced local + processing of SRv6 packets based on their active segment. If unsure, say N. 
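The seg6local commit message above describes a table-driven design: each action is a descriptor carrying the action id, a bitmask of mandatory attributes, the processing function, and a static headroom. Below is a minimal, self-contained sketch of that dispatch pattern for illustration only; it is not the kernel code, and every name in it (action_desc, do_end, ATTR_NH6, and so on) is invented for the sketch.

/* Standalone sketch of table-driven action dispatch, in the style the
 * commit message describes. All identifiers here are illustrative.
 */
#include <stdio.h>
#include <stddef.h>

enum { ATTR_SRH, ATTR_TABLE, ATTR_NH6, ATTR_MAX };

struct pkt;                               /* opaque packet type for the sketch */

struct action_desc {
	int action;                       /* analogue of SEG6_LOCAL_ACTION_* */
	unsigned long attrs;              /* bitmask of mandatory attributes */
	int (*input)(struct pkt *pkt);    /* per-action processing function */
	int static_headroom;              /* bytes always added by the action */
};

static int do_end(struct pkt *pkt)   { (void)pkt; return 0; }
static int do_end_x(struct pkt *pkt) { (void)pkt; return 0; }

static const struct action_desc action_table[] = {
	{ .action = 1, .attrs = 0,               .input = do_end },
	{ .action = 2, .attrs = 1UL << ATTR_NH6, .input = do_end_x },
};

/* Linear lookup of the descriptor for a given action, as in the table walk
 * described above; returns NULL for an unknown action.
 */
static const struct action_desc *get_action_desc(int action)
{
	for (size_t i = 0; i < sizeof(action_table) / sizeof(action_table[0]); i++)
		if (action_table[i].action == action)
			return &action_table[i];
	return NULL;
}

int main(void)
{
	const struct action_desc *desc = get_action_desc(2);

	if (desc)
		printf("action %d requires attribute mask %#lx\n",
		       desc->action, desc->attrs);
	return 0;
}

The point of the pattern is that adding a new action only means adding one table entry plus its handler; the netlink build-state path can then validate the mandatory-attribute bitmask generically instead of open-coding checks per action.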
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile index f8b24c2e0d77..10e342363793 100644 --- a/net/ipv6/Makefile +++ b/net/ipv6/Makefile @@ -23,7 +23,7 @@ ipv6-$(CONFIG_IPV6_MULTIPLE_TABLES) += fib6_rules.o ipv6-$(CONFIG_PROC_FS) += proc.o ipv6-$(CONFIG_SYN_COOKIES) += syncookies.o ipv6-$(CONFIG_NETLABEL) += calipso.o -ipv6-$(CONFIG_IPV6_SEG6_LWTUNNEL) += seg6_iptunnel.o +ipv6-$(CONFIG_IPV6_SEG6_LWTUNNEL) += seg6_iptunnel.o seg6_local.o ipv6-$(CONFIG_IPV6_SEG6_HMAC) += seg6_hmac.o ipv6-objs += $(ipv6-y) diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index 81c2339b3285..c81407770956 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -456,6 +456,10 @@ int __init seg6_init(void) err = seg6_iptunnel_init(); if (err) goto out_unregister_pernet; + + err = seg6_local_init(); + if (err) + goto out_unregister_pernet; #endif #ifdef CONFIG_IPV6_SEG6_HMAC @@ -471,6 +475,7 @@ out: #ifdef CONFIG_IPV6_SEG6_HMAC out_unregister_iptun: #ifdef CONFIG_IPV6_SEG6_LWTUNNEL + seg6_local_exit(); seg6_iptunnel_exit(); #endif #endif diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c new file mode 100644 index 000000000000..53615d7e0723 --- /dev/null +++ b/net/ipv6/seg6_local.c @@ -0,0 +1,320 @@ +/* + * SR-IPv6 implementation + * + * Author: + * David Lebrun + * + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_IPV6_SEG6_HMAC +#include +#endif + +struct seg6_local_lwt; + +struct seg6_action_desc { + int action; + unsigned long attrs; + int (*input)(struct sk_buff *skb, struct seg6_local_lwt *slwt); + int static_headroom; +}; + +struct seg6_local_lwt { + int action; + struct ipv6_sr_hdr *srh; + int table; + struct in_addr nh4; + struct in6_addr nh6; + int iif; + int oif; + + int headroom; + struct seg6_action_desc *desc; +}; + +static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt) +{ + return (struct seg6_local_lwt *)lwt->data; +} + +static struct seg6_action_desc seg6_action_table[] = { + { + .action = SEG6_LOCAL_ACTION_END, + .attrs = 0, + }, +}; + +static struct seg6_action_desc *__get_action_desc(int action) +{ + struct seg6_action_desc *desc; + int i, count; + + count = sizeof(seg6_action_table) / sizeof(struct seg6_action_desc); + for (i = 0; i < count; i++) { + desc = &seg6_action_table[i]; + if (desc->action == action) + return desc; + } + + return NULL; +} + +static int seg6_local_input(struct sk_buff *skb) +{ + struct dst_entry *orig_dst = skb_dst(skb); + struct seg6_action_desc *desc; + struct seg6_local_lwt *slwt; + + slwt = seg6_local_lwtunnel(orig_dst->lwtstate); + desc = slwt->desc; + + return desc->input(skb, slwt); +} + +static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = { + [SEG6_LOCAL_ACTION] = { .type = NLA_U32 }, + [SEG6_LOCAL_SRH] = { .type = NLA_BINARY }, + [SEG6_LOCAL_TABLE] = { .type = NLA_U32 }, + [SEG6_LOCAL_NH4] = { .type = NLA_BINARY, + .len = sizeof(struct in_addr) }, + [SEG6_LOCAL_NH6] = { .type = NLA_BINARY, + .len = sizeof(struct in6_addr) }, + [SEG6_LOCAL_IIF] = { .type = NLA_U32 }, + [SEG6_LOCAL_OIF] = { .type = NLA_U32 }, +}; + +struct seg6_action_param { + int (*parse)(struct nlattr **attrs, struct seg6_local_lwt *slwt); + int (*put)(struct 
sk_buff *skb, struct seg6_local_lwt *slwt); + int (*cmp)(struct seg6_local_lwt *a, struct seg6_local_lwt *b); +}; + +static struct seg6_action_param seg6_action_params[SEG6_LOCAL_MAX + 1] = { + [SEG6_LOCAL_SRH] = { .parse = NULL, + .put = NULL, + .cmp = NULL }, + + [SEG6_LOCAL_TABLE] = { .parse = NULL, + .put = NULL, + .cmp = NULL }, + + [SEG6_LOCAL_NH4] = { .parse = NULL, + .put = NULL, + .cmp = NULL }, + + [SEG6_LOCAL_NH6] = { .parse = NULL, + .put = NULL, + .cmp = NULL }, + + [SEG6_LOCAL_IIF] = { .parse = NULL, + .put = NULL, + .cmp = NULL }, + + [SEG6_LOCAL_OIF] = { .parse = NULL, + .put = NULL, + .cmp = NULL }, +}; + +static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt) +{ + struct seg6_action_param *param; + struct seg6_action_desc *desc; + int i, err; + + desc = __get_action_desc(slwt->action); + if (!desc) + return -EINVAL; + + if (!desc->input) + return -EOPNOTSUPP; + + slwt->desc = desc; + slwt->headroom += desc->static_headroom; + + for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) { + if (desc->attrs & (1 << i)) { + if (!attrs[i]) + return -EINVAL; + + param = &seg6_action_params[i]; + + err = param->parse(attrs, slwt); + if (err < 0) + return err; + } + } + + return 0; +} + +static int seg6_local_build_state(struct nlattr *nla, unsigned int family, + const void *cfg, struct lwtunnel_state **ts, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[SEG6_LOCAL_MAX + 1]; + struct lwtunnel_state *newts; + struct seg6_local_lwt *slwt; + int err; + + err = nla_parse_nested(tb, SEG6_LOCAL_MAX, nla, seg6_local_policy, + extack); + + if (err < 0) + return err; + + if (!tb[SEG6_LOCAL_ACTION]) + return -EINVAL; + + newts = lwtunnel_state_alloc(sizeof(*slwt)); + if (!newts) + return -ENOMEM; + + slwt = seg6_local_lwtunnel(newts); + slwt->action = nla_get_u32(tb[SEG6_LOCAL_ACTION]); + + err = parse_nla_action(tb, slwt); + if (err < 0) + goto out_free; + + newts->type = LWTUNNEL_ENCAP_SEG6_LOCAL; + newts->flags = LWTUNNEL_STATE_INPUT_REDIRECT; + newts->headroom = slwt->headroom; + + *ts = newts; + + return 0; + +out_free: + kfree(slwt->srh); + kfree(newts); + return err; +} + +static void seg6_local_destroy_state(struct lwtunnel_state *lwt) +{ + struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt); + + kfree(slwt->srh); +} + +static int seg6_local_fill_encap(struct sk_buff *skb, + struct lwtunnel_state *lwt) +{ + struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt); + struct seg6_action_param *param; + int i, err; + + if (nla_put_u32(skb, SEG6_LOCAL_ACTION, slwt->action)) + return -EMSGSIZE; + + for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) { + if (slwt->desc->attrs & (1 << i)) { + param = &seg6_action_params[i]; + err = param->put(skb, slwt); + if (err < 0) + return err; + } + } + + return 0; +} + +static int seg6_local_get_encap_size(struct lwtunnel_state *lwt) +{ + struct seg6_local_lwt *slwt = seg6_local_lwtunnel(lwt); + unsigned long attrs; + int nlsize; + + nlsize = nla_total_size(4); /* action */ + + attrs = slwt->desc->attrs; + + if (attrs & (1 << SEG6_LOCAL_SRH)) + nlsize += nla_total_size((slwt->srh->hdrlen + 1) << 3); + + if (attrs & (1 << SEG6_LOCAL_TABLE)) + nlsize += nla_total_size(4); + + if (attrs & (1 << SEG6_LOCAL_NH4)) + nlsize += nla_total_size(4); + + if (attrs & (1 << SEG6_LOCAL_NH6)) + nlsize += nla_total_size(16); + + if (attrs & (1 << SEG6_LOCAL_IIF)) + nlsize += nla_total_size(4); + + if (attrs & (1 << SEG6_LOCAL_OIF)) + nlsize += nla_total_size(4); + + return nlsize; +} + +static int seg6_local_cmp_encap(struct lwtunnel_state *a, + struct 
lwtunnel_state *b) +{ + struct seg6_local_lwt *slwt_a, *slwt_b; + struct seg6_action_param *param; + int i; + + slwt_a = seg6_local_lwtunnel(a); + slwt_b = seg6_local_lwtunnel(b); + + if (slwt_a->action != slwt_b->action) + return 1; + + if (slwt_a->desc->attrs != slwt_b->desc->attrs) + return 1; + + for (i = 0; i < SEG6_LOCAL_MAX + 1; i++) { + if (slwt_a->desc->attrs & (1 << i)) { + param = &seg6_action_params[i]; + if (param->cmp(slwt_a, slwt_b)) + return 1; + } + } + + return 0; +} + +static const struct lwtunnel_encap_ops seg6_local_ops = { + .build_state = seg6_local_build_state, + .destroy_state = seg6_local_destroy_state, + .input = seg6_local_input, + .fill_encap = seg6_local_fill_encap, + .get_encap_size = seg6_local_get_encap_size, + .cmp_encap = seg6_local_cmp_encap, + .owner = THIS_MODULE, +}; + +int __init seg6_local_init(void) +{ + return lwtunnel_encap_add_ops(&seg6_local_ops, + LWTUNNEL_ENCAP_SEG6_LOCAL); +} + +void seg6_local_exit(void) +{ + lwtunnel_encap_del_ops(&seg6_local_ops, LWTUNNEL_ENCAP_SEG6_LOCAL); +} -- cgit v1.2.3-55-g7522 From 2d9cc60aee6bfbd72a06516615af9cde9cb6189a Mon Sep 17 00:00:00 2001 From: David Lebrun Date: Sat, 5 Aug 2017 12:38:27 +0200 Subject: ipv6: sr: add rtnetlink functions for seg6local action parameters This patch adds the necessary functions to parse, fill, and compare seg6local rtnetlink attributes, for all defined action parameters. - The SRH parameter defines an SRH to be inserted or encapsulated. - The TABLE parameter defines the table to use for the route lookup of the next segment or the inner decapsulated packet. - The NH4 parameter defines the IPv4 next-hop for an inner decapsulated IPv4 packet. - The NH6 parameter defines the IPv6 next-hop for the next segment or for an inner decapsulated IPv6 packet - The IIF parameter defines an ingress interface index. - The OIF parameter defines an egress interface index. Signed-off-by: David Lebrun Signed-off-by: David S. 
Miller --- net/ipv6/seg6_local.c | 211 +++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 193 insertions(+), 18 deletions(-) diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index 53615d7e0723..ab1fc1b17ddf 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -104,6 +104,181 @@ static const struct nla_policy seg6_local_policy[SEG6_LOCAL_MAX + 1] = { [SEG6_LOCAL_OIF] = { .type = NLA_U32 }, }; +static int parse_nla_srh(struct nlattr **attrs, struct seg6_local_lwt *slwt) +{ + struct ipv6_sr_hdr *srh; + int len; + + srh = nla_data(attrs[SEG6_LOCAL_SRH]); + len = nla_len(attrs[SEG6_LOCAL_SRH]); + + /* SRH must contain at least one segment */ + if (len < sizeof(*srh) + sizeof(struct in6_addr)) + return -EINVAL; + + if (!seg6_validate_srh(srh, len)) + return -EINVAL; + + slwt->srh = kmalloc(len, GFP_KERNEL); + if (!slwt->srh) + return -ENOMEM; + + memcpy(slwt->srh, srh, len); + + slwt->headroom += len; + + return 0; +} + +static int put_nla_srh(struct sk_buff *skb, struct seg6_local_lwt *slwt) +{ + struct ipv6_sr_hdr *srh; + struct nlattr *nla; + int len; + + srh = slwt->srh; + len = (srh->hdrlen + 1) << 3; + + nla = nla_reserve(skb, SEG6_LOCAL_SRH, len); + if (!nla) + return -EMSGSIZE; + + memcpy(nla_data(nla), srh, len); + + return 0; +} + +static int cmp_nla_srh(struct seg6_local_lwt *a, struct seg6_local_lwt *b) +{ + int len = (a->srh->hdrlen + 1) << 3; + + if (len != ((b->srh->hdrlen + 1) << 3)) + return 1; + + return memcmp(a->srh, b->srh, len); +} + +static int parse_nla_table(struct nlattr **attrs, struct seg6_local_lwt *slwt) +{ + slwt->table = nla_get_u32(attrs[SEG6_LOCAL_TABLE]); + + return 0; +} + +static int put_nla_table(struct sk_buff *skb, struct seg6_local_lwt *slwt) +{ + if (nla_put_u32(skb, SEG6_LOCAL_TABLE, slwt->table)) + return -EMSGSIZE; + + return 0; +} + +static int cmp_nla_table(struct seg6_local_lwt *a, struct seg6_local_lwt *b) +{ + if (a->table != b->table) + return 1; + + return 0; +} + +static int parse_nla_nh4(struct nlattr **attrs, struct seg6_local_lwt *slwt) +{ + memcpy(&slwt->nh4, nla_data(attrs[SEG6_LOCAL_NH4]), + sizeof(struct in_addr)); + + return 0; +} + +static int put_nla_nh4(struct sk_buff *skb, struct seg6_local_lwt *slwt) +{ + struct nlattr *nla; + + nla = nla_reserve(skb, SEG6_LOCAL_NH4, sizeof(struct in_addr)); + if (!nla) + return -EMSGSIZE; + + memcpy(nla_data(nla), &slwt->nh4, sizeof(struct in_addr)); + + return 0; +} + +static int cmp_nla_nh4(struct seg6_local_lwt *a, struct seg6_local_lwt *b) +{ + return memcmp(&a->nh4, &b->nh4, sizeof(struct in_addr)); +} + +static int parse_nla_nh6(struct nlattr **attrs, struct seg6_local_lwt *slwt) +{ + memcpy(&slwt->nh6, nla_data(attrs[SEG6_LOCAL_NH6]), + sizeof(struct in6_addr)); + + return 0; +} + +static int put_nla_nh6(struct sk_buff *skb, struct seg6_local_lwt *slwt) +{ + struct nlattr *nla; + + nla = nla_reserve(skb, SEG6_LOCAL_NH6, sizeof(struct in6_addr)); + if (!nla) + return -EMSGSIZE; + + memcpy(nla_data(nla), &slwt->nh6, sizeof(struct in6_addr)); + + return 0; +} + +static int cmp_nla_nh6(struct seg6_local_lwt *a, struct seg6_local_lwt *b) +{ + return memcmp(&a->nh6, &b->nh6, sizeof(struct in6_addr)); +} + +static int parse_nla_iif(struct nlattr **attrs, struct seg6_local_lwt *slwt) +{ + slwt->iif = nla_get_u32(attrs[SEG6_LOCAL_IIF]); + + return 0; +} + +static int put_nla_iif(struct sk_buff *skb, struct seg6_local_lwt *slwt) +{ + if (nla_put_u32(skb, SEG6_LOCAL_IIF, slwt->iif)) + return -EMSGSIZE; + + return 0; +} + +static int cmp_nla_iif(struct 
seg6_local_lwt *a, struct seg6_local_lwt *b) +{ + if (a->iif != b->iif) + return 1; + + return 0; +} + +static int parse_nla_oif(struct nlattr **attrs, struct seg6_local_lwt *slwt) +{ + slwt->oif = nla_get_u32(attrs[SEG6_LOCAL_OIF]); + + return 0; +} + +static int put_nla_oif(struct sk_buff *skb, struct seg6_local_lwt *slwt) +{ + if (nla_put_u32(skb, SEG6_LOCAL_OIF, slwt->oif)) + return -EMSGSIZE; + + return 0; +} + +static int cmp_nla_oif(struct seg6_local_lwt *a, struct seg6_local_lwt *b) +{ + if (a->oif != b->oif) + return 1; + + return 0; +} + struct seg6_action_param { int (*parse)(struct nlattr **attrs, struct seg6_local_lwt *slwt); int (*put)(struct sk_buff *skb, struct seg6_local_lwt *slwt); @@ -111,29 +286,29 @@ struct seg6_action_param { }; static struct seg6_action_param seg6_action_params[SEG6_LOCAL_MAX + 1] = { - [SEG6_LOCAL_SRH] = { .parse = NULL, - .put = NULL, - .cmp = NULL }, + [SEG6_LOCAL_SRH] = { .parse = parse_nla_srh, + .put = put_nla_srh, + .cmp = cmp_nla_srh }, - [SEG6_LOCAL_TABLE] = { .parse = NULL, - .put = NULL, - .cmp = NULL }, + [SEG6_LOCAL_TABLE] = { .parse = parse_nla_table, + .put = put_nla_table, + .cmp = cmp_nla_table }, - [SEG6_LOCAL_NH4] = { .parse = NULL, - .put = NULL, - .cmp = NULL }, + [SEG6_LOCAL_NH4] = { .parse = parse_nla_nh4, + .put = put_nla_nh4, + .cmp = cmp_nla_nh4 }, - [SEG6_LOCAL_NH6] = { .parse = NULL, - .put = NULL, - .cmp = NULL }, + [SEG6_LOCAL_NH6] = { .parse = parse_nla_nh6, + .put = put_nla_nh6, + .cmp = cmp_nla_nh6 }, - [SEG6_LOCAL_IIF] = { .parse = NULL, - .put = NULL, - .cmp = NULL }, + [SEG6_LOCAL_IIF] = { .parse = parse_nla_iif, + .put = put_nla_iif, + .cmp = cmp_nla_iif }, - [SEG6_LOCAL_OIF] = { .parse = NULL, - .put = NULL, - .cmp = NULL }, + [SEG6_LOCAL_OIF] = { .parse = parse_nla_oif, + .put = put_nla_oif, + .cmp = cmp_nla_oif }, }; static int parse_nla_action(struct nlattr **attrs, struct seg6_local_lwt *slwt) -- cgit v1.2.3-55-g7522 From 140f04c33bbcf89440dcaf8bef04918abff5a52d Mon Sep 17 00:00:00 2001 From: David Lebrun Date: Sat, 5 Aug 2017 12:39:48 +0200 Subject: ipv6: sr: implement several seg6local actions This patch implements the following seg6local actions. - SEG6_LOCAL_ACTION_END: regular SRH processing. The DA of the packet is updated to the next segment and forwarded accordingly. - SEG6_LOCAL_ACTION_END_X: same as above, except that the packet is forwarded to the specified IPv6 next-hop. - SEG6_LOCAL_ACTION_END_DX6: decapsulate the packet and forward to inner IPv6 packet to the specified IPv6 next-hop. - SEG6_LOCAL_ACTION_END_B6: insert the specified SRH directly after the IPv6 header of the packet. - SEG6_LOCAL_ACTION_END_B6_ENCAP: encapsulate the packet within an outer IPv6 header, containing the specified SRH. Signed-off-by: David Lebrun Signed-off-by: David S. 
Miller --- net/ipv6/seg6_local.c | 271 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 271 insertions(+) diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index ab1fc1b17ddf..147680e7a00c 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -58,11 +58,282 @@ static struct seg6_local_lwt *seg6_local_lwtunnel(struct lwtunnel_state *lwt) return (struct seg6_local_lwt *)lwt->data; } +static struct ipv6_sr_hdr *get_srh(struct sk_buff *skb) +{ + struct ipv6_sr_hdr *srh; + struct ipv6hdr *hdr; + int len; + + hdr = ipv6_hdr(skb); + if (hdr->nexthdr != IPPROTO_ROUTING) + return NULL; + + srh = (struct ipv6_sr_hdr *)(hdr + 1); + len = (srh->hdrlen + 1) << 3; + + if (!pskb_may_pull(skb, sizeof(*hdr) + len)) + return NULL; + + if (!seg6_validate_srh(srh, len)) + return NULL; + + return srh; +} + +static struct ipv6_sr_hdr *get_and_validate_srh(struct sk_buff *skb) +{ + struct ipv6_sr_hdr *srh; + + srh = get_srh(skb); + if (!srh) + return NULL; + + if (srh->segments_left == 0) + return NULL; + +#ifdef CONFIG_IPV6_SEG6_HMAC + if (!seg6_hmac_validate_skb(skb)) + return NULL; +#endif + + return srh; +} + +/* regular endpoint function */ +static int input_action_end(struct sk_buff *skb, struct seg6_local_lwt *slwt) +{ + struct ipv6_sr_hdr *srh; + struct in6_addr *addr; + + srh = get_and_validate_srh(skb); + if (!srh) + goto drop; + + srh->segments_left--; + addr = srh->segments + srh->segments_left; + + ipv6_hdr(skb)->daddr = *addr; + + skb_dst_drop(skb); + ip6_route_input(skb); + + return dst_input(skb); + +drop: + kfree_skb(skb); + return -EINVAL; +} + +/* regular endpoint, and forward to specified nexthop */ +static int input_action_end_x(struct sk_buff *skb, struct seg6_local_lwt *slwt) +{ + struct net *net = dev_net(skb->dev); + struct ipv6_sr_hdr *srh; + struct dst_entry *dst; + struct in6_addr *addr; + struct ipv6hdr *hdr; + struct flowi6 fl6; + int flags; + + srh = get_and_validate_srh(skb); + if (!srh) + goto drop; + + srh->segments_left--; + addr = srh->segments + srh->segments_left; + + hdr = ipv6_hdr(skb); + hdr->daddr = *addr; + + skb_dst_drop(skb); + + fl6.flowi6_iif = skb->dev->ifindex; + fl6.daddr = slwt->nh6; + fl6.saddr = hdr->saddr; + fl6.flowlabel = ip6_flowinfo(hdr); + fl6.flowi6_mark = skb->mark; + fl6.flowi6_proto = hdr->nexthdr; + + flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE | + RT6_LOOKUP_F_REACHABLE; + + dst = ip6_route_input_lookup(net, skb->dev, &fl6, flags); + if (dst->dev->flags & IFF_LOOPBACK) + goto drop; + + skb_dst_set(skb, dst); + + return dst_input(skb); + +drop: + kfree_skb(skb); + return -EINVAL; +} + +/* decapsulate and forward to specified nexthop */ +static int input_action_end_dx6(struct sk_buff *skb, + struct seg6_local_lwt *slwt) +{ + struct net *net = dev_net(skb->dev); + struct ipv6hdr *inner_hdr; + struct ipv6_sr_hdr *srh; + struct dst_entry *dst; + unsigned int off = 0; + struct flowi6 fl6; + bool use_nh; + int flags; + + /* this function accepts IPv6 encapsulated packets, with either + * an SRH with SL=0, or no SRH. 
+ */ + + srh = get_srh(skb); + if (srh && srh->segments_left > 0) + goto drop; + +#ifdef CONFIG_IPV6_SEG6_HMAC + if (srh && !seg6_hmac_validate_skb(skb)) + goto drop; +#endif + + if (ipv6_find_hdr(skb, &off, IPPROTO_IPV6, NULL, NULL) < 0) + goto drop; + + if (!pskb_pull(skb, off)) + goto drop; + + skb_postpull_rcsum(skb, skb_network_header(skb), off); + + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + skb->encapsulation = 0; + + inner_hdr = ipv6_hdr(skb); + + /* The inner packet is not associated to any local interface, + * so we do not call netif_rx(). + * + * If slwt->nh6 is set to ::, then lookup the nexthop for the + * inner packet's DA. Otherwise, use the specified nexthop. + */ + + use_nh = !ipv6_addr_any(&slwt->nh6); + + skb_dst_drop(skb); + + fl6.flowi6_iif = skb->dev->ifindex; + fl6.daddr = use_nh ? slwt->nh6 : inner_hdr->daddr; + fl6.saddr = inner_hdr->saddr; + fl6.flowlabel = ip6_flowinfo(inner_hdr); + fl6.flowi6_mark = skb->mark; + fl6.flowi6_proto = inner_hdr->nexthdr; + + flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_REACHABLE; + if (use_nh) + flags |= RT6_LOOKUP_F_IFACE; + + dst = ip6_route_input_lookup(net, skb->dev, &fl6, flags); + if (dst->dev->flags & IFF_LOOPBACK) + goto drop; + + skb_dst_set(skb, dst); + + return dst_input(skb); +drop: + kfree_skb(skb); + return -EINVAL; +} + +/* push an SRH on top of the current one */ +static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt) +{ + struct ipv6_sr_hdr *srh; + int err = -EINVAL; + + srh = get_and_validate_srh(skb); + if (!srh) + goto drop; + + err = seg6_do_srh_inline(skb, slwt->srh); + if (err) + goto drop; + + ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + + skb_dst_drop(skb); + ip6_route_input(skb); + + return dst_input(skb); + +drop: + kfree_skb(skb); + return err; +} + +/* encapsulate within an outer IPv6 header and a specified SRH */ +static int input_action_end_b6_encap(struct sk_buff *skb, + struct seg6_local_lwt *slwt) +{ + struct ipv6_sr_hdr *srh; + struct in6_addr *addr; + int err = -EINVAL; + + srh = get_and_validate_srh(skb); + if (!srh) + goto drop; + + srh->segments_left--; + addr = srh->segments + srh->segments_left; + ipv6_hdr(skb)->daddr = *addr; + + skb_reset_inner_headers(skb); + skb->encapsulation = 1; + + err = seg6_do_srh_encap(skb, slwt->srh); + if (err) + goto drop; + + ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + + skb_dst_drop(skb); + ip6_route_input(skb); + + return dst_input(skb); + +drop: + kfree_skb(skb); + return err; +} + static struct seg6_action_desc seg6_action_table[] = { { .action = SEG6_LOCAL_ACTION_END, .attrs = 0, + .input = input_action_end, + }, + { + .action = SEG6_LOCAL_ACTION_END_X, + .attrs = (1 << SEG6_LOCAL_NH6), + .input = input_action_end_x, }, + { + .action = SEG6_LOCAL_ACTION_END_DX6, + .attrs = (1 << SEG6_LOCAL_NH6), + .input = input_action_end_dx6, + }, + { + .action = SEG6_LOCAL_ACTION_END_B6, + .attrs = (1 << SEG6_LOCAL_SRH), + .input = input_action_end_b6, + }, + { + .action = SEG6_LOCAL_ACTION_END_B6_ENCAP, + .attrs = (1 << SEG6_LOCAL_SRH), + .input = input_action_end_b6_encap, + .static_headroom = sizeof(struct ipv6hdr), + } }; static struct seg6_action_desc *__get_action_desc(int action) -- cgit v1.2.3-55-g7522 From 451d3ca0a0192def659c00f024718d8b59a3ef47 Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Sat, 5 Aug 2017 13:05:46 +0200 Subject: net: 
dsa: lan9303: Change lan9303_xxx_packet_processing() port param. lan9303_enable_packet_processing, lan9303_disable_packet_processing() Pass port number (0,1,2) as parameter instead of port offset. Because other functions in the module pass port numbers. And to enable simplifications in following patch. Introduce lan9303_write_switch_port(). Signed-off-by: Egil Hjelmeland Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/lan9303-core.c | 46 +++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 8e430d1ee297..77f3cee3af0d 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -159,9 +159,7 @@ # define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT1 (BIT(9) | BIT(8)) # define LAN9303_BM_EGRSS_PORT_TYPE_SPECIAL_TAG_PORT0 (BIT(1) | BIT(0)) -#define LAN9303_PORT_0_OFFSET 0x400 -#define LAN9303_PORT_1_OFFSET 0x800 -#define LAN9303_PORT_2_OFFSET 0xc00 +#define LAN9303_SWITCH_PORT_REG(port, reg0) (0x400 * (port) + (reg0)) /* the built-in PHYs are of type LAN911X */ #define MII_LAN911X_SPECIAL_MODES 0x12 @@ -428,6 +426,13 @@ on_error: return ret; } +static int lan9303_write_switch_port(struct lan9303 *chip, int port, + u16 regnum, u32 val) +{ + return lan9303_write_switch_reg( + chip, LAN9303_SWITCH_PORT_REG(port, regnum), val); +} + static int lan9303_detect_phy_setup(struct lan9303 *chip) { int reg; @@ -458,22 +463,19 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) return 0; } -#define LAN9303_MAC_RX_CFG_OFFS (LAN9303_MAC_RX_CFG_0 - LAN9303_PORT_0_OFFSET) -#define LAN9303_MAC_TX_CFG_OFFS (LAN9303_MAC_TX_CFG_0 - LAN9303_PORT_0_OFFSET) - static int lan9303_disable_packet_processing(struct lan9303 *chip, unsigned int port) { int ret; /* disable RX, but keep register reset default values else */ - ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port, - LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES); + ret = lan9303_write_switch_port(chip, port, LAN9303_MAC_RX_CFG_0, + LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES); if (ret) return ret; /* disable TX, but keep register reset default values else */ - return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port, + return lan9303_write_switch_port(chip, port, LAN9303_MAC_TX_CFG_0, LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT | LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE); } @@ -484,14 +486,14 @@ static int lan9303_enable_packet_processing(struct lan9303 *chip, int ret; /* enable RX and keep register reset default values else */ - ret = lan9303_write_switch_reg(chip, LAN9303_MAC_RX_CFG_OFFS + port, - LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES | - LAN9303_MAC_RX_CFG_X_RX_ENABLE); + ret = lan9303_write_switch_port(chip, port, LAN9303_MAC_RX_CFG_0, + LAN9303_MAC_RX_CFG_X_REJECT_MAC_TYPES | + LAN9303_MAC_RX_CFG_X_RX_ENABLE); if (ret) return ret; /* enable TX and keep register reset default values else */ - return lan9303_write_switch_reg(chip, LAN9303_MAC_TX_CFG_OFFS + port, + return lan9303_write_switch_port(chip, port, LAN9303_MAC_TX_CFG_0, LAN9303_MAC_TX_CFG_X_TX_IFG_CONFIG_DEFAULT | LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE | LAN9303_MAC_TX_CFG_X_TX_ENABLE); @@ -558,13 +560,13 @@ static int lan9303_disable_processing(struct lan9303 *chip) { int ret; - ret = lan9303_disable_packet_processing(chip, LAN9303_PORT_0_OFFSET); + ret = lan9303_disable_packet_processing(chip, 0); if (ret) return ret; - ret = lan9303_disable_packet_processing(chip, 
LAN9303_PORT_1_OFFSET); + ret = lan9303_disable_packet_processing(chip, 1); if (ret) return ret; - return lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET); + return lan9303_disable_packet_processing(chip, 2); } static int lan9303_check_device(struct lan9303 *chip) @@ -634,7 +636,7 @@ static int lan9303_setup(struct dsa_switch *ds) if (ret) dev_err(chip->dev, "failed to separate ports %d\n", ret); - ret = lan9303_enable_packet_processing(chip, LAN9303_PORT_0_OFFSET); + ret = lan9303_enable_packet_processing(chip, 0); if (ret) dev_err(chip->dev, "failed to re-enable switching %d\n", ret); @@ -757,11 +759,9 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port, /* enable internal packet processing */ switch (port) { case 1: - return lan9303_enable_packet_processing(chip, - LAN9303_PORT_1_OFFSET); + return lan9303_enable_packet_processing(chip, port); case 2: - return lan9303_enable_packet_processing(chip, - LAN9303_PORT_2_OFFSET); + return lan9303_enable_packet_processing(chip, port); default: dev_dbg(chip->dev, "Error: request to power up invalid port %d\n", port); @@ -778,12 +778,12 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, /* disable internal packet processing */ switch (port) { case 1: - lan9303_disable_packet_processing(chip, LAN9303_PORT_1_OFFSET); + lan9303_disable_packet_processing(chip, port); lan9303_phy_write(ds, chip->phy_addr_sel_strap + 1, MII_BMCR, BMCR_PDOWN); break; case 2: - lan9303_disable_packet_processing(chip, LAN9303_PORT_2_OFFSET); + lan9303_disable_packet_processing(chip, port); lan9303_phy_write(ds, chip->phy_addr_sel_strap + 2, MII_BMCR, BMCR_PDOWN); break; -- cgit v1.2.3-55-g7522 From a368ca537889d6baff24fd05a903c3625f2906cc Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Sat, 5 Aug 2017 13:05:47 +0200 Subject: net: dsa: lan9303: define LAN9303_NUM_PORTS 3 Will be used instead of '3' in upcomming patches. Signed-off-by: Egil Hjelmeland Signed-off-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/lan9303-core.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 77f3cee3af0d..49e3a0cb1b5b 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -20,6 +20,8 @@ #include "lan9303.h" +#define LAN9303_NUM_PORTS 3 + /* 13.2 System Control and Status Registers * Multiply register number by 4 to get address offset. */ -- cgit v1.2.3-55-g7522 From b3d14a2b2f988aad27152d7d3b5e4af2f4b69914 Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Sat, 5 Aug 2017 13:05:48 +0200 Subject: net: dsa: lan9303: Simplify lan9303_xxx_packet_processing() usage Simplify usage of lan9303_enable_packet_processing, lan9303_disable_packet_processing() Signed-off-by: Egil Hjelmeland Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/lan9303-core.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 49e3a0cb1b5b..daae267f6b05 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -560,15 +560,16 @@ static int lan9303_handle_reset(struct lan9303 *chip) /* stop processing packets for all ports */ static int lan9303_disable_processing(struct lan9303 *chip) { - int ret; + int p; - ret = lan9303_disable_packet_processing(chip, 0); - if (ret) - return ret; - ret = lan9303_disable_packet_processing(chip, 1); - if (ret) - return ret; - return lan9303_disable_packet_processing(chip, 2); + for (p = 0; p < LAN9303_NUM_PORTS; p++) { + int ret = lan9303_disable_packet_processing(chip, p); + + if (ret) + return ret; + } + + return 0; } static int lan9303_check_device(struct lan9303 *chip) @@ -761,7 +762,6 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port, /* enable internal packet processing */ switch (port) { case 1: - return lan9303_enable_packet_processing(chip, port); case 2: return lan9303_enable_packet_processing(chip, port); default: @@ -780,13 +780,9 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, /* disable internal packet processing */ switch (port) { case 1: - lan9303_disable_packet_processing(chip, port); - lan9303_phy_write(ds, chip->phy_addr_sel_strap + 1, - MII_BMCR, BMCR_PDOWN); - break; case 2: lan9303_disable_packet_processing(chip, port); - lan9303_phy_write(ds, chip->phy_addr_sel_strap + 2, + lan9303_phy_write(ds, chip->phy_addr_sel_strap + port, MII_BMCR, BMCR_PDOWN); break; default: -- cgit v1.2.3-55-g7522 From 9c84258ed68a1dc3c58a16741747a19df9256c5e Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Sat, 5 Aug 2017 13:05:49 +0200 Subject: net: dsa: lan9303: Rename lan9303_xxx_packet_processing() The lan9303_enable_packet_processing, lan9303_disable_packet_processing functions operate on port, so the names should reflect that. And to align with lan9303_disable_processing(), rename: lan9303_enable_packet_processing -> lan9303_enable_processing_port lan9303_disable_packet_processing -> lan9303_disable_processing_port Signed-off-by: Egil Hjelmeland Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/lan9303-core.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index daae267f6b05..444958bb19f6 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -465,8 +465,8 @@ static int lan9303_detect_phy_setup(struct lan9303 *chip) return 0; } -static int lan9303_disable_packet_processing(struct lan9303 *chip, - unsigned int port) +static int lan9303_disable_processing_port(struct lan9303 *chip, + unsigned int port) { int ret; @@ -482,8 +482,8 @@ static int lan9303_disable_packet_processing(struct lan9303 *chip, LAN9303_MAC_TX_CFG_X_TX_PAD_ENABLE); } -static int lan9303_enable_packet_processing(struct lan9303 *chip, - unsigned int port) +static int lan9303_enable_processing_port(struct lan9303 *chip, + unsigned int port) { int ret; @@ -563,7 +563,7 @@ static int lan9303_disable_processing(struct lan9303 *chip) int p; for (p = 0; p < LAN9303_NUM_PORTS; p++) { - int ret = lan9303_disable_packet_processing(chip, p); + int ret = lan9303_disable_processing_port(chip, p); if (ret) return ret; @@ -639,7 +639,7 @@ static int lan9303_setup(struct dsa_switch *ds) if (ret) dev_err(chip->dev, "failed to separate ports %d\n", ret); - ret = lan9303_enable_packet_processing(chip, 0); + ret = lan9303_enable_processing_port(chip, 0); if (ret) dev_err(chip->dev, "failed to re-enable switching %d\n", ret); @@ -763,7 +763,7 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port, switch (port) { case 1: case 2: - return lan9303_enable_packet_processing(chip, port); + return lan9303_enable_processing_port(chip, port); default: dev_dbg(chip->dev, "Error: request to power up invalid port %d\n", port); @@ -781,7 +781,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, switch (port) { case 1: case 2: - lan9303_disable_packet_processing(chip, port); + lan9303_disable_processing_port(chip, port); lan9303_phy_write(ds, chip->phy_addr_sel_strap + port, MII_BMCR, BMCR_PDOWN); break; -- cgit v1.2.3-55-g7522 From 0a967b4a8e4d5e4e5e50e799bb9d8aa4218c6b2f Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Sat, 5 Aug 2017 13:05:50 +0200 Subject: net: dsa: lan9303: refactor lan9303_get_ethtool_stats In lan9303_get_ethtool_stats: Get rid of 0x400 constant magic by using new lan9303_read_switch_reg() inside loop. Reduced scope of two variables. Signed-off-by: Egil Hjelmeland Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/lan9303-core.c | 26 ++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 444958bb19f6..15befd155251 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -435,6 +435,13 @@ static int lan9303_write_switch_port(struct lan9303 *chip, int port, chip, LAN9303_SWITCH_PORT_REG(port, regnum), val); } +static int lan9303_read_switch_port(struct lan9303 *chip, int port, + u16 regnum, u32 *val) +{ + return lan9303_read_switch_reg( + chip, LAN9303_SWITCH_PORT_REG(port, regnum), val); +} + static int lan9303_detect_phy_setup(struct lan9303 *chip) { int reg; @@ -705,19 +712,18 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) { struct lan9303 *chip = ds->priv; - u32 reg; - unsigned int u, poff; - int ret; - - poff = port * 0x400; + unsigned int u; for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) { + u32 reg; + int ret; + + ret = lan9303_read_switch_port( + chip, port, lan9303_mib[u].offset, &reg); + if (ret) - dev_warn(chip->dev, "Reading status reg %u failed\n", - lan9303_mib[u].offset + poff); + dev_warn(chip->dev, "Reading status port %d reg %u failed\n", + port, lan9303_mib[u].offset); data[u] = reg; } } -- cgit v1.2.3-55-g7522 From e00e21979dc8a821840f23a8f1d62bd823755192 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 5 Aug 2017 14:46:35 +0100 Subject: net: hns3: fix spelling mistake: "capabilty" -> "capability" Trivial fix to spelling mistake in dev_err error message and also split overly long line to avoid a checkpatch warning. Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 7440e85b607c..6fb7648bb2f2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -4089,7 +4089,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_get_cap(hdev); if (ret) { - dev_err(&pdev->dev, "get hw capabilty error, ret = %d.\n", ret); + dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", + ret); return ret; } -- cgit v1.2.3-55-g7522 From 206e41fe7f441542e8b92b314cfcdab21c709233 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sat, 5 Aug 2017 16:20:17 -0400 Subject: net: dsa: remove useless argument in legacy setup dsa_switch_alloc() already assigns ds->dev, which can be used in dsa_switch_setup_one and dsa_cpu_dsa_setups instead of requiring an additional struct device argument. Signed-off-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S.
Miller --- net/dsa/legacy.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c index 1d7a3282f2a7..fa162030a69c 100644 --- a/net/dsa/legacy.c +++ b/net/dsa/legacy.c @@ -78,7 +78,7 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, } /* basic switch operations **************************************************/ -static int dsa_cpu_dsa_setups(struct dsa_switch *ds, struct device *dev) +static int dsa_cpu_dsa_setups(struct dsa_switch *ds) { struct dsa_port *dport; int ret, port; @@ -88,15 +88,15 @@ static int dsa_cpu_dsa_setups(struct dsa_switch *ds, struct device *dev) continue; dport = &ds->ports[port]; - ret = dsa_cpu_dsa_setup(ds, dev, dport, port); + ret = dsa_cpu_dsa_setup(ds, ds->dev, dport, port); if (ret) return ret; } return 0; } -static int dsa_switch_setup_one(struct dsa_switch *ds, struct net_device *master, - struct device *parent) +static int dsa_switch_setup_one(struct dsa_switch *ds, + struct net_device *master) { const struct dsa_switch_ops *ops = ds->ops; struct dsa_switch_tree *dst = ds->dst; @@ -176,7 +176,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct net_device *master } if (!ds->slave_mii_bus && ops->phy_read) { - ds->slave_mii_bus = devm_mdiobus_alloc(parent); + ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev); if (!ds->slave_mii_bus) return -ENOMEM; dsa_slave_mii_bus_init(ds); @@ -196,14 +196,14 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct net_device *master if (!(ds->enabled_port_mask & (1 << i))) continue; - ret = dsa_slave_create(ds, parent, i, cd->port_names[i]); + ret = dsa_slave_create(ds, ds->dev, i, cd->port_names[i]); if (ret < 0) netdev_err(master, "[%d]: can't create dsa slave device for port %d(%s): %d\n", index, i, cd->port_names[i], ret); } /* Perform configuration of the CPU and DSA ports */ - ret = dsa_cpu_dsa_setups(ds, parent); + ret = dsa_cpu_dsa_setups(ds); if (ret < 0) netdev_err(master, "[%d] : can't configure CPU and DSA ports\n", index); @@ -252,7 +252,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, struct net_device *master, ds->ops = ops; ds->priv = priv; - ret = dsa_switch_setup_one(ds, master, parent); + ret = dsa_switch_setup_one(ds, master); if (ret) return ERR_PTR(ret); -- cgit v1.2.3-55-g7522 From 47d0dcc35412feb354aace4c5dcc07c03cb4ccb0 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sat, 5 Aug 2017 16:20:18 -0400 Subject: net: dsa: remove useless args of dsa_cpu_dsa_setup dsa_cpu_dsa_setup currently takes 4 arguments but they are all available from the dsa_port argument. Remove all others. Signed-off-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/dsa.c | 10 +++++----- net/dsa/dsa2.c | 4 ++-- net/dsa/dsa_priv.h | 3 +-- net/dsa/legacy.c | 4 +--- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index a91e520e735f..4118848b8e58 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -67,17 +67,17 @@ const struct dsa_device_ops *dsa_device_ops[DSA_TAG_LAST] = { [DSA_TAG_PROTO_NONE] = &none_ops, }; -int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, - struct dsa_port *dport, int port) +int dsa_cpu_dsa_setup(struct dsa_port *port) { - struct device_node *port_dn = dport->dn; + struct device_node *port_dn = port->dn; + struct dsa_switch *ds = port->ds; struct phy_device *phydev; int ret, mode; if (of_phy_is_fixed_link(port_dn)) { ret = of_phy_register_fixed_link(port_dn); if (ret) { - dev_err(dev, "failed to register fixed PHY\n"); + dev_err(ds->dev, "failed to register fixed PHY\n"); return ret; } phydev = of_phy_find_device(port_dn); @@ -90,7 +90,7 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, genphy_config_init(phydev); genphy_read_status(phydev); if (ds->ops->adjust_link) - ds->ops->adjust_link(ds, port, phydev); + ds->ops->adjust_link(ds, port->index, phydev); put_device(&phydev->mdio.dev); } diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index c442051d5a55..2a0120493cf1 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -219,7 +219,7 @@ static int dsa_dsa_port_apply(struct dsa_port *port) struct dsa_switch *ds = port->ds; int err; - err = dsa_cpu_dsa_setup(ds, ds->dev, port, port->index); + err = dsa_cpu_dsa_setup(port); if (err) { dev_warn(ds->dev, "Failed to setup dsa port %d: %d\n", port->index, err); @@ -243,7 +243,7 @@ static int dsa_cpu_port_apply(struct dsa_port *port) struct dsa_switch *ds = port->ds; int err; - err = dsa_cpu_dsa_setup(ds, ds->dev, port, port->index); + err = dsa_cpu_dsa_setup(port); if (err) { dev_warn(ds->dev, "Failed to setup cpu port %d: %d\n", port->index, err); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 306cff229def..2873d912b12e 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -101,8 +101,7 @@ struct dsa_slave_priv { }; /* dsa.c */ -int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev, - struct dsa_port *dport, int port); +int dsa_cpu_dsa_setup(struct dsa_port *port); void dsa_cpu_dsa_destroy(struct dsa_port *dport); const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol); int dsa_cpu_port_ethtool_setup(struct dsa_port *cpu_dp); diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c index fa162030a69c..903e8d70c792 100644 --- a/net/dsa/legacy.c +++ b/net/dsa/legacy.c @@ -80,15 +80,13 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr, /* basic switch operations **************************************************/ static int dsa_cpu_dsa_setups(struct dsa_switch *ds) { - struct dsa_port *dport; int ret, port; for (port = 0; port < ds->num_ports; port++) { if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))) continue; - dport = &ds->ports[port]; - ret = dsa_cpu_dsa_setup(ds, ds->dev, dport, port); + ret = dsa_cpu_dsa_setup(&ds->ports[port]); if (ret) return ret; } -- cgit v1.2.3-55-g7522 From 4cfbf09cf97f265c82f48ca2bd7ea704b6a21a90 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sat, 5 Aug 2017 16:20:19 -0400 Subject: net: dsa: remove useless args of dsa_slave_create dsa_slave_create currently takes 4 arguments while it only needs the related dsa_port and its name. Remove all other arguments. 
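As a rough editorial sketch (not part of the original patch; the helper name and includes are assumptions), the snippet below shows why the dropped arguments are redundant: everything dsa_slave_create() needs besides the name is already reachable from the dsa_port itself.

#include <linux/device.h>
#include <net/dsa.h>

/* Hypothetical helper, for illustration only: the former 'ds',
 * 'parent' and 'port' arguments can all be derived from the port.
 */
static void example_show_slave_args(struct dsa_port *port, const char *name)
{
	struct dsa_switch *ds = port->ds;	/* former 'ds' argument */
	struct device *parent = ds->dev;	/* former 'parent' argument */
	int index = port->index;		/* former 'port' argument */

	dev_info(parent, "slave %s maps to port %d of switch %d\n",
		 name, index, ds->index);
}

Accordingly, the call sites in the diff below shrink to dsa_slave_create(port, name) in dsa2.c and dsa_slave_create(&ds->ports[i], cd->port_names[i]) in legacy.c.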
Signed-off-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/dsa2.c | 2 +- net/dsa/dsa_priv.h | 3 +-- net/dsa/legacy.c | 2 +- net/dsa/slave.c | 14 +++++++------- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 2a0120493cf1..cceaa4dd9f53 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -275,7 +275,7 @@ static int dsa_user_port_apply(struct dsa_port *port) if (!name) name = "eth%d"; - err = dsa_slave_create(ds, ds->dev, port->index, name); + err = dsa_slave_create(port, name); if (err) { dev_warn(ds->dev, "Failed to create slave %d: %d\n", port->index, err); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 2873d912b12e..945ded148c9c 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -148,8 +148,7 @@ int dsa_port_vlan_dump(struct dsa_port *dp, extern const struct dsa_device_ops notag_netdev_ops; void dsa_slave_mii_bus_init(struct dsa_switch *ds); void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops); -int dsa_slave_create(struct dsa_switch *ds, struct device *parent, - int port, const char *name); +int dsa_slave_create(struct dsa_port *port, const char *name); void dsa_slave_destroy(struct net_device *slave_dev); int dsa_slave_suspend(struct net_device *slave_dev); int dsa_slave_resume(struct net_device *slave_dev); diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c index 903e8d70c792..612acf16d573 100644 --- a/net/dsa/legacy.c +++ b/net/dsa/legacy.c @@ -194,7 +194,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, if (!(ds->enabled_port_mask & (1 << i))) continue; - ret = dsa_slave_create(ds, ds->dev, i, cd->port_names[i]); + ret = dsa_slave_create(&ds->ports[i], cd->port_names[i]); if (ret < 0) netdev_err(master, "[%d]: can't create dsa slave device for port %d(%s): %d\n", index, i, cd->port_names[i], ret); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index c6b5de2fe413..3b36c47472c6 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1195,9 +1195,9 @@ int dsa_slave_resume(struct net_device *slave_dev) return 0; } -int dsa_slave_create(struct dsa_switch *ds, struct device *parent, - int port, const char *name) +int dsa_slave_create(struct dsa_port *port, const char *name) { + struct dsa_switch *ds = port->ds; struct dsa_switch_tree *dst = ds->dst; struct net_device *master; struct net_device *slave_dev; @@ -1227,8 +1227,8 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one, NULL); - SET_NETDEV_DEV(slave_dev, parent); - slave_dev->dev.of_node = ds->ports[port].dn; + SET_NETDEV_DEV(slave_dev, port->ds->dev); + slave_dev->dev.of_node = port->dn; slave_dev->vlan_features = master->vlan_features; p = netdev_priv(slave_dev); @@ -1237,7 +1237,7 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, free_netdev(slave_dev); return -ENOMEM; } - p->dp = &ds->ports[port]; + p->dp = port; INIT_LIST_HEAD(&p->mall_tc_list); p->xmit = dst->tag_ops->xmit; @@ -1245,12 +1245,12 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent, p->old_link = -1; p->old_duplex = -1; - ds->ports[port].netdev = slave_dev; + port->netdev = slave_dev; ret = register_netdev(slave_dev); if (ret) { netdev_err(master, "error %d registering interface %s\n", ret, slave_dev->name); - ds->ports[port].netdev = NULL; + port->netdev = NULL; free_percpu(p->stats64); free_netdev(slave_dev); return ret; -- cgit v1.2.3-55-g7522 From 13ead5c4f2e3b649156c74892f3cf4b62c8e3d0c Mon Sep 17 00:00:00 2001 
From: Florian Westphal Date: Sun, 6 Aug 2017 10:19:07 +0200 Subject: xfrm: check that cached bundle is still valid Quoting Ilan Tayari: 1. Set up a host-to-host IPSec tunnel (or transport, doesn't matter) 2. Ping over IPSec, or do something to populate the pcpu cache 3. Join a MC group, then leave MC group 4. Try to ping again using same CPU as before -> traffic doesn't egress the machine at all Ilan debugged the problem down to the fact that one of the path dsts devices point to lo due to earlier dst_dev_put(). In this case, dst is marked as DEAD and we cannot reuse the bundle. The cache only asserted that the requested policy and that of the cached bundle match, but its not enough - also verify the path is still valid. Fixes: ec30d78c14a813 ("xfrm: add xdst pcpu cache") Reported-by: Ayham Masood Tested-by: Ilan Tayari Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/xfrm/xfrm_policy.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 06c3bf7ab86b..8da428f56aec 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1818,7 +1818,8 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, xdst->num_pols == num_pols && !xfrm_pol_dead(xdst) && memcmp(xdst->pols, pols, - sizeof(struct xfrm_policy *) * num_pols) == 0) { + sizeof(struct xfrm_policy *) * num_pols) == 0 && + xfrm_bundle_ok(xdst)) { dst_hold(&xdst->u.dst); return xdst; } -- cgit v1.2.3-55-g7522 From 511aeaf46655d595f50e5e9f113836f19a78116b Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Sun, 6 Aug 2017 14:21:45 +0530 Subject: hamradio: baycom: make hdlcdrv_ops const Make hdlcdrv_ops structures const as they are only passed to hdlcdrv_register function. The corresponding argument is of type const, so make the structures const. Signed-off-by: Bhumika Goyal Signed-off-by: David S. 
Miller --- drivers/net/hamradio/baycom_par.c | 2 +- drivers/net/hamradio/baycom_ser_fdx.c | 2 +- drivers/net/hamradio/baycom_ser_hdx.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c index 92b13b39f426..e1783832d304 100644 --- a/drivers/net/hamradio/baycom_par.c +++ b/drivers/net/hamradio/baycom_par.c @@ -386,7 +386,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, /* --------------------------------------------------------------------- */ -static struct hdlcdrv_ops par96_ops = { +static const struct hdlcdrv_ops par96_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = par96_open, diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index d9a646acca20..190f66c88479 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -508,7 +508,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, /* --------------------------------------------------------------------- */ -static struct hdlcdrv_ops ser12_ops = { +static const struct hdlcdrv_ops ser12_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = ser12_open, diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c index f1c8a9ff3891..3c823c648cf5 100644 --- a/drivers/net/hamradio/baycom_ser_hdx.c +++ b/drivers/net/hamradio/baycom_ser_hdx.c @@ -542,7 +542,7 @@ static int baycom_ioctl(struct net_device *dev, struct ifreq *ifr, /* --------------------------------------------------------------------- */ -static struct hdlcdrv_ops ser12_ops = { +static const struct hdlcdrv_ops ser12_ops = { .drvname = bc_drvname, .drvinfo = bc_drvinfo, .open = ser12_open, -- cgit v1.2.3-55-g7522 From 6c2c1dcb185f1e44e1c895781dbaba40195234f9 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:39 +0300 Subject: net: dsa: Change DSA slave FDB API to be switchdev independent In order to support FDB add/del to be on a notifier chain the slave API need to be changed to be switchdev independent. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_common.c | 12 +++++------- drivers/net/dsa/b53/b53_priv.h | 8 +++----- drivers/net/dsa/microchip/ksz_common.c | 34 ++++++++++++++++------------------ drivers/net/dsa/mt7530.c | 14 ++++++-------- drivers/net/dsa/mv88e6xxx/chip.c | 12 +++++------- drivers/net/dsa/qca8k.c | 15 ++++++--------- include/net/dsa.h | 8 +++----- net/dsa/switch.c | 8 +++++--- 8 files changed, 49 insertions(+), 62 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 7f36d3e3c98b..53361796607a 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1214,8 +1214,7 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, } int b53_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { struct b53_device *priv = ds->priv; @@ -1230,22 +1229,21 @@ int b53_fdb_prepare(struct dsa_switch *ds, int port, EXPORT_SYMBOL(b53_fdb_prepare); void b53_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { struct b53_device *priv = ds->priv; - if (b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, true)) + if (b53_arl_op(priv, 0, port, addr, vid, true)) pr_err("%s: failed to add MAC address\n", __func__); } EXPORT_SYMBOL(b53_fdb_add); int b53_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct b53_device *priv = ds->priv; - return b53_arl_op(priv, 0, port, fdb->addr, fdb->vid, false); + return b53_arl_op(priv, 0, port, addr, vid, false); } EXPORT_SYMBOL(b53_fdb_del); diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 155a9c48c317..d417bcaec71d 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -397,13 +397,11 @@ int b53_vlan_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_vlan *vlan, switchdev_obj_dump_cb_t *cb); int b53_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); + const unsigned char *addr, u16 vid); void b53_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); + const unsigned char *addr, u16 vid); int b53_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb); + const unsigned char *addr, u16 vid); int b53_fdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, switchdev_obj_dump_cb_t *cb); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index b313ecdf2919..db828080ee93 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -679,8 +679,7 @@ static int ksz_port_vlan_dump(struct dsa_switch *ds, int port, } static int ksz_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { /* nothing needed */ @@ -707,8 +706,7 @@ struct alu_struct { }; static void ksz_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { struct ksz_device *dev = ds->priv; u32 alu_table[4]; @@ -717,12 +715,12 @@ static void ksz_port_fdb_add(struct dsa_switch *ds, int port, 
mutex_lock(&dev->alu_mutex); /* find any entry with mac & vid */ - data = fdb->vid << ALU_FID_INDEX_S; - data |= ((fdb->addr[0] << 8) | fdb->addr[1]); + data = vid << ALU_FID_INDEX_S; + data |= ((addr[0] << 8) | addr[1]); ksz_write32(dev, REG_SW_ALU_INDEX_0, data); - data = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); - data |= ((fdb->addr[4] << 8) | fdb->addr[5]); + data = ((addr[2] << 24) | (addr[3] << 16)); + data |= ((addr[4] << 8) | addr[5]); ksz_write32(dev, REG_SW_ALU_INDEX_1, data); /* start read operation */ @@ -740,12 +738,12 @@ static void ksz_port_fdb_add(struct dsa_switch *ds, int port, /* update ALU entry */ alu_table[0] = ALU_V_STATIC_VALID; alu_table[1] |= BIT(port); - if (fdb->vid) + if (vid) alu_table[1] |= ALU_V_USE_FID; - alu_table[2] = (fdb->vid << ALU_V_FID_S); - alu_table[2] |= ((fdb->addr[0] << 8) | fdb->addr[1]); - alu_table[3] = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); - alu_table[3] |= ((fdb->addr[4] << 8) | fdb->addr[5]); + alu_table[2] = (vid << ALU_V_FID_S); + alu_table[2] |= ((addr[0] << 8) | addr[1]); + alu_table[3] = ((addr[2] << 24) | (addr[3] << 16)); + alu_table[3] |= ((addr[4] << 8) | addr[5]); write_table(ds, alu_table); @@ -760,7 +758,7 @@ exit: } static int ksz_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct ksz_device *dev = ds->priv; u32 alu_table[4]; @@ -770,12 +768,12 @@ static int ksz_port_fdb_del(struct dsa_switch *ds, int port, mutex_lock(&dev->alu_mutex); /* read any entry with mac & vid */ - data = fdb->vid << ALU_FID_INDEX_S; - data |= ((fdb->addr[0] << 8) | fdb->addr[1]); + data = vid << ALU_FID_INDEX_S; + data |= ((addr[0] << 8) | addr[1]); ksz_write32(dev, REG_SW_ALU_INDEX_0, data); - data = ((fdb->addr[2] << 24) | (fdb->addr[3] << 16)); - data |= ((fdb->addr[4] << 8) | fdb->addr[5]); + data = ((addr[2] << 24) | (addr[3] << 16)); + data |= ((addr[4] << 8) | addr[5]); ksz_write32(dev, REG_SW_ALU_INDEX_1, data); /* start read operation */ diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 1e46418a3b74..430e3ab65a49 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -802,8 +802,7 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, static int mt7530_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { struct mt7530_priv *priv = ds->priv; int ret; @@ -813,7 +812,7 @@ mt7530_port_fdb_prepare(struct dsa_switch *ds, int port, * is called while the entry is still available. 
*/ mutex_lock(&priv->reg_mutex); - mt7530_fdb_write(priv, fdb->vid, 0, fdb->addr, -1, STATIC_ENT); + mt7530_fdb_write(priv, vid, 0, addr, -1, STATIC_ENT); ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); mutex_unlock(&priv->reg_mutex); @@ -822,28 +821,27 @@ mt7530_port_fdb_prepare(struct dsa_switch *ds, int port, static void mt7530_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { struct mt7530_priv *priv = ds->priv; u8 port_mask = BIT(port); mutex_lock(&priv->reg_mutex); - mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_ENT); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); mutex_unlock(&priv->reg_mutex); } static int mt7530_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct mt7530_priv *priv = ds->priv; int ret; u8 port_mask = BIT(port); mutex_lock(&priv->reg_mutex); - mt7530_fdb_write(priv, fdb->vid, port_mask, fdb->addr, -1, STATIC_EMP); + mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_EMP); ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); mutex_unlock(&priv->reg_mutex); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 521738c4cd17..823697526db4 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1407,8 +1407,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, } static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { /* We don't need any dynamic resource from the kernel (yet), * so skip the prepare phase. 
@@ -1417,13 +1416,12 @@ static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, } static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { struct mv88e6xxx_chip *chip = ds->priv; mutex_lock(&chip->reg_lock); - if (mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid, + if (mv88e6xxx_port_db_load_purge(chip, port, addr, vid, MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)) dev_err(ds->dev, "p%d: failed to load unicast MAC address\n", port); @@ -1431,13 +1429,13 @@ static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, } static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct mv88e6xxx_chip *chip = ds->priv; int err; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_db_load_purge(chip, port, fdb->addr, fdb->vid, + err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, MV88E6XXX_G1_ATU_DATA_STATE_UNUSED); mutex_unlock(&chip->reg_lock); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 36c169b0c705..2fb5df9dbd64 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -778,8 +778,7 @@ qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, static int qca8k_port_fdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; @@ -788,33 +787,31 @@ qca8k_port_fdb_prepare(struct dsa_switch *ds, int port, * when port_fdb_add is called an entry is still available. Otherwise * the last free entry might have been used up by auto learning */ - return qca8k_port_fdb_insert(priv, fdb->addr, 0, fdb->vid); + return qca8k_port_fdb_insert(priv, addr, 0, vid); } static void qca8k_port_fdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const unsigned char *addr, u16 vid) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u16 port_mask = BIT(port); /* Update the FDB entry adding the port_mask */ - qca8k_port_fdb_insert(priv, fdb->addr, port_mask, fdb->vid); + qca8k_port_fdb_insert(priv, addr, port_mask, vid); } static int qca8k_port_fdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb) + const unsigned char *addr, u16 vid) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u16 port_mask = BIT(port); - u16 vid = fdb->vid; if (!vid) vid = 1; - return qca8k_fdb_del(priv, fdb->addr, port_mask, vid); + return qca8k_fdb_del(priv, addr, port_mask, vid); } static int diff --git a/include/net/dsa.h b/include/net/dsa.h index 0b1a0622b33c..ba11005dac8c 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -386,13 +386,11 @@ struct dsa_switch_ops { * Forwarding database */ int (*port_fdb_prepare)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); + const unsigned char *addr, u16 vid); void (*port_fdb_add)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); + const unsigned char *addr, u16 vid); int (*port_fdb_del)(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_fdb *fdb); + const unsigned char *addr, u16 vid); int (*port_fdb_dump)(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, switchdev_obj_dump_cb_t 
*cb); diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 97e2e9c8cf3f..a9edfbad3889 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -94,10 +94,11 @@ static int dsa_switch_fdb_add(struct dsa_switch *ds, if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add) return -EOPNOTSUPP; - return ds->ops->port_fdb_prepare(ds, info->port, fdb, trans); + return ds->ops->port_fdb_prepare(ds, info->port, fdb->addr, + fdb->vid); } - ds->ops->port_fdb_add(ds, info->port, fdb, trans); + ds->ops->port_fdb_add(ds, info->port, fdb->addr, fdb->vid); return 0; } @@ -114,7 +115,8 @@ static int dsa_switch_fdb_del(struct dsa_switch *ds, if (!ds->ops->port_fdb_del) return -EOPNOTSUPP; - return ds->ops->port_fdb_del(ds, info->port, fdb); + return ds->ops->port_fdb_del(ds, info->port, fdb->addr, + fdb->vid); } static int dsa_switch_mdb_add(struct dsa_switch *ds, -- cgit v1.2.3-55-g7522 From 1b6dd556c3045ca5fa31cc1e98a4a43afa680e1e Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:40 +0300 Subject: net: dsa: Remove prepare phase for FDB The prepare phase for FDB add is unneeded because most of DSA devices can have failures during bus transactions (SPI, I2C, etc.), thus, the prepare phase cannot guarantee success of the commit stage. The support for learning FDB through notification chain, which will be introduced in the following patches, will provide the ability to notify back the bridge about successful offload. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_common.c | 17 +++-------------- drivers/net/dsa/b53/b53_priv.h | 6 ++---- drivers/net/dsa/bcm_sf2.c | 1 - drivers/net/dsa/microchip/ksz_common.c | 24 ++++++++++-------------- drivers/net/dsa/mt7530.c | 25 ++++--------------------- drivers/net/dsa/mv88e6xxx/chip.c | 23 +++++++---------------- drivers/net/dsa/qca8k.c | 18 +----------------- include/net/dsa.h | 4 +--- net/dsa/dsa_priv.h | 4 +--- net/dsa/port.c | 4 +--- net/dsa/slave.c | 4 +++- net/dsa/switch.c | 14 +++----------- 12 files changed, 36 insertions(+), 108 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 53361796607a..3cf4f0a04453 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1213,8 +1213,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port, return b53_arl_rw_op(dev, 0); } -int b53_fdb_prepare(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) +int b53_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct b53_device *priv = ds->priv; @@ -1224,17 +1224,7 @@ int b53_fdb_prepare(struct dsa_switch *ds, int port, if (is5325(priv) || is5365(priv)) return -EOPNOTSUPP; - return 0; -} -EXPORT_SYMBOL(b53_fdb_prepare); - -void b53_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) -{ - struct b53_device *priv = ds->priv; - - if (b53_arl_op(priv, 0, port, addr, vid, true)) - pr_err("%s: failed to add MAC address\n", __func__); + return b53_arl_op(priv, 0, port, addr, vid, true); } EXPORT_SYMBOL(b53_fdb_add); @@ -1563,7 +1553,6 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_vlan_add = b53_vlan_add, .port_vlan_del = b53_vlan_del, .port_vlan_dump = b53_vlan_dump, - .port_fdb_prepare = b53_fdb_prepare, .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index 
d417bcaec71d..f29c892efa6b 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -396,10 +396,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port, int b53_vlan_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_vlan *vlan, switchdev_obj_dump_cb_t *cb); -int b53_fdb_prepare(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid); -void b53_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid); +int b53_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid); int b53_fdb_del(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int b53_fdb_dump(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 558667c814c9..1907b27297c3 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1022,7 +1022,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .port_vlan_add = b53_vlan_add, .port_vlan_del = b53_vlan_del, .port_vlan_dump = b53_vlan_dump, - .port_fdb_prepare = b53_fdb_prepare, .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index db828080ee93..b55f3649ff93 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -678,14 +678,6 @@ static int ksz_port_vlan_dump(struct dsa_switch *ds, int port, return err; } -static int ksz_port_fdb_prepare(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) -{ - /* nothing needed */ - - return 0; -} - struct alu_struct { /* entry 1 */ u8 is_static:1; @@ -705,12 +697,13 @@ struct alu_struct { u8 mac[ETH_ALEN]; }; -static void ksz_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) +static int ksz_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct ksz_device *dev = ds->priv; u32 alu_table[4]; u32 data; + int ret = 0; mutex_lock(&dev->alu_mutex); @@ -727,7 +720,8 @@ static void ksz_port_fdb_add(struct dsa_switch *ds, int port, ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START); /* wait to be finished */ - if (wait_alu_ready(dev, ALU_START, 1000) < 0) { + ret = wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) { dev_dbg(dev->dev, "Failed to read ALU\n"); goto exit; } @@ -750,11 +744,14 @@ static void ksz_port_fdb_add(struct dsa_switch *ds, int port, ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START); /* wait to be finished */ - if (wait_alu_ready(dev, ALU_START, 1000) < 0) - dev_dbg(dev->dev, "Failed to read ALU\n"); + ret = wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) + dev_dbg(dev->dev, "Failed to write ALU\n"); exit: mutex_unlock(&dev->alu_mutex); + + return ret; } static int ksz_port_fdb_del(struct dsa_switch *ds, int port, @@ -1128,7 +1125,6 @@ static const struct dsa_switch_ops ksz_switch_ops = { .port_vlan_add = ksz_port_vlan_add, .port_vlan_del = ksz_port_vlan_del, .port_vlan_dump = ksz_port_vlan_dump, - .port_fdb_prepare = ksz_port_fdb_prepare, .port_fdb_dump = ksz_port_fdb_dump, .port_fdb_add = ksz_port_fdb_add, .port_fdb_del = ksz_port_fdb_del, diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 430e3ab65a49..f92aae8947e6 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -801,35 +801,19 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port, } static int -mt7530_port_fdb_prepare(struct dsa_switch *ds, int port, - const 
unsigned char *addr, u16 vid) -{ - struct mt7530_priv *priv = ds->priv; - int ret; - - /* Because auto-learned entrie shares the same FDB table. - * an entry is reserved with no port_mask to make sure fdb_add - * is called while the entry is still available. - */ - mutex_lock(&priv->reg_mutex); - mt7530_fdb_write(priv, vid, 0, addr, -1, STATIC_ENT); - ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); - mutex_unlock(&priv->reg_mutex); - - return ret; -} - -static void mt7530_port_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid) { struct mt7530_priv *priv = ds->priv; + int ret; u8 port_mask = BIT(port); mutex_lock(&priv->reg_mutex); mt7530_fdb_write(priv, vid, port_mask, addr, -1, STATIC_ENT); - mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); + ret = mt7530_fdb_cmd(priv, MT7530_FDB_WRITE, 0); mutex_unlock(&priv->reg_mutex); + + return ret; } static int @@ -1013,7 +997,6 @@ static struct dsa_switch_ops mt7530_switch_ops = { .port_stp_state_set = mt7530_stp_state_set, .port_bridge_join = mt7530_port_bridge_join, .port_bridge_leave = mt7530_port_bridge_leave, - .port_fdb_prepare = mt7530_port_fdb_prepare, .port_fdb_add = mt7530_port_fdb_add, .port_fdb_del = mt7530_port_fdb_del, .port_fdb_dump = mt7530_port_fdb_dump, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 823697526db4..68cc1f6fb75e 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1406,26 +1406,18 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, return mv88e6xxx_g1_atu_loadpurge(chip, vlan.fid, &entry); } -static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) -{ - /* We don't need any dynamic resource from the kernel (yet), - * so skip the prepare phase. - */ - return 0; -} - -static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) +static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) { struct mv88e6xxx_chip *chip = ds->priv; + int err; mutex_lock(&chip->reg_lock); - if (mv88e6xxx_port_db_load_purge(chip, port, addr, vid, - MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC)) - dev_err(ds->dev, "p%d: failed to load unicast MAC address\n", - port); + err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, + MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC); mutex_unlock(&chip->reg_lock); + + return err; } static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, @@ -3905,7 +3897,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, .port_vlan_dump = mv88e6xxx_port_vlan_dump, - .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, .port_fdb_add = mv88e6xxx_port_fdb_add, .port_fdb_del = mv88e6xxx_port_fdb_del, .port_fdb_dump = mv88e6xxx_port_fdb_dump, diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 2fb5df9dbd64..f8ef823349bc 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -777,28 +777,13 @@ qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr, } static int -qca8k_port_fdb_prepare(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) -{ - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; - - /* The FDB table for static and auto learned entries is the same. We - * need to reserve an entry with no port_mask set to make sure that - * when port_fdb_add is called an entry is still available. 
Otherwise - * the last free entry might have been used up by auto learning - */ - return qca8k_port_fdb_insert(priv, addr, 0, vid); -} - -static void qca8k_port_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; u16 port_mask = BIT(port); - /* Update the FDB entry adding the port_mask */ - qca8k_port_fdb_insert(priv, addr, port_mask, vid); + return qca8k_port_fdb_insert(priv, addr, port_mask, vid); } static int @@ -866,7 +851,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = { .port_stp_state_set = qca8k_port_stp_state_set, .port_bridge_join = qca8k_port_bridge_join, .port_bridge_leave = qca8k_port_bridge_leave, - .port_fdb_prepare = qca8k_port_fdb_prepare, .port_fdb_add = qca8k_port_fdb_add, .port_fdb_del = qca8k_port_fdb_del, .port_fdb_dump = qca8k_port_fdb_dump, diff --git a/include/net/dsa.h b/include/net/dsa.h index ba11005dac8c..446fc438cb80 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -385,9 +385,7 @@ struct dsa_switch_ops { /* * Forwarding database */ - int (*port_fdb_prepare)(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid); - void (*port_fdb_add)(struct dsa_switch *ds, int port, + int (*port_fdb_add)(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int (*port_fdb_del)(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 945ded148c9c..04cd711ed2fd 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -44,7 +44,6 @@ struct dsa_notifier_bridge_info { /* DSA_NOTIFIER_FDB_* */ struct dsa_notifier_fdb_info { const struct switchdev_obj_port_fdb *fdb; - struct switchdev_trans *trans; int sw_index; int port; }; @@ -122,8 +121,7 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering, int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock, struct switchdev_trans *trans); int dsa_port_fdb_add(struct dsa_port *dp, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans); + const struct switchdev_obj_port_fdb *fdb); int dsa_port_fdb_del(struct dsa_port *dp, const struct switchdev_obj_port_fdb *fdb); int dsa_port_fdb_dump(struct dsa_port *dp, struct switchdev_obj_port_fdb *fdb, diff --git a/net/dsa/port.c b/net/dsa/port.c index efc3bce3a89d..bd271b9cc1f2 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -147,13 +147,11 @@ int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock, } int dsa_port_fdb_add(struct dsa_port *dp, - const struct switchdev_obj_port_fdb *fdb, - struct switchdev_trans *trans) + const struct switchdev_obj_port_fdb *fdb) { struct dsa_notifier_fdb_info info = { .sw_index = dp->ds->index, .port = dp->index, - .trans = trans, .fdb = fdb, }; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 3b36c47472c6..bb7ab26ef768 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -251,7 +251,9 @@ static int dsa_slave_port_obj_add(struct net_device *dev, switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_FDB: - err = dsa_port_fdb_add(dp, SWITCHDEV_OBJ_PORT_FDB(obj), trans); + if (switchdev_trans_ph_prepare(trans)) + return 0; + err = dsa_port_fdb_add(dp, SWITCHDEV_OBJ_PORT_FDB(obj)); break; case SWITCHDEV_OBJ_ID_PORT_MDB: err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans); diff --git a/net/dsa/switch.c b/net/dsa/switch.c index a9edfbad3889..eb20e0fee0e1 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -84,23 +84,15 @@ static int dsa_switch_fdb_add(struct dsa_switch *ds, struct 
dsa_notifier_fdb_info *info) { const struct switchdev_obj_port_fdb *fdb = info->fdb; - struct switchdev_trans *trans = info->trans; /* Do not care yet about other switch chips of the fabric */ if (ds->index != info->sw_index) return 0; - if (switchdev_trans_ph_prepare(trans)) { - if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add) - return -EOPNOTSUPP; - - return ds->ops->port_fdb_prepare(ds, info->port, fdb->addr, - fdb->vid); - } - - ds->ops->port_fdb_add(ds, info->port, fdb->addr, fdb->vid); + if (!ds->ops->port_fdb_add) + return -EOPNOTSUPP; - return 0; + return ds->ops->port_fdb_add(ds, info->port, fdb->addr, fdb->vid); } static int dsa_switch_fdb_del(struct dsa_switch *ds, -- cgit v1.2.3-55-g7522 From 2acf4e6a890b0228ed19b228063d69666f61ee19 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:41 +0300 Subject: net: dsa: Remove switchdev dependency from DSA switch notifier chain Currently, the switchdev objects are embedded inside the DSA notifier info. This patch removes this dependency. This is done as a preparation stage before adding support for learning FDB through the switchdev notification chain. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Florian Fainelli Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- net/dsa/dsa_priv.h | 11 ++++++----- net/dsa/port.c | 15 +++++++++------ net/dsa/slave.c | 6 ++++-- net/dsa/switch.c | 11 ++++------- 4 files changed, 23 insertions(+), 20 deletions(-) diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 04cd711ed2fd..c0ee6a7694f8 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -43,9 +43,10 @@ struct dsa_notifier_bridge_info { /* DSA_NOTIFIER_FDB_* */ struct dsa_notifier_fdb_info { - const struct switchdev_obj_port_fdb *fdb; int sw_index; int port; + const unsigned char *addr; + u16 vid; }; /* DSA_NOTIFIER_MDB_* */ @@ -120,10 +121,10 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering, struct switchdev_trans *trans); int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock, struct switchdev_trans *trans); -int dsa_port_fdb_add(struct dsa_port *dp, - const struct switchdev_obj_port_fdb *fdb); -int dsa_port_fdb_del(struct dsa_port *dp, - const struct switchdev_obj_port_fdb *fdb); +int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr, + u16 vid); +int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr, + u16 vid); int dsa_port_fdb_dump(struct dsa_port *dp, struct switchdev_obj_port_fdb *fdb, switchdev_obj_dump_cb_t *cb); int dsa_port_mdb_add(struct dsa_port *dp, diff --git a/net/dsa/port.c b/net/dsa/port.c index bd271b9cc1f2..86e0585215bf 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -146,25 +146,28 @@ int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock, return dsa_port_notify(dp, DSA_NOTIFIER_AGEING_TIME, &info); } -int dsa_port_fdb_add(struct dsa_port *dp, - const struct switchdev_obj_port_fdb *fdb) +int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr, + u16 vid) { struct dsa_notifier_fdb_info info = { .sw_index = dp->ds->index, .port = dp->index, - .fdb = fdb, + .addr = addr, + .vid = vid, }; return dsa_port_notify(dp, DSA_NOTIFIER_FDB_ADD, &info); } -int dsa_port_fdb_del(struct dsa_port *dp, - const struct switchdev_obj_port_fdb *fdb) +int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr, + u16 vid) { struct dsa_notifier_fdb_info info = { .sw_index = dp->ds->index, .port = dp->index, - .fdb = fdb, + .addr = addr, + .vid = vid, + }; return dsa_port_notify(dp, 
DSA_NOTIFIER_FDB_DEL, &info); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index bb7ab26ef768..6a1d4d6d212b 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -253,7 +253,8 @@ static int dsa_slave_port_obj_add(struct net_device *dev, case SWITCHDEV_OBJ_ID_PORT_FDB: if (switchdev_trans_ph_prepare(trans)) return 0; - err = dsa_port_fdb_add(dp, SWITCHDEV_OBJ_PORT_FDB(obj)); + err = dsa_port_fdb_add(dp, SWITCHDEV_OBJ_PORT_FDB(obj)->addr, + SWITCHDEV_OBJ_PORT_FDB(obj)->vid); break; case SWITCHDEV_OBJ_ID_PORT_MDB: err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans); @@ -279,7 +280,8 @@ static int dsa_slave_port_obj_del(struct net_device *dev, switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_FDB: - err = dsa_port_fdb_del(dp, SWITCHDEV_OBJ_PORT_FDB(obj)); + err = dsa_port_fdb_del(dp, SWITCHDEV_OBJ_PORT_FDB(obj)->addr, + SWITCHDEV_OBJ_PORT_FDB(obj)->vid); break; case SWITCHDEV_OBJ_ID_PORT_MDB: err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); diff --git a/net/dsa/switch.c b/net/dsa/switch.c index eb20e0fee0e1..e6c06aa349a6 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -83,8 +83,6 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds, static int dsa_switch_fdb_add(struct dsa_switch *ds, struct dsa_notifier_fdb_info *info) { - const struct switchdev_obj_port_fdb *fdb = info->fdb; - /* Do not care yet about other switch chips of the fabric */ if (ds->index != info->sw_index) return 0; @@ -92,14 +90,13 @@ static int dsa_switch_fdb_add(struct dsa_switch *ds, if (!ds->ops->port_fdb_add) return -EOPNOTSUPP; - return ds->ops->port_fdb_add(ds, info->port, fdb->addr, fdb->vid); + return ds->ops->port_fdb_add(ds, info->port, info->addr, + info->vid); } static int dsa_switch_fdb_del(struct dsa_switch *ds, struct dsa_notifier_fdb_info *info) { - const struct switchdev_obj_port_fdb *fdb = info->fdb; - /* Do not care yet about other switch chips of the fabric */ if (ds->index != info->sw_index) return 0; @@ -107,8 +104,8 @@ static int dsa_switch_fdb_del(struct dsa_switch *ds, if (!ds->ops->port_fdb_del) return -EOPNOTSUPP; - return ds->ops->port_fdb_del(ds, info->port, fdb->addr, - fdb->vid); + return ds->ops->port_fdb_del(ds, info->port, info->addr, + info->vid); } static int dsa_switch_mdb_add(struct dsa_switch *ds, -- cgit v1.2.3-55-g7522 From c9eb3e0f870105242a15a5e628ed202cf32afe0d Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:42 +0300 Subject: net: dsa: Add support for learning FDB through notification Add support for learning FDB through notification. The driver defers the hardware update via ordered work queue. In case of a successful FDB add a notification is sent back to bridge. In case of hw FDB del failure the static FDB will be deleted from the bridge, thus, the interface is moved to down state in order to indicate inconsistent situation. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/dsa.c | 13 ++++++ net/dsa/dsa_priv.h | 1 + net/dsa/slave.c | 127 ++++++++++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 139 insertions(+), 2 deletions(-) diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 4118848b8e58..99e38af85fc5 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -282,10 +282,22 @@ static struct packet_type dsa_pack_type __read_mostly = { .func = dsa_switch_rcv, }; +static struct workqueue_struct *dsa_owq; + +bool dsa_schedule_work(struct work_struct *work) +{ + return queue_work(dsa_owq, work); +} + static int __init dsa_init_module(void) { int rc; + dsa_owq = alloc_ordered_workqueue("dsa_ordered", + WQ_MEM_RECLAIM); + if (!dsa_owq) + return -ENOMEM; + rc = dsa_slave_register_notifier(); if (rc) return rc; @@ -305,6 +317,7 @@ static void __exit dsa_cleanup_module(void) dsa_slave_unregister_notifier(); dev_remove_pack(&dsa_pack_type); dsa_legacy_unregister(); + destroy_workqueue(dsa_owq); } module_exit(dsa_cleanup_module); diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index c0ee6a7694f8..fe90e6475297 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -106,6 +106,7 @@ void dsa_cpu_dsa_destroy(struct dsa_port *dport); const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol); int dsa_cpu_port_ethtool_setup(struct dsa_port *cpu_dp); void dsa_cpu_port_ethtool_restore(struct dsa_port *cpu_dp); +bool dsa_schedule_work(struct work_struct *work); /* legacy.c */ int dsa_legacy_register(void); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 6a1d4d6d212b..064f833a0422 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1332,19 +1332,142 @@ static int dsa_slave_netdevice_event(struct notifier_block *nb, return NOTIFY_DONE; } +struct dsa_switchdev_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct net_device *dev; + unsigned long event; +}; + +static void dsa_slave_switchdev_event_work(struct work_struct *work) +{ + struct dsa_switchdev_event_work *switchdev_work = + container_of(work, struct dsa_switchdev_event_work, work); + struct net_device *dev = switchdev_work->dev; + struct switchdev_notifier_fdb_info *fdb_info; + struct dsa_slave_priv *p = netdev_priv(dev); + int err; + + rtnl_lock(); + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + fdb_info = &switchdev_work->fdb_info; + err = dsa_port_fdb_add(p->dp, fdb_info->addr, fdb_info->vid); + if (err) { + netdev_dbg(dev, "fdb add failed err=%d\n", err); + break; + } + call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, dev, + &fdb_info->info); + break; + + case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb_info = &switchdev_work->fdb_info; + err = dsa_port_fdb_del(p->dp, fdb_info->addr, fdb_info->vid); + if (err) { + netdev_dbg(dev, "fdb del failed err=%d\n", err); + dev_close(dev); + } + break; + } + rtnl_unlock(); + + kfree(switchdev_work->fdb_info.addr); + kfree(switchdev_work); + dev_put(dev); +} + +static int +dsa_slave_switchdev_fdb_work_init(struct dsa_switchdev_event_work * + switchdev_work, + const struct switchdev_notifier_fdb_info * + fdb_info) +{ + memcpy(&switchdev_work->fdb_info, fdb_info, + sizeof(switchdev_work->fdb_info)); + switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!switchdev_work->fdb_info.addr) + return -ENOMEM; + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, + fdb_info->addr); + return 0; +} + +/* Called under rcu_read_lock() */ +static int dsa_slave_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device 
*dev = switchdev_notifier_info_to_dev(ptr); + struct dsa_switchdev_event_work *switchdev_work; + + if (!dsa_slave_dev_check(dev)) + return NOTIFY_DONE; + + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (!switchdev_work) + return NOTIFY_BAD; + + INIT_WORK(&switchdev_work->work, + dsa_slave_switchdev_event_work); + switchdev_work->dev = dev; + switchdev_work->event = event; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */ + case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (dsa_slave_switchdev_fdb_work_init(switchdev_work, + ptr)) + goto err_fdb_work_init; + dev_hold(dev); + break; + default: + kfree(switchdev_work); + return NOTIFY_DONE; + } + + dsa_schedule_work(&switchdev_work->work); + return NOTIFY_OK; + +err_fdb_work_init: + kfree(switchdev_work); + return NOTIFY_BAD; +} + static struct notifier_block dsa_slave_nb __read_mostly = { - .notifier_call = dsa_slave_netdevice_event, + .notifier_call = dsa_slave_netdevice_event, +}; + +static struct notifier_block dsa_slave_switchdev_notifier = { + .notifier_call = dsa_slave_switchdev_event, }; int dsa_slave_register_notifier(void) { - return register_netdevice_notifier(&dsa_slave_nb); + int err; + + err = register_netdevice_notifier(&dsa_slave_nb); + if (err) + return err; + + err = register_switchdev_notifier(&dsa_slave_switchdev_notifier); + if (err) + goto err_switchdev_nb; + + return 0; + +err_switchdev_nb: + unregister_netdevice_notifier(&dsa_slave_nb); + return err; } void dsa_slave_unregister_notifier(void) { int err; + err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier); + if (err) + pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err); + err = unregister_netdevice_notifier(&dsa_slave_nb); if (err) pr_err("DSA: failed to unregister slave notifier (%d)\n", err); -- cgit v1.2.3-55-g7522 From 37b8da1a3c68501c2fba94951f8f59280c7a9752 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:43 +0300 Subject: net: dsa: Move FDB add/del implementation inside DSA Currently DSA uses switchdev's implementation of FDB add/del ndos. This patch moves the implementation inside DSA in order to support the legacy way for static FDB configuration. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- net/dsa/dsa_priv.h | 7 +++++++ net/dsa/legacy.c | 22 ++++++++++++++++++++++ net/dsa/slave.c | 14 ++------------ 3 files changed, 31 insertions(+), 12 deletions(-) diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index fe90e6475297..5d4e668b9471 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -111,6 +111,13 @@ bool dsa_schedule_work(struct work_struct *work); /* legacy.c */ int dsa_legacy_register(void); void dsa_legacy_unregister(void); +int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags); +int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid); /* port.c */ int dsa_port_set_state(struct dsa_port *dp, u8 state, diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c index 612acf16d573..91e6f7981d39 100644 --- a/net/dsa/legacy.c +++ b/net/dsa/legacy.c @@ -739,6 +739,28 @@ static int dsa_resume(struct device *d) } #endif +/* legacy way, bypassing the bridge *****************************************/ +int dsa_legacy_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_port *dp = p->dp; + + return dsa_port_fdb_add(dp, addr, vid); +} + +int dsa_legacy_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_port *dp = p->dp; + + return dsa_port_fdb_del(dp, addr, vid); +} + static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume); static const struct of_device_id dsa_of_match_table[] = { diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 064f833a0422..84a8694c69d0 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -250,12 +250,6 @@ static int dsa_slave_port_obj_add(struct net_device *dev, */ switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_FDB: - if (switchdev_trans_ph_prepare(trans)) - return 0; - err = dsa_port_fdb_add(dp, SWITCHDEV_OBJ_PORT_FDB(obj)->addr, - SWITCHDEV_OBJ_PORT_FDB(obj)->vid); - break; case SWITCHDEV_OBJ_ID_PORT_MDB: err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj), trans); break; @@ -279,10 +273,6 @@ static int dsa_slave_port_obj_del(struct net_device *dev, int err; switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = dsa_port_fdb_del(dp, SWITCHDEV_OBJ_PORT_FDB(obj)->addr, - SWITCHDEV_OBJ_PORT_FDB(obj)->vid); - break; case SWITCHDEV_OBJ_ID_PORT_MDB: err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); break; @@ -985,8 +975,8 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_change_rx_flags = dsa_slave_change_rx_flags, .ndo_set_rx_mode = dsa_slave_set_rx_mode, .ndo_set_mac_address = dsa_slave_set_mac_address, - .ndo_fdb_add = switchdev_port_fdb_add, - .ndo_fdb_del = switchdev_port_fdb_del, + .ndo_fdb_add = dsa_legacy_fdb_add, + .ndo_fdb_del = dsa_legacy_fdb_del, .ndo_fdb_dump = switchdev_port_fdb_dump, .ndo_do_ioctl = dsa_slave_ioctl, .ndo_get_iflink = dsa_slave_get_iflink, -- cgit v1.2.3-55-g7522 From c9e2105e295ebd123f9cded311f3e5678218f3f4 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:44 +0300 Subject: net: dsa: Add support for querying supported bridge flags The DSA drivers do not support bridge flags offload. 
Yet, this attribute should be added in order for the bridge to fail when one tries set a flag on the port, as explained in commit dc0ecabd6231 ("net: switchdev: Add support for querying supported bridge flags by hardware"). Signed-off-by: Arkadi Sharshevsky Reviewed-by: Vivien Didelot Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- net/dsa/slave.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 84a8694c69d0..e9c1d8c5de0f 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -324,6 +324,9 @@ static int dsa_slave_port_attr_get(struct net_device *dev, attr->u.ppid.id_len = sizeof(ds->index); memcpy(&attr->u.ppid.id, &ds->index, attr->u.ppid.id_len); break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS_SUPPORT: + attr->u.brport_flags_support = 0; + break; default: return -EOPNOTSUPP; } -- cgit v1.2.3-55-g7522 From a0b6b8c9fa3c73a523735b8b5f87f59211c70a4e Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:45 +0300 Subject: net: dsa: Remove support for vlan dump from DSA's drivers This is done as a preparation before removing support for vlan dump from DSA core. The vlans are synced with the bridge and thus there is no need for special dump operation support. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/dsa/b53/b53_common.c | 44 -------------------------- drivers/net/dsa/b53/b53_priv.h | 3 -- drivers/net/dsa/bcm_sf2.c | 1 - drivers/net/dsa/dsa_loop.c | 38 ----------------------- drivers/net/dsa/microchip/ksz_common.c | 41 ------------------------- drivers/net/dsa/mv88e6xxx/chip.c | 56 ---------------------------------- 6 files changed, 183 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 3cf4f0a04453..0176d8087344 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1053,49 +1053,6 @@ int b53_vlan_del(struct dsa_switch *ds, int port, } EXPORT_SYMBOL(b53_vlan_del); -int b53_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct b53_device *dev = ds->priv; - u16 vid, vid_start = 0, pvid; - struct b53_vlan *vl; - int err = 0; - - if (is5325(dev) || is5365(dev)) - vid_start = 1; - - b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); - - /* Use our software cache for dumps, since we do not have any HW - * operation returning only the used/valid VLANs - */ - for (vid = vid_start; vid < dev->num_vlans; vid++) { - vl = &dev->vlans[vid]; - - if (!vl->valid) - continue; - - if (!(vl->members & BIT(port))) - continue; - - vlan->vid_begin = vlan->vid_end = vid; - vlan->flags = 0; - - if (vl->untag & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - if (pvid == vid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } - - return err; -} -EXPORT_SYMBOL(b53_vlan_dump); - /* Address Resolution Logic routines */ static int b53_arl_op_wait(struct b53_device *dev) { @@ -1552,7 +1509,6 @@ static const struct dsa_switch_ops b53_switch_ops = { .port_vlan_prepare = b53_vlan_prepare, .port_vlan_add = b53_vlan_add, .port_vlan_del = b53_vlan_del, - .port_vlan_dump = b53_vlan_dump, .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index f29c892efa6b..af5d6c166bff 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h 
@@ -393,9 +393,6 @@ void b53_vlan_add(struct dsa_switch *ds, int port, struct switchdev_trans *trans); int b53_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); -int b53_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb); int b53_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int b53_fdb_del(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 1907b27297c3..bbcb4053e04e 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1021,7 +1021,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = { .port_vlan_prepare = b53_vlan_prepare, .port_vlan_add = b53_vlan_add, .port_vlan_del = b53_vlan_del, - .port_vlan_dump = b53_vlan_dump, .port_fdb_dump = b53_fdb_dump, .port_fdb_add = b53_fdb_add, .port_fdb_del = b53_fdb_del, diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index fdd8f3872102..76d66604a34e 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -257,43 +257,6 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, return 0; } -static int dsa_loop_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct dsa_loop_priv *ps = ds->priv; - struct mii_bus *bus = ps->bus; - struct dsa_loop_vlan *vl; - u16 vid, vid_start = 0; - int err = 0; - - dev_dbg(ds->dev, "%s\n", __func__); - - /* Just do a sleeping operation to make lockdep checks effective */ - mdiobus_read(bus, ps->port_base + port, MII_BMSR); - - for (vid = vid_start; vid < DSA_LOOP_VLANS; vid++) { - vl = &ps->vlans[vid]; - - if (!(vl->members & BIT(port))) - continue; - - vlan->vid_begin = vlan->vid_end = vid; - vlan->flags = 0; - - if (vl->untagged & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - if (ps->pvid == vid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } - - return err; -} - static struct dsa_switch_ops dsa_loop_driver = { .get_tag_protocol = dsa_loop_get_protocol, .setup = dsa_loop_setup, @@ -310,7 +273,6 @@ static struct dsa_switch_ops dsa_loop_driver = { .port_vlan_prepare = dsa_loop_port_vlan_prepare, .port_vlan_add = dsa_loop_port_vlan_add, .port_vlan_del = dsa_loop_port_vlan_del, - .port_vlan_dump = dsa_loop_port_vlan_dump, }; static int dsa_loop_drv_probe(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index b55f3649ff93..a53ce59e0442 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -638,46 +638,6 @@ static int ksz_port_vlan_del(struct dsa_switch *ds, int port, return 0; } -static int ksz_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct ksz_device *dev = ds->priv; - u16 vid; - u16 data; - struct vlan_table *vlan_cache; - int err = 0; - - mutex_lock(&dev->vlan_mutex); - - /* use dev->vlan_cache due to lack of searching valid vlan entry */ - for (vid = vlan->vid_begin; vid < dev->num_vlans; vid++) { - vlan_cache = &dev->vlan_cache[vid]; - - if (!(vlan_cache->table[0] & VLAN_VALID)) - continue; - - vlan->vid_begin = vid; - vlan->vid_end = vid; - vlan->flags = 0; - if (vlan_cache->table[2] & BIT(port)) { - if (vlan_cache->table[1] & BIT(port)) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &data); 
- if (vid == (data & 0xFFFFF)) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } - } - - mutex_unlock(&dev->vlan_mutex); - - return err; -} - struct alu_struct { /* entry 1 */ u8 is_static:1; @@ -1124,7 +1084,6 @@ static const struct dsa_switch_ops ksz_switch_ops = { .port_vlan_prepare = ksz_port_vlan_prepare, .port_vlan_add = ksz_port_vlan_add, .port_vlan_del = ksz_port_vlan_del, - .port_vlan_dump = ksz_port_vlan_dump, .port_fdb_dump = ksz_port_fdb_dump, .port_fdb_add = ksz_port_fdb_add, .port_fdb_del = ksz_port_fdb_del, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 68cc1f6fb75e..5bb1138818ef 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1011,61 +1011,6 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip, return chip->info->ops->vtu_loadpurge(chip, entry); } -static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct mv88e6xxx_chip *chip = ds->priv; - struct mv88e6xxx_vtu_entry next = { - .vid = chip->info->max_vid, - }; - u16 pvid; - int err; - - if (!chip->info->max_vid) - return -EOPNOTSUPP; - - mutex_lock(&chip->reg_lock); - - err = mv88e6xxx_port_get_pvid(chip, port, &pvid); - if (err) - goto unlock; - - do { - err = mv88e6xxx_vtu_getnext(chip, &next); - if (err) - break; - - if (!next.valid) - break; - - if (next.member[port] == - MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_NON_MEMBER) - continue; - - /* reinit and dump this VLAN obj */ - vlan->vid_begin = next.vid; - vlan->vid_end = next.vid; - vlan->flags = 0; - - if (next.member[port] == - MV88E6XXX_G1_VTU_DATA_MEMBER_TAG_UNTAGGED) - vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; - - if (next.vid == pvid) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - - err = cb(&vlan->obj); - if (err) - break; - } while (next.vid < chip->info->max_vid); - -unlock: - mutex_unlock(&chip->reg_lock); - - return err; -} - static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid) { DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); @@ -3896,7 +3841,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, - .port_vlan_dump = mv88e6xxx_port_vlan_dump, .port_fdb_add = mv88e6xxx_port_fdb_add, .port_fdb_del = mv88e6xxx_port_fdb_del, .port_fdb_dump = mv88e6xxx_port_fdb_dump, -- cgit v1.2.3-55-g7522 From c069fcd82c571953b8aaf68769afe9ccb1aa7a9f Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:46 +0300 Subject: net: dsa: Remove support for bypass bridge port attributes/vlan set The bridge port attributes/vlan for DSA devices should be set only from bridge code. Furthermore, The vlans are synced totally with the bridge so there is no need for special dump support. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/net/dsa.h | 4 ---- net/dsa/dsa_priv.h | 4 ---- net/dsa/port.c | 12 ------------ net/dsa/slave.c | 6 ------ 4 files changed, 26 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index 446fc438cb80..d7b9bdd0e768 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -378,10 +378,6 @@ struct dsa_switch_ops { struct switchdev_trans *trans); int (*port_vlan_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); - int (*port_vlan_dump)(struct dsa_switch *ds, int port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb); - /* * Forwarding database */ diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 5d4e668b9471..7f297bcefa0c 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -147,10 +147,6 @@ int dsa_port_vlan_add(struct dsa_port *dp, struct switchdev_trans *trans); int dsa_port_vlan_del(struct dsa_port *dp, const struct switchdev_obj_port_vlan *vlan); -int dsa_port_vlan_dump(struct dsa_port *dp, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb); - /* slave.c */ extern const struct dsa_device_ops notag_netdev_ops; void dsa_slave_mii_bus_init(struct dsa_switch *ds); diff --git a/net/dsa/port.c b/net/dsa/port.c index 86e0585215bf..ce1921663cdd 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -246,15 +246,3 @@ int dsa_port_vlan_del(struct dsa_port *dp, return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info); } - -int dsa_port_vlan_dump(struct dsa_port *dp, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - struct dsa_switch *ds = dp->ds; - - if (ds->ops->port_vlan_dump) - return ds->ops->port_vlan_dump(ds, dp->index, vlan, cb); - - return -EOPNOTSUPP; -} diff --git a/net/dsa/slave.c b/net/dsa/slave.c index e9c1d8c5de0f..ccf670679343 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -302,9 +302,6 @@ static int dsa_slave_port_obj_dump(struct net_device *dev, case SWITCHDEV_OBJ_ID_PORT_MDB: err = dsa_port_mdb_dump(dp, SWITCHDEV_OBJ_PORT_MDB(obj), cb); break; - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = dsa_port_vlan_dump(dp, SWITCHDEV_OBJ_PORT_VLAN(obj), cb); - break; default: err = -EOPNOTSUPP; break; @@ -988,9 +985,6 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup, .ndo_poll_controller = dsa_slave_poll_controller, #endif - .ndo_bridge_getlink = switchdev_port_bridge_getlink, - .ndo_bridge_setlink = switchdev_port_bridge_setlink, - .ndo_bridge_dellink = switchdev_port_bridge_dellink, .ndo_get_phys_port_name = dsa_slave_get_phys_port_name, .ndo_setup_tc = dsa_slave_setup_tc, .ndo_get_stats64 = dsa_slave_get_stats64, -- cgit v1.2.3-55-g7522 From 180b072eb0c8529d0fd460fd192fc6b4db6c3df9 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:47 +0300 Subject: net: dsa: Remove support for MDB dump from DSA's drivers This is done as a preparation before removing support for MDB dump from DSA core. The MDBs are synced with the bridge and thus there is no need for special dump operation support. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/dsa/microchip/ksz_common.c | 9 --------- drivers/net/dsa/mv88e6xxx/chip.c | 24 ------------------------ 2 files changed, 33 deletions(-) diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index a53ce59e0442..4de9d90a4bb3 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1020,14 +1020,6 @@ exit: return ret; } -static int ksz_port_mdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_mdb *mdb, - switchdev_obj_dump_cb_t *cb) -{ - /* this is not called by switch layer */ - return 0; -} - static int ksz_port_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress) @@ -1090,7 +1082,6 @@ static const struct dsa_switch_ops ksz_switch_ops = { .port_mdb_prepare = ksz_port_mdb_prepare, .port_mdb_add = ksz_port_mdb_add, .port_mdb_del = ksz_port_mdb_del, - .port_mdb_dump = ksz_port_mdb_dump, .port_mirror_add = ksz_port_mirror_add, .port_mirror_del = ksz_port_mirror_del, }; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 5bb1138818ef..1f5c202b974d 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1414,15 +1414,6 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, fdb->ndm_state = NUD_NOARP; else fdb->ndm_state = NUD_REACHABLE; - } else if (obj->id == SWITCHDEV_OBJ_ID_PORT_MDB) { - struct switchdev_obj_port_mdb *mdb; - - if (!is_multicast_ether_addr(addr.mac)) - continue; - - mdb = SWITCHDEV_OBJ_PORT_MDB(obj); - mdb->vid = vid; - ether_addr_copy(mdb->addr, addr.mac); } else { return -EOPNOTSUPP; } @@ -3800,20 +3791,6 @@ static int mv88e6xxx_port_mdb_del(struct dsa_switch *ds, int port, return err; } -static int mv88e6xxx_port_mdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_mdb *mdb, - switchdev_obj_dump_cb_t *cb) -{ - struct mv88e6xxx_chip *chip = ds->priv; - int err; - - mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_db_dump(chip, port, &mdb->obj, cb); - mutex_unlock(&chip->reg_lock); - - return err; -} - static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .probe = mv88e6xxx_drv_probe, .get_tag_protocol = mv88e6xxx_get_tag_protocol, @@ -3847,7 +3824,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_mdb_prepare = mv88e6xxx_port_mdb_prepare, .port_mdb_add = mv88e6xxx_port_mdb_add, .port_mdb_del = mv88e6xxx_port_mdb_del, - .port_mdb_dump = mv88e6xxx_port_mdb_dump, .crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join, .crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave, }; -- cgit v1.2.3-55-g7522 From dc0cbff3ff9fe331160c2be2b3f47564e247137d Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:48 +0300 Subject: net: dsa: Remove redundant MDB dump support Currently the MDB HW database is synced with the bridge's one, thus, There is no need to support special dump functionality. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/net/dsa.h | 4 ---- net/dsa/dsa_priv.h | 2 -- net/dsa/port.c | 11 ----------- net/dsa/slave.c | 3 --- 4 files changed, 20 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index d7b9bdd0e768..4ef185997b6f 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -400,10 +400,6 @@ struct dsa_switch_ops { struct switchdev_trans *trans); int (*port_mdb_del)(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb); - int (*port_mdb_dump)(struct dsa_switch *ds, int port, - struct switchdev_obj_port_mdb *mdb, - switchdev_obj_dump_cb_t *cb); - /* * RXNFC */ diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 7f297bcefa0c..9c890de6e4dd 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -140,8 +140,6 @@ int dsa_port_mdb_add(struct dsa_port *dp, struct switchdev_trans *trans); int dsa_port_mdb_del(struct dsa_port *dp, const struct switchdev_obj_port_mdb *mdb); -int dsa_port_mdb_dump(struct dsa_port *dp, struct switchdev_obj_port_mdb *mdb, - switchdev_obj_dump_cb_t *cb); int dsa_port_vlan_add(struct dsa_port *dp, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans); diff --git a/net/dsa/port.c b/net/dsa/port.c index ce1921663cdd..73787828953a 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -210,17 +210,6 @@ int dsa_port_mdb_del(struct dsa_port *dp, return dsa_port_notify(dp, DSA_NOTIFIER_MDB_DEL, &info); } -int dsa_port_mdb_dump(struct dsa_port *dp, struct switchdev_obj_port_mdb *mdb, - switchdev_obj_dump_cb_t *cb) -{ - struct dsa_switch *ds = dp->ds; - - if (ds->ops->port_mdb_dump) - return ds->ops->port_mdb_dump(ds, dp->index, mdb, cb); - - return -EOPNOTSUPP; -} - int dsa_port_vlan_add(struct dsa_port *dp, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index ccf670679343..5807c905fd1d 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -299,9 +299,6 @@ static int dsa_slave_port_obj_dump(struct net_device *dev, case SWITCHDEV_OBJ_ID_PORT_FDB: err = dsa_port_fdb_dump(dp, SWITCHDEV_OBJ_PORT_FDB(obj), cb); break; - case SWITCHDEV_OBJ_ID_PORT_MDB: - err = dsa_port_mdb_dump(dp, SWITCHDEV_OBJ_PORT_MDB(obj), cb); - break; default: err = -EOPNOTSUPP; break; -- cgit v1.2.3-55-g7522 From 2bedde1abbef5eec211308f0293dd7681b0513ec Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:49 +0300 Subject: net: dsa: Move FDB dump implementation inside DSA >From all switchdev devices only DSA requires special FDB dump. This is due to lack of ability for syncing the hardware learned FDBs with the bridge. Due to this it is removed from switchdev and moved inside DSA. Signed-off-by: Arkadi Sharshevsky Signed-off-by: David S. 
Miller --- drivers/net/dsa/b53/b53_common.c | 16 ++---- drivers/net/dsa/b53/b53_priv.h | 3 +- drivers/net/dsa/microchip/ksz_common.c | 20 ++----- drivers/net/dsa/mt7530.c | 10 +--- drivers/net/dsa/mv88e6xxx/chip.c | 38 ++++--------- drivers/net/dsa/qca8k.c | 15 ++--- include/net/dsa.h | 5 +- include/net/switchdev.h | 12 ---- net/dsa/dsa_priv.h | 2 - net/dsa/port.c | 11 ---- net/dsa/slave.c | 100 +++++++++++++++++++++++++-------- net/switchdev/switchdev.c | 84 --------------------------- 12 files changed, 112 insertions(+), 204 deletions(-) diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0176d8087344..274f3679f33d 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1227,8 +1227,7 @@ static void b53_arl_search_rd(struct b53_device *dev, u8 idx, } static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { if (!ent->is_valid) return 0; @@ -1236,16 +1235,11 @@ static int b53_fdb_copy(int port, const struct b53_arl_entry *ent, if (port != ent->port) return 0; - ether_addr_copy(fdb->addr, ent->mac); - fdb->vid = ent->vid; - fdb->ndm_state = ent->is_static ? NUD_NOARP : NUD_REACHABLE; - - return cb(&fdb->obj); + return cb(ent->mac, ent->vid, ent->is_static, data); } int b53_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct b53_device *priv = ds->priv; struct b53_arl_entry results[2]; @@ -1263,13 +1257,13 @@ int b53_fdb_dump(struct dsa_switch *ds, int port, return ret; b53_arl_search_rd(priv, 0, &results[0]); - ret = b53_fdb_copy(port, &results[0], fdb, cb); + ret = b53_fdb_copy(port, &results[0], cb, data); if (ret) return ret; if (priv->num_arl_entries > 2) { b53_arl_search_rd(priv, 1, &results[1]); - ret = b53_fdb_copy(port, &results[1], fdb, cb); + ret = b53_fdb_copy(port, &results[1], cb, data); if (ret) return ret; diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index af5d6c166bff..01bd8cbe9a3f 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h @@ -398,8 +398,7 @@ int b53_fdb_add(struct dsa_switch *ds, int port, int b53_fdb_del(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int b53_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb); + dsa_fdb_dump_cb_t *cb, void *data); int b53_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress); void b53_mirror_del(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 4de9d90a4bb3..56cd6d365352 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -805,12 +805,11 @@ static void convert_alu(struct alu_struct *alu, u32 *alu_table) } static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct ksz_device *dev = ds->priv; int ret = 0; - u32 data; + u32 ksz_data; u32 alu_table[4]; struct alu_struct alu; int timeout; @@ -823,8 +822,8 @@ static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, do { timeout = 1000; do { - ksz_read32(dev, REG_SW_ALU_CTRL__4, &data); - if ((data & ALU_VALID) || !(data & ALU_START)) + ksz_read32(dev, 
REG_SW_ALU_CTRL__4, &ksz_data); + if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START)) break; usleep_range(1, 10); } while (timeout-- > 0); @@ -841,18 +840,11 @@ static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, convert_alu(&alu, alu_table); if (alu.port_forward & BIT(port)) { - fdb->vid = alu.fid; - if (alu.is_static) - fdb->ndm_state = NUD_NOARP; - else - fdb->ndm_state = NUD_REACHABLE; - ether_addr_copy(fdb->addr, alu.mac); - - ret = cb(&fdb->obj); + ret = cb(alu.mac, alu.fid, alu.is_static, data); if (ret) goto exit; } - } while (data & ALU_START); + } while (ksz_data & ALU_START); exit: diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index f92aae8947e6..12700710f26d 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -834,8 +834,7 @@ mt7530_port_fdb_del(struct dsa_switch *ds, int port, static int mt7530_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mt7530_priv *priv = ds->priv; struct mt7530_fdb _fdb = { 0 }; @@ -853,11 +852,8 @@ mt7530_port_fdb_dump(struct dsa_switch *ds, int port, if (rsp & ATC_SRCH_HIT) { mt7530_fdb_read(priv, &_fdb); if (_fdb.port_mask & BIT(port)) { - ether_addr_copy(fdb->addr, _fdb.mac); - fdb->vid = _fdb.vid; - fdb->ndm_state = _fdb.noarp ? - NUD_NOARP : NUD_REACHABLE; - ret = cb(&fdb->obj); + ret = cb(_fdb.mac, _fdb.vid, _fdb.noarp, + data); if (ret < 0) break; } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 1f5c202b974d..918d8f0fe091 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1381,10 +1381,10 @@ static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port, static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, u16 fid, u16 vid, int port, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_atu_entry addr; + bool is_static; int err; addr.state = MV88E6XXX_G1_ATU_DATA_STATE_UNUSED; @@ -1401,24 +1401,12 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, if (addr.trunk || (addr.portvec & BIT(port)) == 0) continue; - if (obj->id == SWITCHDEV_OBJ_ID_PORT_FDB) { - struct switchdev_obj_port_fdb *fdb; - - if (!is_unicast_ether_addr(addr.mac)) - continue; - - fdb = SWITCHDEV_OBJ_PORT_FDB(obj); - fdb->vid = vid; - ether_addr_copy(fdb->addr, addr.mac); - if (addr.state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC) - fdb->ndm_state = NUD_NOARP; - else - fdb->ndm_state = NUD_REACHABLE; - } else { - return -EOPNOTSUPP; - } + if (!is_unicast_ether_addr(addr.mac)) + continue; - err = cb(obj); + is_static = (addr.state == + MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC); + err = cb(addr.mac, vid, is_static, data); if (err) return err; } while (!is_broadcast_ether_addr(addr.mac)); @@ -1427,8 +1415,7 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip, } static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_vtu_entry vlan = { .vid = chip->info->max_vid, @@ -1441,7 +1428,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, if (err) return err; - err = mv88e6xxx_port_db_dump_fid(chip, fid, 0, port, obj, cb); + err = mv88e6xxx_port_db_dump_fid(chip, fid, 0, port, cb, data); if (err) return err; @@ -1455,7 +1442,7 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip 
*chip, int port, break; err = mv88e6xxx_port_db_dump_fid(chip, vlan.fid, vlan.vid, port, - obj, cb); + cb, data); if (err) return err; } while (vlan.vid < chip->info->max_vid); @@ -1464,14 +1451,13 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port, } static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct mv88e6xxx_chip *chip = ds->priv; int err; mutex_lock(&chip->reg_lock); - err = mv88e6xxx_port_db_dump(chip, port, &fdb->obj, cb); + err = mv88e6xxx_port_db_dump(chip, port, cb, data); mutex_unlock(&chip->reg_lock); return err; diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index f8ef823349bc..17977f06cb98 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -801,27 +801,20 @@ qca8k_port_fdb_del(struct dsa_switch *ds, int port, static int qca8k_port_fdb_dump(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + dsa_fdb_dump_cb_t *cb, void *data) { struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; struct qca8k_fdb _fdb = { 0 }; int cnt = QCA8K_NUM_FDB_RECORDS; + bool is_static; int ret = 0; mutex_lock(&priv->reg_mutex); while (cnt-- && !qca8k_fdb_next(priv, &_fdb, port)) { if (!_fdb.aging) break; - - ether_addr_copy(fdb->addr, _fdb.mac); - fdb->vid = _fdb.vid; - if (_fdb.aging == QCA8K_ATU_STATUS_STATIC) - fdb->ndm_state = NUD_NOARP; - else - fdb->ndm_state = NUD_REACHABLE; - - ret = cb(&fdb->obj); + is_static = (_fdb.aging == QCA8K_ATU_STATUS_STATIC); + ret = cb(_fdb.mac, _fdb.vid, is_static, data); if (ret) break; } diff --git a/include/net/dsa.h b/include/net/dsa.h index 4ef185997b6f..a4f66dbb4b7c 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -272,6 +272,8 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds) return ds->rtable[dst->cpu_dp->ds->index]; } +typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid, + bool is_static, void *data); struct dsa_switch_ops { /* * Legacy probing. 
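
For context, the dsa_fdb_dump_cb_t type added just above (and the matching port_fdb_dump signature change in the next hunk) means a driver now only walks its own hardware table and invokes the callback once per entry; the RTM_NEWNEIGH netlink formatting moves into DSA's slave code. A minimal sketch of a driver-side dump under the new interface — my_hw_fdb_next() and struct my_hw_fdb are hypothetical placeholders, not part of this patch:

static int example_port_fdb_dump(struct dsa_switch *ds, int port,
				 dsa_fdb_dump_cb_t *cb, void *data)
{
	struct my_hw_fdb ent;	/* hypothetical HW entry descriptor */
	int err;

	/* Walk the hardware FDB; my_hw_fdb_next() is a placeholder for
	 * whatever table-walk primitive the driver already has.
	 */
	while (my_hw_fdb_next(ds->priv, port, &ent)) {
		/* Hand each entry back to DSA, which builds the netlink
		 * message (see dsa_slave_port_fdb_do_dump below).
		 */
		err = cb(ent.mac, ent.vid, ent.is_static, data);
		if (err)
			return err;
	}

	return 0;
}
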
@@ -386,8 +388,7 @@ struct dsa_switch_ops { int (*port_fdb_del)(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid); int (*port_fdb_dump)(struct dsa_switch *ds, int port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb); + dsa_fdb_dump_cb_t *cb, void *data); /* * Multicast database diff --git a/include/net/switchdev.h b/include/net/switchdev.h index 8ae9e3b6392e..d2637a685ca6 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -208,9 +208,6 @@ int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, u16 vid); -int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct net_device *dev, - struct net_device *filter_dev, int *idx); void switchdev_port_fwd_mark_set(struct net_device *dev, struct net_device *group_dev, bool joining); @@ -309,15 +306,6 @@ static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], return -EOPNOTSUPP; } -static inline int switchdev_port_fdb_dump(struct sk_buff *skb, - struct netlink_callback *cb, - struct net_device *dev, - struct net_device *filter_dev, - int *idx) -{ - return *idx; -} - static inline bool switchdev_port_same_parent_id(struct net_device *a, struct net_device *b) { diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 9c890de6e4dd..1debf9c42fc4 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -133,8 +133,6 @@ int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr, u16 vid); int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr, u16 vid); -int dsa_port_fdb_dump(struct dsa_port *dp, struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb); int dsa_port_mdb_add(struct dsa_port *dp, const struct switchdev_obj_port_mdb *mdb, struct switchdev_trans *trans); diff --git a/net/dsa/port.c b/net/dsa/port.c index 73787828953a..659676ba3f8b 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -173,17 +173,6 @@ int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr, return dsa_port_notify(dp, DSA_NOTIFIER_FDB_DEL, &info); } -int dsa_port_fdb_dump(struct dsa_port *dp, struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) -{ - struct dsa_switch *ds = dp->ds; - - if (ds->ops->port_fdb_dump) - return ds->ops->port_fdb_dump(ds, dp->index, fdb, cb); - - return -EOPNOTSUPP; -} - int dsa_port_mdb_add(struct dsa_port *dp, const struct switchdev_obj_port_mdb *mdb, struct switchdev_trans *trans) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 5807c905fd1d..8c79011c5a83 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -199,6 +199,83 @@ out: return 0; } +struct dsa_slave_dump_ctx { + struct net_device *dev; + struct sk_buff *skb; + struct netlink_callback *cb; + int idx; +}; + +static int +dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid, + bool is_static, void *data) +{ + struct dsa_slave_dump_ctx *dump = data; + u32 portid = NETLINK_CB(dump->cb->skb).portid; + u32 seq = dump->cb->nlh->nlmsg_seq; + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + if (dump->idx < dump->cb->args[2]) + goto skip; + + nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, + sizeof(*ndm), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = NTF_SELF; + ndm->ndm_type = 0; + ndm->ndm_ifindex = dump->dev->ifindex; + ndm->ndm_state = is_static ? 
NUD_NOARP : NUD_REACHABLE; + + if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr)) + goto nla_put_failure; + + if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid)) + goto nla_put_failure; + + nlmsg_end(dump->skb, nlh); + +skip: + dump->idx++; + return 0; + +nla_put_failure: + nlmsg_cancel(dump->skb, nlh); + return -EMSGSIZE; +} + +static int +dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, + struct net_device *dev, struct net_device *filter_dev, + int *idx) +{ + struct dsa_slave_dump_ctx dump = { + .dev = dev, + .skb = skb, + .cb = cb, + .idx = *idx, + }; + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_port *dp = p->dp; + struct dsa_switch *ds = dp->ds; + int err; + + if (!ds->ops->port_fdb_dump) + return -EOPNOTSUPP; + + err = ds->ops->port_fdb_dump(ds, dp->index, + dsa_slave_port_fdb_do_dump, + &dump); + *idx = dump.idx; + return err; +} + static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct dsa_slave_priv *p = netdev_priv(dev); @@ -287,26 +364,6 @@ static int dsa_slave_port_obj_del(struct net_device *dev, return err; } -static int dsa_slave_port_obj_dump(struct net_device *dev, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) -{ - struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_port *dp = p->dp; - int err; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = dsa_port_fdb_dump(dp, SWITCHDEV_OBJ_PORT_FDB(obj), cb); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - static int dsa_slave_port_attr_get(struct net_device *dev, struct switchdev_attr *attr) { @@ -974,7 +1031,7 @@ static const struct net_device_ops dsa_slave_netdev_ops = { .ndo_set_mac_address = dsa_slave_set_mac_address, .ndo_fdb_add = dsa_legacy_fdb_add, .ndo_fdb_del = dsa_legacy_fdb_del, - .ndo_fdb_dump = switchdev_port_fdb_dump, + .ndo_fdb_dump = dsa_slave_fdb_dump, .ndo_do_ioctl = dsa_slave_ioctl, .ndo_get_iflink = dsa_slave_get_iflink, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -992,7 +1049,6 @@ static const struct switchdev_ops dsa_slave_switchdev_ops = { .switchdev_port_attr_set = dsa_slave_port_attr_set, .switchdev_port_obj_add = dsa_slave_port_obj_add, .switchdev_port_obj_del = dsa_slave_port_obj_del, - .switchdev_port_obj_dump = dsa_slave_port_obj_dump, }; static struct device_type dsa_type = { diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 25dc67ef9d37..3d32981b9aa1 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -1009,90 +1009,6 @@ int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], } EXPORT_SYMBOL_GPL(switchdev_port_fdb_del); -struct switchdev_fdb_dump { - struct switchdev_obj_port_fdb fdb; - struct net_device *dev; - struct sk_buff *skb; - struct netlink_callback *cb; - int idx; -}; - -static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj) -{ - struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj); - struct switchdev_fdb_dump *dump = - container_of(fdb, struct switchdev_fdb_dump, fdb); - u32 portid = NETLINK_CB(dump->cb->skb).portid; - u32 seq = dump->cb->nlh->nlmsg_seq; - struct nlmsghdr *nlh; - struct ndmsg *ndm; - - if (dump->idx < dump->cb->args[2]) - goto skip; - - nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, - sizeof(*ndm), NLM_F_MULTI); - if (!nlh) - return -EMSGSIZE; - - ndm = nlmsg_data(nlh); - ndm->ndm_family = AF_BRIDGE; - ndm->ndm_pad1 = 0; - ndm->ndm_pad2 = 0; - ndm->ndm_flags = NTF_SELF; - ndm->ndm_type = 0; - ndm->ndm_ifindex = dump->dev->ifindex; - ndm->ndm_state = fdb->ndm_state; - - if 
(nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr)) - goto nla_put_failure; - - if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid)) - goto nla_put_failure; - - nlmsg_end(dump->skb, nlh); - -skip: - dump->idx++; - return 0; - -nla_put_failure: - nlmsg_cancel(dump->skb, nlh); - return -EMSGSIZE; -} - -/** - * switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries - * - * @skb: netlink skb - * @cb: netlink callback - * @dev: port device - * @filter_dev: filter device - * @idx: - * - * Dump FDB entries from switch device. - */ -int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct net_device *dev, - struct net_device *filter_dev, int *idx) -{ - struct switchdev_fdb_dump dump = { - .fdb.obj.orig_dev = dev, - .fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB, - .dev = dev, - .skb = skb, - .cb = cb, - .idx = *idx, - }; - int err; - - err = switchdev_port_obj_dump(dev, &dump.fdb.obj, - switchdev_port_fdb_dump_cb); - *idx = dump.idx; - return err; -} -EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump); - bool switchdev_port_same_parent_id(struct net_device *a, struct net_device *b) { -- cgit v1.2.3-55-g7522 From 3a83c2a7a518d4044f7bb73b5b05b59be618e547 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:50 +0300 Subject: net: bridge: Remove FDB deletion through switchdev object At this point no driver supports FDB add/del through switchdev object but rather via notification chain, thus, it is removed. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Vivien Didelot Reviewed-by: Ivan Vecera Acked-by: Jiri Pirko Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- net/bridge/br_fdb.c | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index a5e4a736a984..a79b648aac88 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -169,29 +169,11 @@ static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr) } } -static void fdb_del_external_learn(struct net_bridge_fdb_entry *f) -{ - struct switchdev_obj_port_fdb fdb = { - .obj = { - .orig_dev = f->dst->dev, - .id = SWITCHDEV_OBJ_ID_PORT_FDB, - .flags = SWITCHDEV_F_DEFER, - }, - .vid = f->vlan_id, - }; - - ether_addr_copy(fdb.addr, f->addr.addr); - switchdev_port_obj_del(f->dst->dev, &fdb.obj); -} - static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f) { if (f->is_static) fdb_del_hw_addr(br, f->addr.addr); - if (f->added_by_external_learn) - fdb_del_external_learn(f); - hlist_del_init_rcu(&f->hlist); fdb_notify(br, f, RTM_DELNEIGH); call_rcu(&f->rcu, fdb_rcu_free); -- cgit v1.2.3-55-g7522 From 29ab586c3d83f81c435e269cace9a1619afb5bbd Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Sun, 6 Aug 2017 16:15:51 +0300 Subject: net: switchdev: Remove bridge bypass support from switchdev Currently the bridge port flags, vlans, FDBs and MDBs can be offloaded through the bridge code, making the switchdev's SELF bridge bypass implementation to be redundant. This implies several changes: - No need for dump infra in switchdev, DSA's special case is handled privately. - Remove obj_dump from switchdev_ops. - FDBs are removed from obj_add/del routines, due to the fact that they are offloaded through the bridge notification chain. - The switchdev_port_bridge_xx() and switchdev_port_fdb_xx() functions can be removed. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Vivien Didelot Acked-by: Jiri Pirko Reviewed-by: Ivan Vecera Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- include/net/switchdev.h | 75 -------- net/switchdev/switchdev.c | 435 ---------------------------------------------- 2 files changed, 510 deletions(-) diff --git a/include/net/switchdev.h b/include/net/switchdev.h index d2637a685ca6..d767b7991887 100644 --- a/include/net/switchdev.h +++ b/include/net/switchdev.h @@ -74,7 +74,6 @@ struct switchdev_attr { enum switchdev_obj_id { SWITCHDEV_OBJ_ID_UNDEFINED, SWITCHDEV_OBJ_ID_PORT_VLAN, - SWITCHDEV_OBJ_ID_PORT_FDB, SWITCHDEV_OBJ_ID_PORT_MDB, }; @@ -97,17 +96,6 @@ struct switchdev_obj_port_vlan { #define SWITCHDEV_OBJ_PORT_VLAN(obj) \ container_of(obj, struct switchdev_obj_port_vlan, obj) -/* SWITCHDEV_OBJ_ID_PORT_FDB */ -struct switchdev_obj_port_fdb { - struct switchdev_obj obj; - unsigned char addr[ETH_ALEN]; - u16 vid; - u16 ndm_state; -}; - -#define SWITCHDEV_OBJ_PORT_FDB(obj) \ - container_of(obj, struct switchdev_obj_port_fdb, obj) - /* SWITCHDEV_OBJ_ID_PORT_MDB */ struct switchdev_obj_port_mdb { struct switchdev_obj obj; @@ -135,8 +123,6 @@ typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj); * @switchdev_port_obj_add: Add an object to port (see switchdev_obj_*). * * @switchdev_port_obj_del: Delete an object from port (see switchdev_obj_*). - * - * @switchdev_port_obj_dump: Dump port objects (see switchdev_obj_*). */ struct switchdev_ops { int (*switchdev_port_attr_get)(struct net_device *dev, @@ -149,9 +135,6 @@ struct switchdev_ops { struct switchdev_trans *trans); int (*switchdev_port_obj_del)(struct net_device *dev, const struct switchdev_obj *obj); - int (*switchdev_port_obj_dump)(struct net_device *dev, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb); }; enum switchdev_notifier_type { @@ -189,25 +172,10 @@ int switchdev_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj); int switchdev_port_obj_del(struct net_device *dev, const struct switchdev_obj *obj); -int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb); int register_switchdev_notifier(struct notifier_block *nb); int unregister_switchdev_notifier(struct notifier_block *nb); int call_switchdev_notifiers(unsigned long val, struct net_device *dev, struct switchdev_notifier_info *info); -int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u32 filter_mask, - int nlflags); -int switchdev_port_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh, u16 flags); -int switchdev_port_bridge_dellink(struct net_device *dev, - struct nlmsghdr *nlh, u16 flags); -int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, - u16 vid, u16 nlm_flags); -int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, - u16 vid); void switchdev_port_fwd_mark_set(struct net_device *dev, struct net_device *group_dev, bool joining); @@ -246,13 +214,6 @@ static inline int switchdev_port_obj_del(struct net_device *dev, return -EOPNOTSUPP; } -static inline int switchdev_port_obj_dump(struct net_device *dev, - const struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) -{ - return -EOPNOTSUPP; -} - static inline int register_switchdev_notifier(struct notifier_block *nb) { return 0; @@ -270,42 +231,6 @@ static inline int call_switchdev_notifiers(unsigned long val, return NOTIFY_DONE; } -static inline int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, - u32 seq, struct net_device *dev, - u32 filter_mask, int nlflags) 
-{ - return -EOPNOTSUPP; -} - -static inline int switchdev_port_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh, - u16 flags) -{ - return -EOPNOTSUPP; -} - -static inline int switchdev_port_bridge_dellink(struct net_device *dev, - struct nlmsghdr *nlh, - u16 flags) -{ - return -EOPNOTSUPP; -} - -static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, - u16 vid, u16 nlm_flags) -{ - return -EOPNOTSUPP; -} - -static inline int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, - const unsigned char *addr, u16 vid) -{ - return -EOPNOTSUPP; -} - static inline bool switchdev_port_same_parent_id(struct net_device *a, struct net_device *b) { diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 3d32981b9aa1..0531b41d1f2d 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -343,8 +343,6 @@ static size_t switchdev_obj_size(const struct switchdev_obj *obj) switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: return sizeof(struct switchdev_obj_port_vlan); - case SWITCHDEV_OBJ_ID_PORT_FDB: - return sizeof(struct switchdev_obj_port_fdb); case SWITCHDEV_OBJ_ID_PORT_MDB: return sizeof(struct switchdev_obj_port_mdb); default: @@ -534,43 +532,6 @@ int switchdev_port_obj_del(struct net_device *dev, } EXPORT_SYMBOL_GPL(switchdev_port_obj_del); -/** - * switchdev_port_obj_dump - Dump port objects - * - * @dev: port device - * @id: object ID - * @obj: object to dump - * @cb: function to call with a filled object - * - * rtnl_lock must be held. - */ -int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) -{ - const struct switchdev_ops *ops = dev->switchdev_ops; - struct net_device *lower_dev; - struct list_head *iter; - int err = -EOPNOTSUPP; - - ASSERT_RTNL(); - - if (ops && ops->switchdev_port_obj_dump) - return ops->switchdev_port_obj_dump(dev, obj, cb); - - /* Switch device port(s) may be stacked under - * bond/team/vlan dev, so recurse down to dump objects on - * first port at bottom of stack. 
- */ - - netdev_for_each_lower_dev(dev, lower_dev, iter) { - err = switchdev_port_obj_dump(lower_dev, obj, cb); - break; - } - - return err; -} -EXPORT_SYMBOL_GPL(switchdev_port_obj_dump); - static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain); /** @@ -613,402 +574,6 @@ int call_switchdev_notifiers(unsigned long val, struct net_device *dev, } EXPORT_SYMBOL_GPL(call_switchdev_notifiers); -struct switchdev_vlan_dump { - struct switchdev_obj_port_vlan vlan; - struct sk_buff *skb; - u32 filter_mask; - u16 flags; - u16 begin; - u16 end; -}; - -static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump) -{ - struct bridge_vlan_info vinfo; - - vinfo.flags = dump->flags; - - if (dump->begin == 0 && dump->end == 0) { - return 0; - } else if (dump->begin == dump->end) { - vinfo.vid = dump->begin; - if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO, - sizeof(vinfo), &vinfo)) - return -EMSGSIZE; - } else { - vinfo.vid = dump->begin; - vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN; - if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO, - sizeof(vinfo), &vinfo)) - return -EMSGSIZE; - vinfo.vid = dump->end; - vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN; - vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END; - if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO, - sizeof(vinfo), &vinfo)) - return -EMSGSIZE; - } - - return 0; -} - -static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj) -{ - struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); - struct switchdev_vlan_dump *dump = - container_of(vlan, struct switchdev_vlan_dump, vlan); - int err = 0; - - if (vlan->vid_begin > vlan->vid_end) - return -EINVAL; - - if (dump->filter_mask & RTEXT_FILTER_BRVLAN) { - dump->flags = vlan->flags; - for (dump->begin = dump->end = vlan->vid_begin; - dump->begin <= vlan->vid_end; - dump->begin++, dump->end++) { - err = switchdev_port_vlan_dump_put(dump); - if (err) - return err; - } - } else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) { - if (dump->begin > vlan->vid_begin && - dump->begin >= vlan->vid_end) { - if ((dump->begin - 1) == vlan->vid_end && - dump->flags == vlan->flags) { - /* prepend */ - dump->begin = vlan->vid_begin; - } else { - err = switchdev_port_vlan_dump_put(dump); - dump->flags = vlan->flags; - dump->begin = vlan->vid_begin; - dump->end = vlan->vid_end; - } - } else if (dump->end <= vlan->vid_begin && - dump->end < vlan->vid_end) { - if ((dump->end + 1) == vlan->vid_begin && - dump->flags == vlan->flags) { - /* append */ - dump->end = vlan->vid_end; - } else { - err = switchdev_port_vlan_dump_put(dump); - dump->flags = vlan->flags; - dump->begin = vlan->vid_begin; - dump->end = vlan->vid_end; - } - } else { - err = -EINVAL; - } - } - - return err; -} - -static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev, - u32 filter_mask) -{ - struct switchdev_vlan_dump dump = { - .vlan.obj.orig_dev = dev, - .vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, - .skb = skb, - .filter_mask = filter_mask, - }; - int err = 0; - - if ((filter_mask & RTEXT_FILTER_BRVLAN) || - (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) { - err = switchdev_port_obj_dump(dev, &dump.vlan.obj, - switchdev_port_vlan_dump_cb); - if (err) - goto err_out; - if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) - /* last one */ - err = switchdev_port_vlan_dump_put(&dump); - } - -err_out: - return err == -EOPNOTSUPP ? 0 : err; -} - -/** - * switchdev_port_bridge_getlink - Get bridge port attributes - * - * @dev: port device - * - * Called for SELF on rtnl_bridge_getlink to get bridge port - * attributes. 
- */ -int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, - struct net_device *dev, u32 filter_mask, - int nlflags) -{ - struct switchdev_attr attr = { - .orig_dev = dev, - .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS, - }; - u16 mode = BRIDGE_MODE_UNDEF; - u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD; - int err; - - if (!netif_is_bridge_port(dev)) - return -EOPNOTSUPP; - - err = switchdev_port_attr_get(dev, &attr); - if (err && err != -EOPNOTSUPP) - return err; - - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, - attr.u.brport_flags, mask, nlflags, - filter_mask, switchdev_port_vlan_fill); -} -EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink); - -static int switchdev_port_br_setflag(struct net_device *dev, - struct nlattr *nlattr, - unsigned long brport_flag) -{ - struct switchdev_attr attr = { - .orig_dev = dev, - .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS, - }; - u8 flag = nla_get_u8(nlattr); - int err; - - err = switchdev_port_attr_get(dev, &attr); - if (err) - return err; - - if (flag) - attr.u.brport_flags |= brport_flag; - else - attr.u.brport_flags &= ~brport_flag; - - return switchdev_port_attr_set(dev, &attr); -} - -static const struct nla_policy -switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = { - [IFLA_BRPORT_STATE] = { .type = NLA_U8 }, - [IFLA_BRPORT_COST] = { .type = NLA_U32 }, - [IFLA_BRPORT_PRIORITY] = { .type = NLA_U16 }, - [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, - [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, - [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, - [IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 }, - [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, - [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 }, - [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, -}; - -static int switchdev_port_br_setlink_protinfo(struct net_device *dev, - struct nlattr *protinfo) -{ - struct nlattr *attr; - int rem; - int err; - - err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX, - switchdev_port_bridge_policy, NULL); - if (err) - return err; - - nla_for_each_nested(attr, protinfo, rem) { - switch (nla_type(attr)) { - case IFLA_BRPORT_LEARNING: - err = switchdev_port_br_setflag(dev, attr, - BR_LEARNING); - break; - case IFLA_BRPORT_LEARNING_SYNC: - err = switchdev_port_br_setflag(dev, attr, - BR_LEARNING_SYNC); - break; - case IFLA_BRPORT_UNICAST_FLOOD: - err = switchdev_port_br_setflag(dev, attr, BR_FLOOD); - break; - default: - err = -EOPNOTSUPP; - break; - } - if (err) - return err; - } - - return 0; -} - -static int switchdev_port_br_afspec(struct net_device *dev, - struct nlattr *afspec, - int (*f)(struct net_device *dev, - const struct switchdev_obj *obj)) -{ - struct nlattr *attr; - struct bridge_vlan_info *vinfo; - struct switchdev_obj_port_vlan vlan = { - .obj.orig_dev = dev, - .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, - }; - int rem; - int err; - - nla_for_each_nested(attr, afspec, rem) { - if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO) - continue; - if (nla_len(attr) != sizeof(struct bridge_vlan_info)) - return -EINVAL; - vinfo = nla_data(attr); - if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK) - return -EINVAL; - vlan.flags = vinfo->flags; - if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) { - if (vlan.vid_begin) - return -EINVAL; - vlan.vid_begin = vinfo->vid; - /* don't allow range of pvids */ - if (vlan.flags & BRIDGE_VLAN_INFO_PVID) - return -EINVAL; - } else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) { - if (!vlan.vid_begin) - return -EINVAL; - vlan.vid_end = vinfo->vid; - if (vlan.vid_end <= vlan.vid_begin) - return -EINVAL; - err = f(dev, 
&vlan.obj); - if (err) - return err; - vlan.vid_begin = 0; - } else { - if (vlan.vid_begin) - return -EINVAL; - vlan.vid_begin = vinfo->vid; - vlan.vid_end = vinfo->vid; - err = f(dev, &vlan.obj); - if (err) - return err; - vlan.vid_begin = 0; - } - } - - return 0; -} - -/** - * switchdev_port_bridge_setlink - Set bridge port attributes - * - * @dev: port device - * @nlh: netlink header - * @flags: netlink flags - * - * Called for SELF on rtnl_bridge_setlink to set bridge port - * attributes. - */ -int switchdev_port_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh, u16 flags) -{ - struct nlattr *protinfo; - struct nlattr *afspec; - int err = 0; - - if (!netif_is_bridge_port(dev)) - return -EOPNOTSUPP; - - protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), - IFLA_PROTINFO); - if (protinfo) { - err = switchdev_port_br_setlink_protinfo(dev, protinfo); - if (err) - return err; - } - - afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), - IFLA_AF_SPEC); - if (afspec) - err = switchdev_port_br_afspec(dev, afspec, - switchdev_port_obj_add); - - return err; -} -EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink); - -/** - * switchdev_port_bridge_dellink - Set bridge port attributes - * - * @dev: port device - * @nlh: netlink header - * @flags: netlink flags - * - * Called for SELF on rtnl_bridge_dellink to set bridge port - * attributes. - */ -int switchdev_port_bridge_dellink(struct net_device *dev, - struct nlmsghdr *nlh, u16 flags) -{ - struct nlattr *afspec; - - if (!netif_is_bridge_port(dev)) - return -EOPNOTSUPP; - - afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), - IFLA_AF_SPEC); - if (afspec) - return switchdev_port_br_afspec(dev, afspec, - switchdev_port_obj_del); - - return 0; -} -EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink); - -/** - * switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port - * - * @ndmsg: netlink hdr - * @nlattr: netlink attributes - * @dev: port device - * @addr: MAC address to add - * @vid: VLAN to add - * - * Add FDB entry to switch device. - */ -int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, - u16 vid, u16 nlm_flags) -{ - struct switchdev_obj_port_fdb fdb = { - .obj.orig_dev = dev, - .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB, - .vid = vid, - }; - - ether_addr_copy(fdb.addr, addr); - return switchdev_port_obj_add(dev, &fdb.obj); -} -EXPORT_SYMBOL_GPL(switchdev_port_fdb_add); - -/** - * switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port - * - * @ndmsg: netlink hdr - * @nlattr: netlink attributes - * @dev: port device - * @addr: MAC address to delete - * @vid: VLAN to delete - * - * Delete FDB entry from switch device. - */ -int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], - struct net_device *dev, const unsigned char *addr, - u16 vid) -{ - struct switchdev_obj_port_fdb fdb = { - .obj.orig_dev = dev, - .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB, - .vid = vid, - }; - - ether_addr_copy(fdb.addr, addr); - return switchdev_port_obj_del(dev, &fdb.obj); -} -EXPORT_SYMBOL_GPL(switchdev_port_fdb_del); - bool switchdev_port_same_parent_id(struct net_device *a, struct net_device *b) { -- cgit v1.2.3-55-g7522 From 733a707d6c6a1a96625a45c4bdb19618d48ca74b Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Sun, 6 Aug 2017 22:39:06 +0530 Subject: isdn: kcapi: make capi_version const Declare this structure as const as it is only used during a copy operation. Signed-off-by: Bhumika Goyal Signed-off-by: David S. 
Miller --- drivers/isdn/capi/kcapi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c index 9ca691d6c13b..46c189ad8d94 100644 --- a/drivers/isdn/capi/kcapi.c +++ b/drivers/isdn/capi/kcapi.c @@ -55,7 +55,7 @@ struct capictr_event { /* ------------------------------------------------------------- */ -static struct capi_version driver_version = {2, 0, 1, 1 << 4}; +static const struct capi_version driver_version = {2, 0, 1, 1 << 4}; static char driver_serial[CAPI_SERIAL_LEN] = "0004711"; static char capi_manufakturer[64] = "AVM Berlin"; -- cgit v1.2.3-55-g7522 From 53b948356554376ec6f89016376825d48bf396c3 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Mon, 7 Aug 2017 10:08:10 -0700 Subject: net: vrf: Add extack messages for newlink failures Add extack error messages for failure paths creating vrf devices. Once extack support is added to iproute2, we go from the unhelpful: $ ip li add foobar type vrf RTNETLINK answers: Invalid argument to: $ ip li add foobar type vrf Error: VRF table id is missing Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 8a1eaf3c302a..abd2010c48ae 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1371,10 +1371,14 @@ static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { - if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { + NL_SET_ERR_MSG(extack, "Invalid hardware address"); return -EINVAL; - if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + } + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { + NL_SET_ERR_MSG(extack, "Invalid hardware address"); return -EADDRNOTAVAIL; + } } return 0; } @@ -1399,12 +1403,17 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev, struct net *net; int err; - if (!data || !data[IFLA_VRF_TABLE]) + if (!data || !data[IFLA_VRF_TABLE]) { + NL_SET_ERR_MSG(extack, "VRF table id is missing"); return -EINVAL; + } vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]); - if (vrf->tb_id == RT_TABLE_UNSPEC) + if (vrf->tb_id == RT_TABLE_UNSPEC) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE], + "Invalid VRF table id"); return -EINVAL; + } dev->priv_flags |= IFF_L3MDEV_MASTER; -- cgit v1.2.3-55-g7522 From 984eb90532804176a57a18ffc1b194aee350c4d3 Mon Sep 17 00:00:00 2001 From: Gabriel Craciunescu Date: Thu, 3 Aug 2017 15:01:02 +0300 Subject: ath10k: ath10k_htt_rx_amsdu_allowed() use ath10k_dbg() Each time we get disconnected from AP we get flooded with messages like: ... ath10k_pci 0000:03:00.0: no channel configured; ignoring frame(s)! ath10k_warn: 155 callbacks suppressed ... Use ath10k_dbg() here too. 
Signed-off-by: Gabriel Craciunescu Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 799fb7501eb5..9323be663179 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -1524,7 +1524,7 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar, */ if (!rx_status->freq) { - ath10k_warn(ar, "no channel configured; ignoring frame(s)!\n"); + ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n"); return false; } -- cgit v1.2.3-55-g7522 From f35a7f91f66af528b3ee1921de16bea31d347ab0 Mon Sep 17 00:00:00 2001 From: Rakesh Pillai Date: Wed, 2 Aug 2017 16:03:37 +0530 Subject: ath10k: fix memory leak in rx ring buffer allocation The rx ring buffers are added to a hash table if firmware support full rx reorder. If the full rx reorder support flag is not set before allocating the rx ring buffers, none of the buffers are added to the hash table. There is a race condition between rx ring refill and rx buffer replenish from napi poll. The interrupts are enabled in hif start, before the rx ring is refilled during init. We replenish buffers from napi poll due to the interrupts which get enabled after hif start. Hence before the entire rx ring is refilled during the init, the napi poll replenishes a few buffers in steps of 100 buffers per attempt. During this rx ring replenish from napi poll, the rx reorder flag has not been set due to which the replenished buffers are not added to the hash table Set the rx full reorder support flag before we allocate the rx ring buffer to avoid the memory leak. Signed-off-by: Rakesh Pillai Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 3602aa462662..9b4c4facf725 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -2057,6 +2057,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, goto err_wmi_detach; } + /* If firmware indicates Full Rx Reorder support it must be used in a + * slightly different manner. Let HTT code know. + */ + ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER, + ar->wmi.svc_map)); + status = ath10k_htt_rx_alloc(&ar->htt); if (status) { ath10k_err(ar, "failed to alloc htt rx: %d\n", status); @@ -2177,12 +2183,6 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, } } - /* If firmware indicates Full Rx Reorder support it must be used in a - * slightly different manner. Let HTT code know. - */ - ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER, - ar->wmi.svc_map)); - status = ath10k_htt_rx_ring_refill(ar); if (status) { ath10k_err(ar, "failed to refill htt rx ring: %d\n", status); -- cgit v1.2.3-55-g7522 From ab3f9c8868654adc836cd6d726e202f18776331a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 2 Aug 2017 16:57:25 +0300 Subject: ath10k: switch to use new generic UUID API There are new types and helpers that are supposed to be used in new code. As a preparation to get rid of legacy types and API functions do the conversion here. 
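[Illustration] As a rough sketch of the newer generic UUID helpers from <linux/uuid.h> that this conversion moves to (guid_t, guid_gen(), guid_copy(), and the %pUl printk format are the real kernel interfaces; the surrounding structure and function are simplified placeholders, not the ath10k driver's actual layout):

    #include <linux/uuid.h>
    #include <linux/printk.h>

    struct example_crash_data {
            guid_t guid;                    /* replaces the legacy uuid_le field */
    };

    static void example_record_crash(struct example_crash_data *crash)
    {
            guid_t copy;

            guid_gen(&crash->guid);         /* was uuid_le_gen() */
            guid_copy(&copy, &crash->guid); /* was a raw 16-byte memcpy() */

            /* %pUl still renders the 16-byte value as a little-endian UUID string */
            pr_info("firmware crashed! (guid %pUl)\n", &copy);
    }
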
Signed-off-by: Andy Shevchenko Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 2 +- drivers/net/wireless/ath/ath10k/debug.c | 6 +++--- drivers/net/wireless/ath/ath10k/pci.c | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 34b713c5e022..949ebb3e967b 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -462,7 +462,7 @@ struct ath10k_ce_crash_hdr { struct ath10k_fw_crash_data { bool crashed_since_read; - uuid_le uuid; + guid_t guid; struct timespec timestamp; __le32 registers[REG_DUMP_COUNT_QCA988X]; struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX]; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 56404fe4e8f5..df514507d3f1 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -70,7 +70,7 @@ struct ath10k_dump_file_data { /* some info we can get from ath10k struct that might help */ - u8 uuid[16]; + guid_t guid; __le32 chip_id; @@ -719,7 +719,7 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) lockdep_assert_held(&ar->data_lock); crash_data->crashed_since_read = true; - uuid_le_gen(&crash_data->uuid); + guid_gen(&crash_data->guid); getnstimeofday(&crash_data->timestamp); return crash_data; @@ -766,7 +766,7 @@ static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar, dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION); - memcpy(dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid)); + guid_copy(&dump_data->guid, &crash_data->guid); dump_data->chip_id = cpu_to_le32(ar->chip_id); dump_data->bus_type = cpu_to_le32(0); dump_data->target_version = cpu_to_le32(ar->target_version); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index a697caec6579..0a5b88e80214 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1463,7 +1463,7 @@ static void ath10k_pci_dump_registers(struct ath10k *ar, static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) { struct ath10k_fw_crash_data *crash_data; - char uuid[50]; + char guid[UUID_STRING_LEN + 1]; spin_lock_bh(&ar->data_lock); @@ -1472,11 +1472,11 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) crash_data = ath10k_debug_get_new_fw_crash_data(ar); if (crash_data) - scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid); + scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid); else - scnprintf(uuid, sizeof(uuid), "n/a"); + scnprintf(guid, sizeof(guid), "n/a"); - ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid); + ath10k_err(ar, "firmware crashed! (guid %s)\n", guid); ath10k_print_driver_info(ar); ath10k_pci_dump_registers(ar, crash_data); ath10k_ce_dump_registers(ar, crash_data); -- cgit v1.2.3-55-g7522 From 39efc7cc7ccf82d1cd946580cdb70760f347305a Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 2 Aug 2017 18:28:00 -0700 Subject: wcn36xx: Introduce mutual exclusion of fw configuration As the association status changes the driver needs to configure the hardware. This is done based on information in the "sta" acquired by ieee80211_find_sta(), which requires the caller to ensure that the "sta" is valid while its being used; generally by entering an rcu read section. 
But the operations acting on the "sta" has to communicate with the firmware and may therefor sleep, resulting in the following report: [ 31.418190] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:238 [ 31.425919] in_atomic(): 0, irqs_disabled(): 0, pid: 34, name: kworker/u8:1 [ 31.434609] CPU: 0 PID: 34 Comm: kworker/u8:1 Tainted: G W 4.12.0-rc4-next-20170607+ #993 [ 31.441002] Hardware name: Qualcomm Technologies, Inc. APQ 8016 SBC (DT) [ 31.450380] Workqueue: phy0 ieee80211_iface_work [ 31.457226] Call trace: [ 31.461830] [] dump_backtrace+0x0/0x260 [ 31.464004] [] show_stack+0x14/0x20 [ 31.469557] [] dump_stack+0x98/0xb8 [ 31.474592] [] ___might_sleep+0xf0/0x118 [ 31.479626] [] __might_sleep+0x50/0x88 [ 31.485010] [] mutex_lock+0x24/0x60 [ 31.490479] [] wcn36xx_smd_set_link_st+0x30/0x130 [ 31.495428] [] wcn36xx_bss_info_changed+0x148/0x448 [ 31.501504] [] ieee80211_bss_info_change_notify+0xbc/0x118 [ 31.508102] [] ieee80211_assoc_success+0x664/0x7f8 [ 31.515220] [] ieee80211_rx_mgmt_assoc_resp+0x144/0x2d8 [ 31.521555] [] ieee80211_sta_rx_queued_mgmt+0x190/0x698 [ 31.528239] [] ieee80211_iface_work+0x234/0x368 [ 31.535011] [] process_one_work+0x1cc/0x340 [ 31.541086] [] worker_thread+0x48/0x430 [ 31.546814] [] kthread+0x108/0x138 [ 31.552195] [] ret_from_fork+0x10/0x50 In order to ensure that the "sta" remains alive (and consistent) for the duration of bss_info_changed() mutual exclusion has to be ensured with sta_remove(). This is done by introducing a mutex to cover firmware configuration changes, which is made to also ensure mutual exclusion between other operations changing the state or configuration of the firmware. With this we can drop the rcu read lock. Cc: stable@vger.kernel.org Signed-off-by: Bjorn Andersson Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wcn36xx/main.c | 52 ++++++++++++++++++++++++++++-- drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 3 ++ 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 517a315e259b..35bd50bcbbd5 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -372,6 +372,8 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed); + mutex_lock(&wcn->conf_mutex); + if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { int ch = WCN36XX_HW_CHANNEL(wcn); wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n", @@ -382,6 +384,8 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed) } } + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -396,6 +400,8 @@ static void wcn36xx_configure_filter(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n"); + mutex_lock(&wcn->conf_mutex); + *total &= FIF_ALLMULTI; fp = (void *)(unsigned long)multicast; @@ -408,6 +414,8 @@ static void wcn36xx_configure_filter(struct ieee80211_hw *hw, else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc) wcn36xx_smd_set_mc_list(wcn, vif, fp); } + + mutex_unlock(&wcn->conf_mutex); kfree(fp); } @@ -471,6 +479,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, key_conf->key, key_conf->keylen); + mutex_lock(&wcn->conf_mutex); + switch (key_conf->cipher) { case WLAN_CIPHER_SUITE_WEP40: vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40; @@ -565,6 +575,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } out: + 
mutex_unlock(&wcn->conf_mutex); + return ret; } @@ -725,6 +737,8 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n", vif, changed); + mutex_lock(&wcn->conf_mutex); + if (changed & BSS_CHANGED_BEACON_INFO) { wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed dtim period %d\n", @@ -787,7 +801,13 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, bss_conf->aid); vif_priv->sta_assoc = true; - rcu_read_lock(); + + /* + * Holding conf_mutex ensures mutal exclusion with + * wcn36xx_sta_remove() and as such ensures that sta + * won't be freed while we're operating on it. As such + * we do not need to hold the rcu_read_lock(). + */ sta = ieee80211_find_sta(vif, bss_conf->bssid); if (!sta) { wcn36xx_err("sta %pM is not found\n", @@ -811,7 +831,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, * place where AID is available. */ wcn36xx_smd_config_sta(wcn, vif, sta); - rcu_read_unlock(); } else { wcn36xx_dbg(WCN36XX_DBG_MAC, "disassociated bss %pM vif %pM AID=%d\n", @@ -873,6 +892,9 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw, } } out: + + mutex_unlock(&wcn->conf_mutex); + return; } @@ -882,7 +904,10 @@ static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value) struct wcn36xx *wcn = hw->priv; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value); + mutex_lock(&wcn->conf_mutex); wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value); + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -893,8 +918,12 @@ static void wcn36xx_remove_interface(struct ieee80211_hw *hw, struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif); wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif); + mutex_lock(&wcn->conf_mutex); + list_del(&vif_priv->list); wcn36xx_smd_delete_sta_self(wcn, vif->addr); + + mutex_unlock(&wcn->conf_mutex); } static int wcn36xx_add_interface(struct ieee80211_hw *hw, @@ -915,9 +944,13 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw, return -EOPNOTSUPP; } + mutex_lock(&wcn->conf_mutex); + list_add(&vif_priv->list, &wcn->vif_list); wcn36xx_smd_add_sta_self(wcn, vif); + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -930,6 +963,8 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n", vif, sta->addr); + mutex_lock(&wcn->conf_mutex); + spin_lock_init(&sta_priv->ampdu_lock); sta_priv->vif = vif_priv; /* @@ -941,6 +976,9 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, sta_priv->aid = sta->aid; wcn36xx_smd_config_sta(wcn, vif, sta); } + + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -954,8 +992,13 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n", vif, sta->addr, sta_priv->sta_index); + mutex_lock(&wcn->conf_mutex); + wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index); sta_priv->vif = NULL; + + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -999,6 +1042,8 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n", action, tid); + mutex_lock(&wcn->conf_mutex); + switch (action) { case IEEE80211_AMPDU_RX_START: sta_priv->tid = tid; @@ -1038,6 +1083,8 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, wcn36xx_err("Unknown AMPDU action\n"); } + mutex_unlock(&wcn->conf_mutex); + return 0; } @@ -1216,6 +1263,7 @@ static int 
wcn36xx_probe(struct platform_device *pdev) wcn = hw->priv; wcn->hw = hw; wcn->dev = &pdev->dev; + mutex_init(&wcn->conf_mutex); mutex_init(&wcn->hal_mutex); mutex_init(&wcn->scan_lock); diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h index b52b4da9a967..6aefba4c0cda 100644 --- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h +++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h @@ -202,6 +202,9 @@ struct wcn36xx { struct qcom_smem_state *tx_rings_empty_state; unsigned tx_rings_empty_state_bit; + /* prevents concurrent FW reconfiguration */ + struct mutex conf_mutex; + /* * smd_buf must be protected with smd_mutex to garantee * that all messages are sent one after another -- cgit v1.2.3-55-g7522 From 3588e1110e49fdad7fc8898420a9c71b34a3ca04 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Thu, 3 Aug 2017 20:25:31 +0530 Subject: ath9k: make ath_ps_ops structures as const ath_ps_ops structures are only stored as a reference in the ps_ops field of a ath_common structure. This field is of type const, so make the structures as const. Signed-off-by: Bhumika Goyal Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/htc_drv_init.c | 2 +- drivers/net/wireless/ath/ath9k/init.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index defacc6c9c99..da2164b0cccc 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -71,7 +71,7 @@ static void ath9k_htc_op_ps_restore(struct ath_common *common) ath9k_htc_ps_restore((struct ath9k_htc_priv *) common->priv); } -static struct ath_ps_ops ath9k_htc_ps_ops = { +static const struct ath_ps_ops ath9k_htc_ps_ops = { .wakeup = ath9k_htc_op_ps_wakeup, .restore = ath9k_htc_op_ps_restore, }; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index fd9a61834c17..bb7936090b91 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -104,7 +104,7 @@ static void ath9k_op_ps_restore(struct ath_common *common) ath9k_ps_restore((struct ath_softc *) common->priv); } -static struct ath_ps_ops ath9k_ps_ops = { +static const struct ath_ps_ops ath9k_ps_ops = { .wakeup = ath9k_op_ps_wakeup, .restore = ath9k_op_ps_restore, }; -- cgit v1.2.3-55-g7522 From a81d72d2002d6a932bd83022cbf8c442b1b97512 Mon Sep 17 00:00:00 2001 From: Dmitry Tunin Date: Tue, 8 Aug 2017 14:09:02 +0300 Subject: Bluetooth: Add support of 13d3:3494 RTL8723BE device T: Bus=02 Lev=01 Prnt=01 Port=03 Cnt=03 Dev#= 4 Spd=12 MxCh= 0 D: Ver= 2.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=13d3 ProdID=3494 Rev= 2.00 S: Manufacturer=Realtek S: Product=Bluetooth Radio S: SerialNumber=00e04c000001 C:* #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=500mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) 
MxPS= 16 Ivl=1ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms Signed-off-by: Dmitry Tunin Signed-off-by: Marcel Holtmann Cc: stable@vger.kernel.org --- drivers/bluetooth/btusb.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 24cc8383fdd4..e1124ba44154 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -359,6 +359,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, -- cgit v1.2.3-55-g7522 From 9ca766b3641f99975783867d3e0a54e24549d97e Mon Sep 17 00:00:00 2001 From: Vincent Legoll Date: Mon, 17 Apr 2017 12:07:30 +0200 Subject: bcma: make BCMA a menuconfig to ease disabling it all No need to get into the submenu to disable all BCMA-related config entries. Signed-off-by: Vincent Legoll Signed-off-by: Kalle Valo --- drivers/bcma/Kconfig | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig index b5c48a8d485f..54f81c554815 100644 --- a/drivers/bcma/Kconfig +++ b/drivers/bcma/Kconfig @@ -3,11 +3,8 @@ config BCMA_POSSIBLE depends on HAS_IOMEM && HAS_DMA default y -menu "Broadcom specific AMBA" - depends on BCMA_POSSIBLE - -config BCMA - tristate "BCMA support" +menuconfig BCMA + tristate "Broadcom specific AMBA" depends on BCMA_POSSIBLE help Bus driver for Broadcom specific Advanced Microcontroller Bus @@ -117,5 +114,3 @@ config BCMA_DEBUG This turns on additional debugging messages. If unsure, say N - -endmenu -- cgit v1.2.3-55-g7522 From 6345016d4bea6a82300c209d99e7f593800e3b4e Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Thu, 3 Aug 2017 19:56:39 +0530 Subject: rsi: fix uninitialized descriptor pointer issue This patch fixes the uninitialized descriptor pointer issue in function rsi_send_internal_mgmt_frame(). Descriptor should point to start of the skb data. 
Reported-by: Dan Carpenter Fixes: 9a629fafe7d8 ("rsi: immediate wakeup bit and priority for TX command packets") Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 1fba7bba3a10..c488f4b44576 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -274,6 +274,7 @@ static int rsi_send_internal_mgmt_frame(struct rsi_common *common, rsi_dbg(ERR_ZONE, "%s: Unable to allocate skb\n", __func__); return -ENOMEM; } + desc = (struct rsi_cmd_desc *)skb->data; desc->desc_dword0.len_qno |= cpu_to_le16(DESC_IMMEDIATE_WAKEUP); skb->priority = MGMT_SOFT_Q; tx_params = (struct skb_info *)&IEEE80211_SKB_CB(skb)->driver_data; -- cgit v1.2.3-55-g7522 From edba3532c6522302e1cfa9625e096d9e6fbb4d36 Mon Sep 17 00:00:00 2001 From: Pavani Muthyala Date: Thu, 3 Aug 2017 19:58:57 +0530 Subject: rsi: add support for rf-kill functionality This patch implements rfkill_poll handler. Also, necessary changes are done in interface up and down handler to support rfkill functionality. Signed-off-by: Pavani Muthyala Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index c91d6efa7c84..193f9227fdb3 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -279,11 +279,12 @@ static int rsi_mac80211_start(struct ieee80211_hw *hw) struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + rsi_dbg(ERR_ZONE, "===> Interface UP <===\n"); mutex_lock(&common->mutex); common->iface_down = false; - mutex_unlock(&common->mutex); - + wiphy_rfkill_start_polling(hw->wiphy); rsi_send_rx_filter_frame(common, 0); + mutex_unlock(&common->mutex); return 0; } @@ -299,8 +300,10 @@ static void rsi_mac80211_stop(struct ieee80211_hw *hw) struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + rsi_dbg(ERR_ZONE, "===> Interface DOWN <===\n"); mutex_lock(&common->mutex); common->iface_down = true; + wiphy_rfkill_stop_polling(hw->wiphy); /* Block all rx frames */ rsi_send_rx_filter_frame(common, 0xffff); @@ -1214,6 +1217,19 @@ static void rsi_reg_notify(struct wiphy *wiphy, mutex_unlock(&common->mutex); } +static void rsi_mac80211_rfkill_poll(struct ieee80211_hw *hw) +{ + struct rsi_hw *adapter = hw->priv; + struct rsi_common *common = adapter->priv; + + mutex_lock(&common->mutex); + if (common->fsm_state != FSM_MAC_INIT_DONE) + wiphy_rfkill_set_hw_state(hw->wiphy, true); + else + wiphy_rfkill_set_hw_state(hw->wiphy, false); + mutex_unlock(&common->mutex); +} + static struct ieee80211_ops mac80211_ops = { .tx = rsi_mac80211_tx, .start = rsi_mac80211_start, @@ -1232,6 +1248,7 @@ static struct ieee80211_ops mac80211_ops = { .sta_remove = rsi_mac80211_sta_remove, .set_antenna = rsi_mac80211_set_antenna, .get_antenna = rsi_mac80211_get_antenna, + .rfkill_poll = rsi_mac80211_rfkill_poll, }; /** -- cgit v1.2.3-55-g7522 From 588349a1fe3b1983ecdda33a0cc1b87076eea033 Mon Sep 17 00:00:00 2001 From: Pavani Muthyala Date: Thu, 3 Aug 2017 19:58:58 +0530 Subject: rsi: update set_antenna command frame TX command frame set_antenna is modified to use common descriptor 
structure. Also it's subframe type is set. Signed-off-by: Pavani Muthyala Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 13 +++++++------ drivers/net/wireless/rsi/rsi_mgmt.h | 11 +++++++++++ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index c488f4b44576..e00d4edec2ba 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1407,7 +1407,7 @@ int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word) */ int rsi_set_antenna(struct rsi_common *common, u8 antenna) { - struct rsi_mac_frame *cmd_frame; + struct rsi_ant_sel_frame *ant_sel_frame; struct sk_buff *skb; skb = dev_alloc_skb(FRAME_DESC_SZ); @@ -1418,12 +1418,13 @@ int rsi_set_antenna(struct rsi_common *common, u8 antenna) } memset(skb->data, 0, FRAME_DESC_SZ); - cmd_frame = (struct rsi_mac_frame *)skb->data; - - cmd_frame->desc_word[1] = cpu_to_le16(ANT_SEL_FRAME); - cmd_frame->desc_word[3] = cpu_to_le16(antenna & 0x00ff); - cmd_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); + ant_sel_frame = (struct rsi_ant_sel_frame *)skb->data; + ant_sel_frame->desc_dword0.frame_type = ANT_SEL_FRAME; + ant_sel_frame->sub_frame_type = ANTENNA_SEL_TYPE; + ant_sel_frame->ant_value = cpu_to_le16(antenna & ANTENNA_MASK_VALUE); + rsi_set_len_qno(&ant_sel_frame->desc_dword0.len_qno, + 0, RSI_WIFI_MGMT_Q); skb_put(skb, FRAME_DESC_SZ); return rsi_send_internal_mgmt_frame(common, skb); diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index cb0b17ec48d0..1060edcb2a96 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -155,6 +155,8 @@ #define ANTENNA_SEL_INT 0x02 /* RF_OUT_2 / Integerated */ #define ANTENNA_SEL_UFL 0x03 /* RF_OUT_1 / U.FL */ +#define ANTENNA_MASK_VALUE 0x00ff +#define ANTENNA_SEL_TYPE 1 /* Rx filter word definitions */ #define PROMISCOUS_MODE BIT(0) @@ -348,6 +350,15 @@ struct rsi_vap_caps { __le16 beacon_miss_threshold; } __packed; +struct rsi_ant_sel_frame { + struct rsi_cmd_desc_dword0 desc_dword0; + u8 reserved; + u8 sub_frame_type; + __le16 ant_value; + __le32 reserved1; + __le32 reserved2; +} __packed; + /* Key descriptor flags */ #define RSI_KEY_TYPE_BROADCAST BIT(1) #define RSI_WEP_KEY BIT(2) -- cgit v1.2.3-55-g7522 From ce86893fa8d8509d69bef70170ed8c797275c411 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 3 Aug 2017 19:58:59 +0530 Subject: rsi: add support for legacy power save This patch adds support for legacy power save. 
Necessary configuration frames are downloaded to firmware when power save is enabled/disabled Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/Makefile | 1 + drivers/net/wireless/rsi/rsi_91x_hal.c | 7 ++ drivers/net/wireless/rsi/rsi_91x_mac80211.c | 22 +++++ drivers/net/wireless/rsi/rsi_91x_main.c | 2 + drivers/net/wireless/rsi/rsi_91x_mgmt.c | 57 +++++++++++- drivers/net/wireless/rsi/rsi_91x_ps.c | 129 ++++++++++++++++++++++++++++ drivers/net/wireless/rsi/rsi_main.h | 9 +- drivers/net/wireless/rsi/rsi_mgmt.h | 21 +++++ drivers/net/wireless/rsi/rsi_ps.h | 64 ++++++++++++++ 9 files changed, 309 insertions(+), 3 deletions(-) create mode 100644 drivers/net/wireless/rsi/rsi_91x_ps.c create mode 100644 drivers/net/wireless/rsi/rsi_ps.h diff --git a/drivers/net/wireless/rsi/Makefile b/drivers/net/wireless/rsi/Makefile index a475c813674a..ebb89965997a 100644 --- a/drivers/net/wireless/rsi/Makefile +++ b/drivers/net/wireless/rsi/Makefile @@ -3,6 +3,7 @@ rsi_91x-y += rsi_91x_core.o rsi_91x-y += rsi_91x_mac80211.o rsi_91x-y += rsi_91x_mgmt.o rsi_91x-y += rsi_91x_hal.o +rsi_91x-y += rsi_91x_ps.o rsi_91x-$(CONFIG_RSI_DEBUGFS) += rsi_91x_debugfs.o rsi_usb-y += rsi_91x_usb.o rsi_91x_usb_ops.o diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index b0a7a1511aee..4addcc0826db 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -111,6 +111,8 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) /* This function prepares descriptor for given data packet */ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) { + struct rsi_hw *adapter = common->priv; + struct ieee80211_vif *vif; struct ieee80211_hdr *wh = NULL; struct ieee80211_tx_info *info; struct skb_info *tx_params; @@ -148,6 +150,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; wh = (struct ieee80211_hdr *)&skb->data[header_size]; seq_num = (le16_to_cpu(wh->seq_ctrl) >> 4); + vif = adapter->vifs[0]; data_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; @@ -156,6 +159,10 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) data_desc->mac_flags |= cpu_to_le16(RSI_QOS_ENABLE); } + if ((vif->type == NL80211_IFTYPE_STATION) && + (adapter->ps_state == PS_ENABLED)) + wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE); + if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) && (common->secinfo.security_enable)) { if (rsi_is_cipher_wep(common)) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 193f9227fdb3..16a0fd0f519a 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -18,6 +18,7 @@ #include "rsi_debugfs.h" #include "rsi_mgmt.h" #include "rsi_common.h" +#include "rsi_ps.h" static const struct ieee80211_channel rsi_2ghz_channels[] = { { .band = NL80211_BAND_2GHZ, .center_freq = 2412, @@ -467,6 +468,8 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + struct ieee80211_vif *vif = adapter->vifs[0]; + struct ieee80211_conf *conf = &hw->conf; int status = -EOPNOTSUPP; mutex_lock(&common->mutex); @@ -480,6 +483,19 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, status = rsi_config_power(hw); } + /* 
Power save parameters */ + if ((changed & IEEE80211_CONF_CHANGE_PS) && + (vif->type == NL80211_IFTYPE_STATION)) { + unsigned long flags; + + spin_lock_irqsave(&adapter->ps_lock, flags); + if (conf->flags & IEEE80211_CONF_PS) + rsi_enable_ps(adapter); + else + rsi_disable_ps(adapter); + spin_unlock_irqrestore(&adapter->ps_lock, flags); + } + mutex_unlock(&common->mutex); return status; @@ -522,6 +538,8 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + struct ieee80211_bss_conf *bss = &vif->bss_conf; + struct ieee80211_conf *conf = &hw->conf; u16 rx_filter_word = 0; mutex_lock(&common->mutex); @@ -540,6 +558,8 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, bss_conf->bssid, bss_conf->qos, bss_conf->aid); + adapter->ps_info.dtim_interval_duration = bss->dtim_period; + adapter->ps_info.listen_interval = conf->listen_interval; } if (changed & BSS_CHANGED_CQM) { @@ -1283,6 +1303,8 @@ int rsi_mac80211_attach(struct rsi_common *common) ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, AMPDU_AGGREGATION); + ieee80211_hw_set(hw, SUPPORTS_PS); + ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); hw->queues = MAX_HW_QUEUES; hw->extra_tx_headroom = RSI_NEEDED_HEADROOM; diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c index bb0febb17be0..3e1e80888d98 100644 --- a/drivers/net/wireless/rsi/rsi_91x_main.c +++ b/drivers/net/wireless/rsi/rsi_91x_main.c @@ -231,6 +231,8 @@ struct rsi_hw *rsi_91x_init(void) goto err; } + rsi_default_ps_params(adapter); + spin_lock_init(&adapter->ps_lock); common->init_done = true; return adapter; diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index e00d4edec2ba..f76b34679df8 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -17,6 +17,7 @@ #include #include "rsi_mgmt.h" #include "rsi_common.h" +#include "rsi_ps.h" static struct bootup_params boot_params_20 = { .magic_number = cpu_to_le16(0x5aa5), @@ -1396,6 +1397,58 @@ int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word) return rsi_send_internal_mgmt_frame(common, skb); } +int rsi_send_ps_request(struct rsi_hw *adapter, bool enable) +{ + struct rsi_common *common = adapter->priv; + struct ieee80211_bss_conf *bss = &adapter->vifs[0]->bss_conf; + struct rsi_request_ps *ps; + struct rsi_ps_info *ps_info; + struct sk_buff *skb; + int frame_len = sizeof(*ps); + + skb = dev_alloc_skb(frame_len); + if (!skb) + return -ENOMEM; + memset(skb->data, 0, frame_len); + + ps = (struct rsi_request_ps *)skb->data; + ps_info = &adapter->ps_info; + + rsi_set_len_qno(&ps->desc.desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); + ps->desc.desc_dword0.frame_type = WAKEUP_SLEEP_REQUEST; + if (enable) { + ps->ps_sleep.enable = RSI_PS_ENABLE; + ps->desc.desc_dword3.token = cpu_to_le16(RSI_SLEEP_REQUEST); + } else { + ps->ps_sleep.enable = RSI_PS_DISABLE; + ps->desc.desc_dword0.len_qno |= cpu_to_le16(RSI_PS_DISABLE_IND); + ps->desc.desc_dword3.token = cpu_to_le16(RSI_WAKEUP_REQUEST); + } + ps->ps_sleep.sleep_type = ps_info->sleep_type; + ps->ps_sleep.num_bcns_per_lis_int = + cpu_to_le16(ps_info->num_bcns_per_lis_int); + ps->ps_sleep.sleep_duration = + cpu_to_le32(ps_info->deep_sleep_wakeup_period); + + if (bss->assoc) + ps->ps_sleep.connected_sleep = RSI_CONNECTED_SLEEP; + else + ps->ps_sleep.connected_sleep = 
RSI_DEEP_SLEEP; + + ps->ps_listen_interval = cpu_to_le32(ps_info->listen_interval); + ps->ps_dtim_interval_duration = + cpu_to_le32(ps_info->dtim_interval_duration); + + if (ps_info->listen_interval > ps_info->dtim_interval_duration) + ps->ps_listen_interval = cpu_to_le32(RSI_PS_DISABLE); + + ps->ps_num_dtim_intervals = cpu_to_le16(ps_info->num_dtims_per_sleep); + skb_put(skb, frame_len); + + return rsi_send_internal_mgmt_frame(common, skb); +} + /** * rsi_set_antenna() - This fuction send antenna configuration request * to device @@ -1569,7 +1622,9 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common, return 0; } break; - + case WAKEUP_SLEEP_REQUEST: + rsi_dbg(INFO_ZONE, "Wakeup/Sleep confirmation.\n"); + return rsi_handle_ps_confirm(adapter, msg); default: rsi_dbg(INFO_ZONE, "%s: Invalid TA confirm pkt received\n", __func__); diff --git a/drivers/net/wireless/rsi/rsi_91x_ps.c b/drivers/net/wireless/rsi/rsi_91x_ps.c new file mode 100644 index 000000000000..25e8f853837b --- /dev/null +++ b/drivers/net/wireless/rsi/rsi_91x_ps.c @@ -0,0 +1,129 @@ +/** + * Copyright (c) 2014 Redpine Signals Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include "rsi_debugfs.h" +#include "rsi_mgmt.h" +#include "rsi_common.h" +#include "rsi_ps.h" + +char *str_psstate(enum ps_state state) +{ + switch (state) { + case PS_NONE: + return "PS_NONE"; + case PS_DISABLE_REQ_SENT: + return "PS_DISABLE_REQ_SENT"; + case PS_ENABLE_REQ_SENT: + return "PS_ENABLE_REQ_SENT"; + case PS_ENABLED: + return "PS_ENABLED"; + default: + return "INVALID_STATE"; + } + return "INVALID_STATE"; +} + +static inline void rsi_modify_ps_state(struct rsi_hw *adapter, + enum ps_state nstate) +{ + rsi_dbg(INFO_ZONE, "PS state changed %s => %s\n", + str_psstate(adapter->ps_state), + str_psstate(nstate)); + + adapter->ps_state = nstate; +} + +void rsi_default_ps_params(struct rsi_hw *adapter) +{ + struct rsi_ps_info *ps_info = &adapter->ps_info; + + ps_info->enabled = true; + ps_info->sleep_type = RSI_SLEEP_TYPE_LP; + ps_info->tx_threshold = 0; + ps_info->rx_threshold = 0; + ps_info->tx_hysterisis = 0; + ps_info->rx_hysterisis = 0; + ps_info->monitor_interval = 0; + ps_info->listen_interval = RSI_DEF_LISTEN_INTERVAL; + ps_info->num_bcns_per_lis_int = 0; + ps_info->dtim_interval_duration = 0; + ps_info->num_dtims_per_sleep = 0; + ps_info->deep_sleep_wakeup_period = RSI_DEF_DS_WAKEUP_PERIOD; +} + +void rsi_enable_ps(struct rsi_hw *adapter) +{ + if (adapter->ps_state != PS_NONE) { + rsi_dbg(ERR_ZONE, + "%s: Cannot accept enable PS in %s state\n", + __func__, str_psstate(adapter->ps_state)); + return; + } + + if (rsi_send_ps_request(adapter, true)) { + rsi_dbg(ERR_ZONE, + "%s: Failed to send PS request to device\n", + __func__); + return; + } + + rsi_modify_ps_state(adapter, PS_ENABLE_REQ_SENT); +} + +void rsi_disable_ps(struct rsi_hw *adapter) +{ + if (adapter->ps_state != PS_ENABLED) { + rsi_dbg(ERR_ZONE, + "%s: Cannot accept disable PS in %s state\n", + __func__, str_psstate(adapter->ps_state)); + return; + } + + if (rsi_send_ps_request(adapter, false)) { + rsi_dbg(ERR_ZONE, + "%s: Failed to send PS request to device\n", + __func__); + return; + } + + rsi_modify_ps_state(adapter, PS_DISABLE_REQ_SENT); +} + +int rsi_handle_ps_confirm(struct rsi_hw *adapter, u8 *msg) +{ + u16 cfm_type = get_unaligned_le16(msg + PS_CONFIRM_INDEX); + + switch (cfm_type) { + case RSI_SLEEP_REQUEST: + if (adapter->ps_state == PS_ENABLE_REQ_SENT) + rsi_modify_ps_state(adapter, PS_ENABLED); + break; + case RSI_WAKEUP_REQUEST: + if (adapter->ps_state == PS_DISABLE_REQ_SENT) + rsi_modify_ps_state(adapter, PS_NONE); + break; + default: + rsi_dbg(ERR_ZONE, + "Invalid PS confirm type %x in state %s\n", + cfm_type, str_psstate(adapter->ps_state)); + return -1; + } + + return 0; +} diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 6a8e8e7ed1fb..9aada0b73108 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -21,6 +21,10 @@ #include #include +struct rsi_hw; + +#include "rsi_ps.h" + #define ERR_ZONE BIT(0) /* For Error Msgs */ #define INFO_ZONE BIT(1) /* For General Status Msgs */ #define INIT_ZONE BIT(2) /* For Driver Init Seq Msgs */ @@ -177,8 +181,6 @@ enum rsi_dfs_regions { RSI_REGION_WORLD }; -struct rsi_hw; - struct rsi_common { struct rsi_hw *priv; struct vif_priv vif_info[RSI_MAX_VIFS]; @@ -282,6 +284,9 @@ struct rsi_hw { enum host_intf rsi_host_intf; u16 block_size; + enum ps_state ps_state; + struct rsi_ps_info ps_info; + spinlock_t ps_lock; /*To protect power save config*/ u32 usb_buffer_status_reg; #ifdef CONFIG_RSI_DEBUGFS struct rsi_debugfs *dfsentry; diff --git 
a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 1060edcb2a96..c5d114d67c83 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -69,6 +69,7 @@ #define RSI_QOS_ENABLE BIT(12) #define RSI_REKEY_PURPOSE BIT(13) #define RSI_ENCRYPT_PKT BIT(15) +#define RSI_SET_PS_ENABLE BIT(12) #define RSI_CMDDESC_40MHZ BIT(4) #define RSI_CMDDESC_UPPER_20_ENABLE BIT(5) @@ -172,6 +173,14 @@ #define RSI_BEACON_INTERVAL 200 #define RSI_DTIM_COUNT 2 +#define RSI_PS_DISABLE_IND BIT(15) +#define RSI_PS_ENABLE 1 +#define RSI_PS_DISABLE 0 +#define RSI_DEEP_SLEEP 1 +#define RSI_CONNECTED_SLEEP 2 +#define RSI_SLEEP_REQUEST 1 +#define RSI_WAKEUP_REQUEST 2 + enum opmode { STA_OPMODE = 1, AP_OPMODE = 2 @@ -519,6 +528,18 @@ struct rsi_eeprom_read_frame { __le16 reserved3; } __packed; +struct rsi_request_ps { + struct rsi_cmd_desc desc; + struct ps_sleep_params ps_sleep; + u8 ps_mimic_support; + u8 ps_uapsd_acs; + u8 ps_uapsd_wakeup_period; + u8 reserved; + __le32 ps_listen_interval; + __le32 ps_dtim_interval_duration; + __le16 ps_num_dtim_intervals; +} __packed; + static inline u32 rsi_get_queueno(u8 *addr, u16 offset) { return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12; diff --git a/drivers/net/wireless/rsi/rsi_ps.h b/drivers/net/wireless/rsi/rsi_ps.h new file mode 100644 index 000000000000..d8475873df36 --- /dev/null +++ b/drivers/net/wireless/rsi/rsi_ps.h @@ -0,0 +1,64 @@ +/** + * Copyright (c) 2017 Redpine Signals Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __RSI_PS_H__ +#define __RSI_PS_H__ + +#define PS_CONFIRM_INDEX 12 +#define RSI_DEF_DS_WAKEUP_PERIOD 200 +#define RSI_DEF_LISTEN_INTERVAL 200 +#define RSI_SLEEP_TYPE_LP 1 + +enum ps_state { + PS_NONE = 0, + PS_ENABLE_REQ_SENT = 1, + PS_DISABLE_REQ_SENT = 2, + PS_ENABLED = 3 +}; + +struct ps_sleep_params { + u8 enable; + u8 sleep_type; + u8 connected_sleep; + u8 reserved1; + __le16 num_bcns_per_lis_int; + __le16 wakeup_type; + __le32 sleep_duration; +} __packed; + +struct rsi_ps_info { + u8 enabled; + u8 sleep_type; + u8 tx_threshold; + u8 rx_threshold; + u8 tx_hysterisis; + u8 rx_hysterisis; + u16 monitor_interval; + u32 listen_interval; + u16 num_bcns_per_lis_int; + u32 dtim_interval_duration; + u16 num_dtims_per_sleep; + u32 deep_sleep_wakeup_period; +} __packed; + +char *str_psstate(enum ps_state state); +void rsi_enable_ps(struct rsi_hw *adapter); +void rsi_disable_ps(struct rsi_hw *adapter); +int rsi_handle_ps_confirm(struct rsi_hw *adapter, u8 *msg); +void rsi_default_ps_params(struct rsi_hw *hw); +int rsi_send_ps_request(struct rsi_hw *adapter, bool enable); +void rsi_conf_uapsd(struct rsi_hw *adapter); +#endif -- cgit v1.2.3-55-g7522 From db07971d085fa637816ce029a5411f2ce83ee672 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 3 Aug 2017 19:59:00 +0530 Subject: rsi: add support for U-APSD power save This patch adds support for U-APSD power save. Configuration frame is downloaded to firmware with default settings and support is advertised to mac80211 Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 19 +++++++++++++++++++ drivers/net/wireless/rsi/rsi_91x_mgmt.c | 3 +++ drivers/net/wireless/rsi/rsi_91x_ps.c | 17 +++++++++++++++++ drivers/net/wireless/rsi/rsi_main.h | 1 + drivers/net/wireless/rsi/rsi_mgmt.h | 6 ++++++ 5 files changed, 46 insertions(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 16a0fd0f519a..6b833c424b18 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -327,6 +327,7 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw, struct rsi_common *common = adapter->priv; int ret = -EOPNOTSUPP; + vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; mutex_lock(&common->mutex); switch (vif->type) { case NL80211_IFTYPE_STATION: @@ -560,6 +561,16 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, bss_conf->aid); adapter->ps_info.dtim_interval_duration = bss->dtim_period; adapter->ps_info.listen_interval = conf->listen_interval; + + /* If U-APSD is updated, send ps parameters to firmware */ + if (bss->assoc) { + if (common->uapsd_bitmap) { + rsi_dbg(INFO_ZONE, "Configuring UAPSD\n"); + rsi_conf_uapsd(adapter); + } + } else { + common->uapsd_bitmap = 0; + } } if (changed & BSS_CHANGED_CQM) { @@ -641,6 +652,12 @@ static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw, memcpy(&common->edca_params[idx], params, sizeof(struct ieee80211_tx_queue_params)); + + if (params->uapsd) + common->uapsd_bitmap |= idx; + else + common->uapsd_bitmap &= (~idx); + mutex_unlock(&common->mutex); return 0; @@ -1311,6 +1328,8 @@ int rsi_mac80211_attach(struct rsi_common *common) hw->max_rates = 1; hw->max_rate_tries = MAX_RETRIES; + hw->uapsd_queues = RSI_IEEE80211_UAPSD_QUEUES; + hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; hw->max_tx_aggregation_subframes = 6; rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ); diff 
--git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index f76b34679df8..e5fe443ddfb6 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1425,6 +1425,9 @@ int rsi_send_ps_request(struct rsi_hw *adapter, bool enable) ps->desc.desc_dword0.len_qno |= cpu_to_le16(RSI_PS_DISABLE_IND); ps->desc.desc_dword3.token = cpu_to_le16(RSI_WAKEUP_REQUEST); } + + ps->ps_uapsd_acs = common->uapsd_bitmap; + ps->ps_sleep.sleep_type = ps_info->sleep_type; ps->ps_sleep.num_bcns_per_lis_int = cpu_to_le16(ps_info->num_bcns_per_lis_int); diff --git a/drivers/net/wireless/rsi/rsi_91x_ps.c b/drivers/net/wireless/rsi/rsi_91x_ps.c index 25e8f853837b..48c79f035c59 100644 --- a/drivers/net/wireless/rsi/rsi_91x_ps.c +++ b/drivers/net/wireless/rsi/rsi_91x_ps.c @@ -105,6 +105,22 @@ void rsi_disable_ps(struct rsi_hw *adapter) rsi_modify_ps_state(adapter, PS_DISABLE_REQ_SENT); } +void rsi_conf_uapsd(struct rsi_hw *adapter) +{ + int ret; + + if (adapter->ps_state != PS_ENABLED) + return; + + ret = rsi_send_ps_request(adapter, false); + if (!ret) + ret = rsi_send_ps_request(adapter, true); + if (ret) + rsi_dbg(ERR_ZONE, + "%s: Failed to send PS request to device\n", + __func__); +} + int rsi_handle_ps_confirm(struct rsi_hw *adapter, u8 *msg) { u16 cfm_type = get_unaligned_le16(msg + PS_CONFIRM_INDEX); @@ -127,3 +143,4 @@ int rsi_handle_ps_confirm(struct rsi_hw *adapter, u8 *msg) return 0; } + diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 9aada0b73108..d2cc47e98639 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -243,6 +243,7 @@ struct rsi_common { u16 oper_mode; u8 lp_ps_handshake_mode; u8 ulp_ps_handshake_mode; + u8 uapsd_bitmap; u8 rf_power_val; u8 wlan_rf_power_mode; u8 obm_ant_sel_val; diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index c5d114d67c83..b22103fd6c81 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -181,6 +181,12 @@ #define RSI_SLEEP_REQUEST 1 #define RSI_WAKEUP_REQUEST 2 +#define RSI_IEEE80211_UAPSD_QUEUES \ + (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO | \ + IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \ + IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) + enum opmode { STA_OPMODE = 1, AP_OPMODE = 2 -- cgit v1.2.3-55-g7522 From 23e414cca1f7331189d0165a3874f0b0453bf308 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 3 Aug 2017 19:59:01 +0530 Subject: rsi: rename sdio_read_buffer_status_register rsi_sdio_check_buffer_status would be the appropriate name for this function as we are checking hardware buffers availability status. 
Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio.c | 2 +- drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 13 +++---------- drivers/net/wireless/rsi/rsi_sdio.h | 2 +- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c index 42d558b61721..742f6cd44f6c 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c @@ -860,7 +860,7 @@ static int rsi_init_sdio_interface(struct rsi_hw *adapter, sdio_release_host(pfunction); adapter->determine_event_timeout = rsi_sdio_determine_event_timeout; - adapter->check_hw_queue_status = rsi_sdio_read_buffer_status_register; + adapter->check_hw_queue_status = rsi_sdio_check_buffer_status; #ifdef CONFIG_RSI_DEBUGFS adapter->num_debugfs_entries = MAX_DEBUGFS_ENTRIES; diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index b3f7adc9d085..9b94ba780ee2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -323,17 +323,10 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) } while (1); } -/** - * rsi_sdio_read_buffer_status_register() - This function is used to the read - * buffer status register and set - * relevant fields in - * rsi_91x_sdiodev struct. - * @adapter: Pointer to the driver hw structure. - * @q_num: The Q number whose status is to be found. - * - * Return: status: -1 on failure or else queue full/stop is indicated. +/* This function is used to read buffer status register and + * set relevant fields in rsi_91x_sdiodev struct. */ -int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num) +int rsi_sdio_check_buffer_status(struct rsi_hw *adapter, u8 q_num) { struct rsi_common *common = adapter->priv; struct rsi_91x_sdiodev *dev = diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 3cf67565feb1..9239fbe698f8 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -127,5 +127,5 @@ int rsi_sdio_write_register_multiple(struct rsi_hw *adapter, u32 addr, int rsi_sdio_master_access_msword(struct rsi_hw *adapter, u16 ms_word); void rsi_sdio_ack_intr(struct rsi_hw *adapter, u8 int_bit); int rsi_sdio_determine_event_timeout(struct rsi_hw *adapter); -int rsi_sdio_read_buffer_status_register(struct rsi_hw *adapter, u8 q_num); +int rsi_sdio_check_buffer_status(struct rsi_hw *adapter, u8 q_num); #endif -- cgit v1.2.3-55-g7522 From d64dd2a172d875962f8ae35af3bbd9aa1c679039 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 3 Aug 2017 19:59:02 +0530 Subject: rsi: buffer full check optimization We get buffer full event from firmware whenever Tx queue is full Host should stop writing packets after this and resume after buffer free event. Buffer status checking is optimized for once in 4 times if BUFF_FULL condition is not set, otherwise once for every packet. 
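[Editor's note] The optimization described above amounts to rate-limiting an expensive register read: poll only every Nth call, unless an event has flagged that the cached state may be stale. Below is a minimal, self-contained sketch of that pattern; the names (check_buffer_status, force_poll, POLL_INTERVAL) are illustrative and not taken from the rsi driver, whose actual change appears in the diff that follows.

        #include <stdbool.h>
        #include <stdio.h>

        #define POLL_INTERVAL 4   /* read the register once per 4 calls by default */

        static bool force_poll;   /* set from the "buffer available" event path */
        static int hw_reads;      /* counts how often the slow read happens */

        /* Stand-in for the slow SDIO buffer-status register read. */
        static int read_buffer_status_register(void)
        {
                hw_reads++;
                return 0;                 /* pretend the buffer is never full */
        }

        static int check_buffer_status(void)
        {
                static int counter = POLL_INTERVAL;

                if (!force_poll && counter) {
                        counter--;        /* skip the expensive read this time */
                        return 0;
                }

                force_poll = false;
                counter = POLL_INTERVAL;  /* a real driver may shrink this while
                                           * the hardware buffer is full */
                return read_buffer_status_register();
        }

        int main(void)
        {
                for (int i = 0; i < 16; i++)
                        check_buffer_status();
                force_poll = true;        /* simulate a BUFFER_AVAILABLE event */
                check_buffer_status();
                printf("register reads: %d for 17 calls\n", hw_reads);
                return 0;
        }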
Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 16 +++++++++++++++- drivers/net/wireless/rsi/rsi_sdio.h | 1 + 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index 9b94ba780ee2..94a9fcd70dc7 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -271,6 +271,7 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) "%s: ==> BUFFER_AVAILABLE <==\n", __func__); dev->rx_info.buf_available_counter++; + dev->buff_status_updated = true; break; case FIRMWARE_ASSERT_IND: @@ -333,7 +334,14 @@ int rsi_sdio_check_buffer_status(struct rsi_hw *adapter, u8 q_num) (struct rsi_91x_sdiodev *)adapter->rsi_dev; u8 buf_status = 0; int status = 0; + static int counter = 4; + if (!dev->buff_status_updated && counter) { + counter--; + goto out; + } + + dev->buff_status_updated = false; status = rsi_sdio_read_register(common->priv, RSI_DEVICE_BUFFER_STATUS_REGISTER, &buf_status); @@ -368,10 +376,16 @@ int rsi_sdio_check_buffer_status(struct rsi_hw *adapter, u8 q_num) dev->rx_info.semi_buffer_full = false; } + if (dev->rx_info.mgmt_buffer_full || dev->rx_info.buf_full_counter) + counter = 1; + else + counter = 4; + +out: if ((q_num == MGMT_SOFT_Q) && (dev->rx_info.mgmt_buffer_full)) return QUEUE_FULL; - if (dev->rx_info.buffer_full) + if ((q_num < MGMT_SOFT_Q) && (dev->rx_info.buffer_full)) return QUEUE_FULL; return QUEUE_NOT_FULL; diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h index 9239fbe698f8..95e4bed57baf 100644 --- a/drivers/net/wireless/rsi/rsi_sdio.h +++ b/drivers/net/wireless/rsi/rsi_sdio.h @@ -114,6 +114,7 @@ struct rsi_91x_sdiodev { u8 prev_desc[16]; u16 tx_blk_size; u8 write_fail; + bool buff_status_updated; }; void rsi_interrupt_handler(struct rsi_hw *adapter); -- cgit v1.2.3-55-g7522 From 67c52a4dafceca8ed63b2d8dc73079059e064399 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 3 Aug 2017 19:59:03 +0530 Subject: rsi: buffer available interrupt handling BUFFER_AVAILABLE interrupt is sent by firmware to indicate change in buffer status. We should check buffer status while handling this interrupt. Currently buffer status is checked only while dequeueing packets. This patch fixes a data traffic stuck problem observed occasionally. 
Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_sdio_ops.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index 94a9fcd70dc7..8e2a95c486b0 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -259,10 +259,12 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) switch (isr_type) { case BUFFER_AVAILABLE: - dev->rx_info.watch_bufferfull_count = 0; - dev->rx_info.buffer_full = false; - dev->rx_info.semi_buffer_full = false; - dev->rx_info.mgmt_buffer_full = false; + status = rsi_sdio_check_buffer_status(adapter, + 0); + if (status < 0) + rsi_dbg(ERR_ZONE, + "%s: Failed to check buffer status\n", + __func__); rsi_sdio_ack_intr(common->priv, (1 << PKT_BUFF_AVAILABLE)); rsi_set_event(&common->tx_thread.event); @@ -270,7 +272,6 @@ void rsi_interrupt_handler(struct rsi_hw *adapter) rsi_dbg(ISR_ZONE, "%s: ==> BUFFER_AVAILABLE <==\n", __func__); - dev->rx_info.buf_available_counter++; dev->buff_status_updated = true; break; -- cgit v1.2.3-55-g7522 From 80a88ecf3bccb129146e00e429a4b8a90fb89f11 Mon Sep 17 00:00:00 2001 From: Karun Eagalapati Date: Thu, 3 Aug 2017 19:59:04 +0530 Subject: rsi: RTS threshold configuration Provision is added for configuring RTS threshold by sending vap dynamic update frame to firmware. Signed-off-by: Karun Eagalapati Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 9 +++++++++ drivers/net/wireless/rsi/rsi_91x_mgmt.c | 31 +++++++++++++++++++++++++++++ drivers/net/wireless/rsi/rsi_main.h | 1 + drivers/net/wireless/rsi/rsi_mgmt.h | 14 +++++++++++++ 4 files changed, 55 insertions(+) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 6b833c424b18..210ad79038ed 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -497,6 +497,15 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw, spin_unlock_irqrestore(&adapter->ps_lock, flags); } + /* RTS threshold */ + if (changed & WIPHY_PARAM_RTS_THRESHOLD) { + rsi_dbg(INFO_ZONE, "RTS threshold\n"); + if ((common->rts_threshold) <= IEEE80211_MAX_RTS_THRESHOLD) { + rsi_dbg(INFO_ZONE, + "%s: Sending vap updates....\n", __func__); + status = rsi_send_vap_dynamic_update(common); + } + } mutex_unlock(&common->mutex); return status; diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index e5fe443ddfb6..f93499d0b8fa 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1058,6 +1058,37 @@ int rsi_send_radio_params_update(struct rsi_common *common) return rsi_send_internal_mgmt_frame(common, skb); } +/* This function programs the threshold. 
*/ +int rsi_send_vap_dynamic_update(struct rsi_common *common) +{ + struct sk_buff *skb; + struct rsi_dynamic_s *dynamic_frame; + + rsi_dbg(MGMT_TX_ZONE, + "%s: Sending vap update indication frame\n", __func__); + + skb = dev_alloc_skb(sizeof(struct rsi_dynamic_s)); + if (!skb) + return -ENOMEM; + + memset(skb->data, 0, sizeof(struct rsi_dynamic_s)); + dynamic_frame = (struct rsi_dynamic_s *)skb->data; + rsi_set_len_qno(&dynamic_frame->desc_dword0.len_qno, + sizeof(dynamic_frame->frame_body), RSI_WIFI_MGMT_Q); + + dynamic_frame->desc_dword0.frame_type = VAP_DYNAMIC_UPDATE; + dynamic_frame->desc_dword2.pkt_info = + cpu_to_le32(common->rts_threshold); + /* Beacon miss threshold */ + dynamic_frame->frame_body.keep_alive_period = + cpu_to_le16(RSI_DEF_KEEPALIVE); + dynamic_frame->desc_dword3.sta_id = 0; /* vap id */ + + skb_put(skb, sizeof(struct rsi_dynamic_s)); + + return rsi_send_internal_mgmt_frame(common, skb); +} + /** * rsi_compare() - This function is used to compare two integers * @a: pointer to the first integer diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index d2cc47e98639..d05b5e0847bc 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -58,6 +58,7 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...); #define IEEE80211_ADDR_LEN 6 #define FRAME_DESC_SZ 16 #define MIN_802_11_HDR_LEN 24 +#define RSI_DEF_KEEPALIVE 90 #define DATA_QUEUE_WATER_MARK 400 #define MIN_DATA_QUEUE_WATER_MARK 300 diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index b22103fd6c81..201a46572c69 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -239,6 +239,7 @@ enum cmd_frame_type { CW_MODE_REQ, PER_CMD_PKT, ANT_SEL_FRAME = 0x20, + VAP_DYNAMIC_UPDATE = 0x27, COMMON_DEV_CONFIG = 0x28, RADIO_PARAMS_UPDATE = 0x29 }; @@ -374,6 +375,18 @@ struct rsi_ant_sel_frame { __le32 reserved2; } __packed; +struct rsi_dynamic_s { + struct rsi_cmd_desc_dword0 desc_dword0; + struct rsi_cmd_desc_dword1 desc_dword1; + struct rsi_cmd_desc_dword2 desc_dword2; + struct rsi_cmd_desc_dword3 desc_dword3; + struct framebody { + __le16 data_rate; + __le16 mgmt_rate; + __le16 keep_alive_period; + } frame_body; +} __packed; + /* Key descriptor flags */ #define RSI_KEY_TYPE_BROADCAST BIT(1) #define RSI_WEP_KEY BIT(2) @@ -585,6 +598,7 @@ int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len, u8 key_type, u8 key_id, u32 cipher); int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel); +int rsi_send_vap_dynamic_update(struct rsi_common *common); int rsi_send_block_unblock_frame(struct rsi_common *common, bool event); void rsi_inform_bss_status(struct rsi_common *common, u8 status, const u8 *bssid, u8 qos_enable, u16 aid); -- cgit v1.2.3-55-g7522 From bd69cddcdf47919a8f7145779c5da2db3c543bca Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Thu, 3 Aug 2017 01:34:46 +0530 Subject: mwifiex: replace netif_carrier_on/off by netif_device_attach/dettach Driver is doing netif_carrier_off during suspend, which will set the IFF_LOWER_UP flag to 0. As a result certain applications will think this as a real carrier down and behave accordingly. This will cause issues like loss of IP address, for example. To fix this use netif_device_dettach during suspend. 
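[Editor's note] The approach this patch takes is to mark the device as temporarily absent (netif_device_detach) rather than signalling a link-down (netif_carrier_off), so userspace keeps its addresses across suspend. The fragment below is a generic, hypothetical driver sketch of that call pairing, not mwifiex code; error handling and the bus-specific suspend work are omitted.

        #include <linux/netdevice.h>

        /* Hypothetical driver context holding one net_device. */
        struct mydrv {
                struct net_device *ndev;
        };

        static int mydrv_suspend(struct mydrv *drv)
        {
                /* Mark the device as not present and stop its TX queues.
                 * Unlike netif_carrier_off(), this does not look like a
                 * carrier-down event, so DHCP leases / IP addresses survive.
                 */
                netif_device_detach(drv->ndev);

                /* ... put the hardware into its low-power state here ... */
                return 0;
        }

        static int mydrv_resume(struct mydrv *drv)
        {
                /* ... wake the hardware back up here ... */

                /* Re-attach and restart the TX queues that were stopped. */
                netif_device_attach(drv->ndev);
                return 0;
        }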
Fixes: 0026b32d723e ('mwifiex: fix Tx timeout issue during suspend test') Signed-off-by: Cathy Luo Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 2be78170ec67..83605a2f1c36 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3391,11 +3391,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv && priv->netdev) { - mwifiex_stop_net_dev_queue(priv->netdev, adapter); - if (netif_carrier_ok(priv->netdev)) - netif_carrier_off(priv->netdev); - } + if (priv && priv->netdev) + netif_device_detach(priv->netdev); } for (i = 0; i < retry_num; i++) { @@ -3466,11 +3463,8 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy) for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; - if (priv && priv->netdev) { - if (!netif_carrier_ok(priv->netdev)) - netif_carrier_on(priv->netdev); - mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); - } + if (priv && priv->netdev) + netif_device_attach(priv->netdev); } if (!wiphy->wowlan_config) -- cgit v1.2.3-55-g7522 From 2d33140f90e805cc848dbdc0b176eb67e96b2cf5 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Thu, 3 Aug 2017 09:13:27 +0000 Subject: mwifiex: Do not change bss_num in change_virtual_intf Commit 4d7ab36f0c47 ("mwifiex: Do not change bss_type in change_virtual_intf") kept original bss_type unchanged. bss_num should keep the same style, in this way. Unique tuple (bss_type, bss_num) will be able to locate the right priv structure. Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 83605a2f1c36..945d444d99a9 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -889,23 +889,15 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, switch (type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: - priv->bss_num = mwifiex_get_unused_bss_num(adapter, - MWIFIEX_BSS_TYPE_STA); priv->bss_role = MWIFIEX_BSS_ROLE_STA; break; case NL80211_IFTYPE_P2P_CLIENT: - priv->bss_num = mwifiex_get_unused_bss_num(adapter, - MWIFIEX_BSS_TYPE_P2P); priv->bss_role = MWIFIEX_BSS_ROLE_STA; break; case NL80211_IFTYPE_P2P_GO: - priv->bss_num = mwifiex_get_unused_bss_num(adapter, - MWIFIEX_BSS_TYPE_P2P); priv->bss_role = MWIFIEX_BSS_ROLE_UAP; break; case NL80211_IFTYPE_AP: - priv->bss_num = mwifiex_get_unused_bss_num(adapter, - MWIFIEX_BSS_TYPE_UAP); priv->bss_role = MWIFIEX_BSS_ROLE_UAP; break; default: -- cgit v1.2.3-55-g7522 From 20e5476d6c30e56e6bc58378ae07a36904776333 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Thu, 3 Aug 2017 09:13:28 +0000 Subject: mwifiex: wrapper wps ie in pass through tlv This patch wrapper wps ie in pass through tlv, so that firmware could parse correctly. 
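[Editor's note] The change above only swaps the TLV type used when the WPS IE is appended to the command buffer. For readers unfamiliar with the framing, a pass-through TLV is simply a small little-endian header (type, length) placed in front of the raw IE bytes. A self-contained sketch follows; the type value and IE bytes are made up and unrelated to mwifiex's real definitions.

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define TLV_TYPE_EXAMPLE_PASSTHROUGH 0x010a   /* made-up value */

        static void put_le16(uint8_t *p, uint16_t v)
        {
                p[0] = v & 0xff;
                p[1] = v >> 8;
        }

        /* Append "type | len | payload" to buf, return the new write position. */
        static uint8_t *append_tlv(uint8_t *buf, uint16_t type,
                                   const uint8_t *payload, uint16_t len)
        {
                put_le16(buf, type);
                put_le16(buf + 2, len);
                memcpy(buf + 4, payload, len);
                return buf + 4 + len;
        }

        int main(void)
        {
                uint8_t cmd[64];
                const uint8_t wps_ie[] = { 0xdd, 0x05, 0x00, 0x50, 0xf2, 0x04, 0x10 };
                uint8_t *end = append_tlv(cmd, TLV_TYPE_EXAMPLE_PASSTHROUGH,
                                          wps_ie, sizeof(wps_ie));

                printf("wrapped %zu IE bytes into a %ld byte TLV\n",
                       sizeof(wps_ie), (long)(end - cmd));
                return 0;
        }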
Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/join.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index b89596c18b41..d87aeff70cef 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -253,7 +253,7 @@ mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer) priv->wps_ie_len, *buffer); /* Wrap the generic IE buffer with a pass through TLV type */ - ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE); + ie_header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH); ie_header.len = cpu_to_le16(priv->wps_ie_len); memcpy(*buffer, &ie_header, sizeof(ie_header)); *buffer += sizeof(ie_header); -- cgit v1.2.3-55-g7522 From 4ba28f93948df5643c24402c642fb769e09fab73 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Mon, 7 Aug 2017 01:36:06 +0000 Subject: mwifiex: p2p: use separate device address Per below statement about p2p device address in WFA P2P spec $2.4.3: The P2P Device Address of a P2P Device shall be its globally administered MAC address, or its globally administered MAC address with the locally administered bit set. This patch follow above statement, using a separate device address for p2p interface Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 5 +++- drivers/net/wireless/marvell/mwifiex/main.c | 40 ++++++++++++++++--------- drivers/net/wireless/marvell/mwifiex/main.h | 4 +++ 3 files changed, 34 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 945d444d99a9..b16b19af812d 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -915,6 +915,8 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, adapter->rx_locked = false; spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); + mwifiex_set_mac_address(priv, dev); + return 0; } @@ -2955,6 +2957,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, } mwifiex_init_priv_params(priv, dev); + mwifiex_set_mac_address(priv, dev); + priv->netdev = dev; ret = mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE, @@ -2982,7 +2986,6 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy, dev_net_set(dev, wiphy_net(wiphy)); dev->ieee80211_ptr = &priv->wdev; dev->ieee80211_ptr->iftype = priv->bss_mode; - memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN); SET_NETDEV_DEV(dev, wiphy_dev(wiphy)); dev->flags |= IFF_BROADCAST | IFF_MULTICAST; diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index d67d70002ea9..ee40b739b289 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -940,31 +940,44 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) return 0; } -/* - * CFG802.11 network device handler for setting MAC address. 
- */ -static int -mwifiex_set_mac_address(struct net_device *dev, void *addr) +int mwifiex_set_mac_address(struct mwifiex_private *priv, + struct net_device *dev) { - struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); - struct sockaddr *hw_addr = addr; int ret; + u64 mac_addr; - memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN); + if (priv->bss_type != MWIFIEX_BSS_TYPE_P2P) + goto done; + + mac_addr = ether_addr_to_u64(priv->curr_addr); + mac_addr |= BIT_ULL(MWIFIEX_MAC_LOCAL_ADMIN_BIT); + u64_to_ether_addr(mac_addr, priv->curr_addr); /* Send request to firmware */ ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_MAC_ADDRESS, HostCmd_ACT_GEN_SET, 0, NULL, true); - if (!ret) - memcpy(priv->netdev->dev_addr, priv->curr_addr, ETH_ALEN); - else + if (ret) { mwifiex_dbg(priv->adapter, ERROR, "set mac address failed: ret=%d\n", ret); + return ret; + } +done: memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); + return 0; +} - return ret; +/* CFG802.11 network device handler for setting MAC address. + */ +static int +mwifiex_ndo_set_mac_address(struct net_device *dev, void *addr) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + struct sockaddr *hw_addr = addr; + + memcpy(priv->curr_addr, hw_addr->sa_data, ETH_ALEN); + return mwifiex_set_mac_address(priv, dev); } /* @@ -1257,7 +1270,7 @@ static const struct net_device_ops mwifiex_netdev_ops = { .ndo_open = mwifiex_open, .ndo_stop = mwifiex_close, .ndo_start_xmit = mwifiex_hard_start_xmit, - .ndo_set_mac_address = mwifiex_set_mac_address, + .ndo_set_mac_address = mwifiex_ndo_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = mwifiex_tx_timeout, .ndo_get_stats = mwifiex_get_stats, @@ -1301,7 +1314,6 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, priv->gen_idx = MWIFIEX_AUTO_IDX_MASK; priv->num_tx_timeout = 0; ether_addr_copy(priv->curr_addr, priv->adapter->perm_addr); - memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN); if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA || GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 537a0ad795ff..0aaae0878742 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -165,6 +165,8 @@ enum { /* Address alignment */ #define MWIFIEX_ALIGN_ADDR(p, a) (((long)(p) + (a) - 1) & ~((a) - 1)) +#define MWIFIEX_MAC_LOCAL_ADMIN_BIT 41 + /** *enum mwifiex_debug_level - marvell wifi debug level */ @@ -1671,6 +1673,8 @@ void mwifiex_process_tx_pause_event(struct mwifiex_private *priv, void mwifiex_process_multi_chan_event(struct mwifiex_private *priv, struct sk_buff *event_skb); void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter); +int mwifiex_set_mac_address(struct mwifiex_private *priv, + struct net_device *dev); #ifdef CONFIG_DEBUG_FS void mwifiex_debugfs_init(void); -- cgit v1.2.3-55-g7522 From fdfb0f94bfb71ce31f5417b2f6362e43b0b847f0 Mon Sep 17 00:00:00 2001 From: Wright Feng Date: Thu, 3 Aug 2017 17:37:57 +0800 Subject: brcmfmac: set wpa_auth to WPA_AUTH_DISABLED in AP/OPEN security mode When setting wpa_auth to WPA_AUTH_NONE(1) in AP mode with WEP security, firmware will set privacy bit and add WPA OUI in VENDOR IE in beacon and probe response. The security type in softAP beacons confuse the supplicant in client side, and the user client will see [WPA-?] in supplicant scan result. So we set WPA_AUTH_DISABLED in softAP mode with OPEN security. 
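[Editor's note] The decision the patch makes can be condensed to: for open security, only IBSS keeps WPA_AUTH_NONE; AP (and STA) use WPA_AUTH_DISABLED so firmware does not set the privacy bit or advertise a WPA OUI. A tiny illustrative sketch, with the numeric values assumed rather than copied from brcmfmac headers:

        #include <stdio.h>

        /* Firmware "wpa_auth" values as referenced in the commit message;
         * the exact numbers here are assumptions for illustration only. */
        #define WPA_AUTH_DISABLED 0
        #define WPA_AUTH_NONE     1

        /* Open security: IBSS keeps WPA_AUTH_NONE, everything else uses
         * DISABLED so beacons do not carry a WPA vendor IE. */
        static int open_security_wpa_auth(int is_ibss)
        {
                return is_ibss ? WPA_AUTH_NONE : WPA_AUTH_DISABLED;
        }

        int main(void)
        {
                printf("softAP open security -> wpa_auth=%d\n",
                       open_security_wpa_auth(0));
                return 0;
        }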
Signed-off-by: Wright Feng Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 7e689c86d565..579089a340f6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3940,6 +3940,7 @@ brcmf_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *ndev) static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp) { s32 err; + s32 wpa_val; /* set auth */ err = brcmf_fil_bsscfg_int_set(ifp, "auth", 0); @@ -3954,7 +3955,11 @@ static s32 brcmf_configure_opensecurity(struct brcmf_if *ifp) return err; } /* set upper-layer auth */ - err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", WPA_AUTH_NONE); + if (brcmf_is_ibssmode(ifp->vif)) + wpa_val = WPA_AUTH_NONE; + else + wpa_val = WPA_AUTH_DISABLED; + err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_val); if (err < 0) { brcmf_err("wpa_auth error %d\n", err); return err; -- cgit v1.2.3-55-g7522 From 0ec9eb90feec4933637fbde9d5bfbc3b62aea218 Mon Sep 17 00:00:00 2001 From: Chi-Hsien Lin Date: Thu, 3 Aug 2017 17:37:58 +0800 Subject: brcmfmac: Add support for CYW4373 SDIO/USB chipset Add support for CYW4373 SDIO/USB chipset. CYW4373 is a 1x1 dual-band 11ac chipset with 20/40/80Mhz channel support. It's a WiFi/BT combo device. Signed-off-by: Chi-Hsien Lin Reviewed-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 1 + drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 2 ++ drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 4 +++- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 9 ++++++++- drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h | 3 +++ include/linux/mmc/sdio_ids.h | 1 + 6 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 984c1d0560b1..cd587325e286 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1105,6 +1105,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43455), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373), { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 05f22ff81d60..c5d1a1cbf601 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -690,6 +690,8 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) case BRCM_CC_4365_CHIP_ID: case BRCM_CC_4366_CHIP_ID: return 0x200000; + case CY_CC_4373_CHIP_ID: + return 0x160000; default: brcmf_err("unknown chip: %s\n", ci->pub.name); break; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index f3556122c6ac..613caca7dc02 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -618,6 +618,7 @@ 
BRCMF_FW_NVRAM_DEF(43430A1, "brcmfmac43430-sdio.bin", "brcmfmac43430-sdio.txt"); BRCMF_FW_NVRAM_DEF(43455, "brcmfmac43455-sdio.bin", "brcmfmac43455-sdio.txt"); BRCMF_FW_NVRAM_DEF(4354, "brcmfmac4354-sdio.bin", "brcmfmac4354-sdio.txt"); BRCMF_FW_NVRAM_DEF(4356, "brcmfmac4356-sdio.bin", "brcmfmac4356-sdio.txt"); +BRCMF_FW_NVRAM_DEF(4373, "brcmfmac4373-sdio.bin", "brcmfmac4373-sdio.txt"); static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), @@ -636,7 +637,8 @@ static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356) + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), + BRCMF_FW_NVRAM_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373) }; static void pkt_align(struct sk_buff *p, int len, int align) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 0eea48e73331..8f20a4bb40d9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -50,6 +50,7 @@ BRCMF_FW_DEF(43143, "brcmfmac43143.bin"); BRCMF_FW_DEF(43236B, "brcmfmac43236b.bin"); BRCMF_FW_DEF(43242A, "brcmfmac43242a.bin"); BRCMF_FW_DEF(43569, "brcmfmac43569.bin"); +BRCMF_FW_DEF(4373, "brcmfmac4373.bin"); static struct brcmf_firmware_mapping brcmf_usb_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), @@ -58,7 +59,8 @@ static struct brcmf_firmware_mapping brcmf_usb_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43238_CHIP_ID, 0x00000008, 43236B), BRCMF_FW_ENTRY(BRCM_CC_43242_CHIP_ID, 0xFFFFFFFF, 43242A), BRCMF_FW_ENTRY(BRCM_CC_43566_CHIP_ID, 0xFFFFFFFF, 43569), - BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43569) + BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43569), + BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373) }; #define TRX_MAGIC 0x30524448 /* "HDR0" */ @@ -1463,15 +1465,20 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf) #define LINKSYS_USB_DEVICE(dev_id) \ { USB_DEVICE(BRCM_USB_VENDOR_ID_LINKSYS, dev_id) } +#define CYPRESS_USB_DEVICE(dev_id) \ + { USB_DEVICE(CY_USB_VENDOR_ID_CYPRESS, dev_id) } + static struct usb_device_id brcmf_usb_devid_table[] = { BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID), BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID), BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID), BRCMF_USB_DEVICE(BRCM_USB_43569_DEVICE_ID), LINKSYS_USB_DEVICE(BRCM_USB_43235_LINKSYS_DEVICE_ID), + CYPRESS_USB_DEVICE(CY_USB_4373_DEVICE_ID), { USB_DEVICE(BRCM_USB_VENDOR_ID_LG, BRCM_USB_43242_LG_DEVICE_ID) }, /* special entry for device with firmware loaded and running */ BRCMF_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID), + CYPRESS_USB_DEVICE(BRCM_USB_BCMFW_DEVICE_ID), { /* end: all zeroes */ } }; diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h index f1fb8a3c7a32..57544a3a3ce4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h @@ -23,6 +23,7 @@ #define BRCM_USB_VENDOR_ID_BROADCOM 0x0a5c #define BRCM_USB_VENDOR_ID_LG 0x043e #define BRCM_USB_VENDOR_ID_LINKSYS 0x13b1 +#define CY_USB_VENDOR_ID_CYPRESS 0x04b4 #define BRCM_PCIE_VENDOR_ID_BROADCOM 
PCI_VENDOR_ID_BROADCOM /* Chipcommon Core Chip IDs */ @@ -57,6 +58,7 @@ #define BRCM_CC_4365_CHIP_ID 0x4365 #define BRCM_CC_4366_CHIP_ID 0x4366 #define BRCM_CC_4371_CHIP_ID 0x4371 +#define CY_CC_4373_CHIP_ID 0x4373 /* USB Device IDs */ #define BRCM_USB_43143_DEVICE_ID 0xbd1e @@ -66,6 +68,7 @@ #define BRCM_USB_43242_LG_DEVICE_ID 0x3101 #define BRCM_USB_43569_DEVICE_ID 0xbd27 #define BRCM_USB_BCMFW_DEVICE_ID 0x0bdc +#define CY_USB_4373_DEVICE_ID 0xbd29 /* PCIE Device IDs */ #define BRCM_PCIE_4350_DEVICE_ID 0x43a3 diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index b733eb404ffc..abacd5484bc0 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -39,6 +39,7 @@ #define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 #define SDIO_DEVICE_ID_BROADCOM_4356 0x4356 +#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373 #define SDIO_VENDOR_ID_INTEL 0x0089 #define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402 -- cgit v1.2.3-55-g7522 From 99976fc084129e07df3a066dc15651853386da19 Mon Sep 17 00:00:00 2001 From: Wright Feng Date: Thu, 3 Aug 2017 17:37:59 +0800 Subject: brcmfmac: fix wrong num_different_channels when mchan feature enabled When the device/firmware supports multi-channel, it can have P2P connection and regular connection with AP simultaneous. In this case, the num_different_channels in wiphy info was not correct when firmware supports multi-channel (The iw wiphy# info showed "#channels <= 1" in interface combinations). It caused association failed and error message "CTRL-EVENT-FREQ-CONFLICT error" in wpa_supplicant when P2P GO interface was running at the same time. The root cause is that the num_different_channels was always overridden to 1 in brcmf_setup_ifmodes even multi-channel was enabled. We correct the logic by moving num_different_channels setting forward. Signed-off-by: Wright Feng Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 579089a340f6..65cbb3d1e509 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6461,6 +6461,8 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) if (p2p) { if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MCHAN)) combo[c].num_different_channels = 2; + else + combo[c].num_different_channels = 1; wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_DEVICE); @@ -6470,10 +6472,10 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp) c0_limits[i++].types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); } else { + combo[c].num_different_channels = 1; c0_limits[i].max = 1; c0_limits[i++].types = BIT(NL80211_IFTYPE_AP); } - combo[c].num_different_channels = 1; combo[c].max_interfaces = i; combo[c].n_limits = i; combo[c].limits = c0_limits; -- cgit v1.2.3-55-g7522 From 8b943e36e24a247d96f79d679ce1d97a17507106 Mon Sep 17 00:00:00 2001 From: Chung-Hsien Hsu Date: Mon, 7 Aug 2017 16:16:52 +0800 Subject: brcmfmac: add setting carrier state ON for successful roaming After association, ping is not working when sweeping the channel at the AP side. It is caused by having incorrect carrier state (OFF) for the STA in successful roaming. 
This patch sets the carrier state ON for the case. Signed-off-by: Chung-Hsien Hsu Acked-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 65cbb3d1e509..aaed4ab503ad 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5698,10 +5698,13 @@ brcmf_notify_roaming_status(struct brcmf_if *ifp, u32 status = e->status; if (event == BRCMF_E_ROAM && status == BRCMF_E_STATUS_SUCCESS) { - if (test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state)) + if (test_bit(BRCMF_VIF_STATUS_CONNECTED, + &ifp->vif->sme_state)) { brcmf_bss_roaming_done(cfg, ifp->ndev, e); - else + } else { brcmf_bss_connect_done(cfg, ifp->ndev, e, true); + brcmf_net_setcarrier(ifp, true); + } } return 0; -- cgit v1.2.3-55-g7522 From 4f2949febc22260f217e5459afef6c1b90bb049c Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Wed, 2 Aug 2017 23:27:13 +0530 Subject: wlcore: add const to bin_attribute structure Add const to bin_attribute structure as it is only passed to the functions sysfs_{remove/create}_bin_file. The corresponding arguments are of type const, so declare the structure to be const. Signed-off-by: Bhumika Goyal Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c index a9218e5b0efc..b72e2101488b 100644 --- a/drivers/net/wireless/ti/wlcore/sysfs.c +++ b/drivers/net/wireless/ti/wlcore/sysfs.c @@ -138,7 +138,7 @@ static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj, return len; } -static struct bin_attribute fwlog_attr = { +static const struct bin_attribute fwlog_attr = { .attr = {.name = "fwlog", .mode = S_IRUSR}, .read = wl1271_sysfs_read_fwlog, }; -- cgit v1.2.3-55-g7522 From cb1b82625de7cbf30bb62672776c466615af84d2 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Mon, 7 Aug 2017 10:30:51 +0530 Subject: rtlwifi: constify rate_control_ops structure rate_control_ops structure is only passed as an argument to the function ieee80211_rate_control_{register/unregister}. This argument is of type const, so declare the structure as const. Signed-off-by: Bhumika Goyal Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index 951d257cd4c0..02811eda57cd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -283,7 +283,7 @@ static void rtl_rate_free_sta(void *rtlpriv, kfree(rate_priv); } -static struct rate_control_ops rtl_rate_ops = { +static const struct rate_control_ops rtl_rate_ops = { .name = "rtl_rc", .alloc = rtl_rate_alloc, .free = rtl_rate_free, -- cgit v1.2.3-55-g7522 From 2db3aaba0a9fc769435477059c1224c439061752 Mon Sep 17 00:00:00 2001 From: Michael Skeffington Date: Mon, 7 Aug 2017 12:47:36 -0400 Subject: rt2x00: Fix MMIC Countermeasures Set RX_FLAG_DECRYPTED in case of MMIC failure so that ieee80211_rx_h_decrypt() doesnt drop the frame before getting to ieee80211_rx_h_michael_mic_verify(). 
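[Editor's note] The fix above is about which RX status flags are reported together: a frame whose Michael MIC check failed was still decrypted by hardware, so both "decrypted" and "MMIC error" must be set or the stack drops it before countermeasures run. A self-contained sketch of that mapping; the flag values and status codes are stand-ins, not the real mac80211 or rt2x00 definitions.

        #include <stdio.h>

        /* Illustrative stand-ins for mac80211 RX flags and the hardware's
         * cipher status codes; the values are made up for this sketch. */
        #define RX_FLAG_DECRYPTED   (1U << 0)
        #define RX_FLAG_MMIC_ERROR  (1U << 1)

        enum cipher_status { CRYPTO_SUCCESS, CRYPTO_FAIL_ICV, CRYPTO_FAIL_MIC };

        static unsigned int rx_flags_for(enum cipher_status st)
        {
                switch (st) {
                case CRYPTO_SUCCESS:
                        return RX_FLAG_DECRYPTED;
                case CRYPTO_FAIL_MIC:
                        /* The payload *was* decrypted; only the Michael MIC
                         * failed.  Both flags are needed so the stack keeps
                         * the frame long enough to run TKIP countermeasures. */
                        return RX_FLAG_DECRYPTED | RX_FLAG_MMIC_ERROR;
                default:
                        return 0; /* let the stack treat it as still encrypted */
                }
        }

        int main(void)
        {
                printf("MIC failure flags: 0x%x\n", rx_flags_for(CRYPTO_FAIL_MIC));
                return 0;
        }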
Signed-off-by: Michael Skeffington Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800mmio.c | 13 +++++++++++-- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 15 ++++++++++++--- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c index ee5276e233fa..1123e2bed803 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c @@ -136,10 +136,19 @@ void rt2800mmio_fill_rxdone(struct queue_entry *entry, */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; - if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) { rxdesc->flags |= RX_FLAG_DECRYPTED; - else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) + } else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) { + /* + * In order to check the Michael Mic, the packet must have + * been decrypted. Mac80211 doesnt check the MMIC failure + * flag to initiate MMIC countermeasures if the decoded flag + * has not been set. + */ + rxdesc->flags |= RX_FLAG_DECRYPTED; + rxdesc->flags |= RX_FLAG_MMIC_ERROR; + } } if (rt2x00_get_field32(word, RXD_W3_MY_BSS)) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 685b8e0cd67d..3e5d3a40d986 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -697,11 +697,20 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, * stripped it from the frame. Signal this to mac80211. */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; - - if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) + + if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) { + rxdesc->flags |= RX_FLAG_DECRYPTED; + } else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) { + /* + * In order to check the Michael Mic, the packet must have + * been decrypted. Mac80211 doesnt check the MMIC failure + * flag to initiate MMIC countermeasures if the decoded flag + * has not been set. + */ rxdesc->flags |= RX_FLAG_DECRYPTED; - else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) + rxdesc->flags |= RX_FLAG_MMIC_ERROR; + } } if (rt2x00_get_field32(word, RXD_W0_MY_BSS)) -- cgit v1.2.3-55-g7522 From 198ec9ae052922d67491ab3cc3fd704580e0daf4 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Tue, 8 Aug 2017 15:48:40 +0200 Subject: Bluetooth: document config options Kernel config options should include useful help text; I had to look up the terms on wikipedia. Signed-off-by: Pavel Machek Signed-off-by: Marcel Holtmann --- net/bluetooth/Kconfig | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig index 68f951b3e85a..db82a40875e8 100644 --- a/net/bluetooth/Kconfig +++ b/net/bluetooth/Kconfig @@ -45,6 +45,11 @@ config BT_BREDR bool "Bluetooth Classic (BR/EDR) features" depends on BT default y + help + Bluetooth Classic includes support for Basic Rate (BR) + available with Bluetooth version 1.0b or later and support + for Enhanced Data Rate (EDR) available with Bluetooth + version 2.0 or later. source "net/bluetooth/rfcomm/Kconfig" @@ -58,11 +63,18 @@ config BT_HS bool "Bluetooth High Speed (HS) features" depends on BT_BREDR default y + help + Bluetooth High Speed includes support for off-loading + Bluetooth connections via 802.11 (wifi) physical layer + available with Bluetooth version 3.0 or later. 
config BT_LE bool "Bluetooth Low Energy (LE) features" depends on BT default y + help + Bluetooth Low Energy includes support low-energy physical + layer available with Bluetooth version 4.0 or later. config BT_6LOWPAN tristate "Bluetooth 6LoWPAN support" -- cgit v1.2.3-55-g7522 From 6641525ce40ef45641c8f43bb19cd4e471e4cb75 Mon Sep 17 00:00:00 2001 From: Hamad Kadmany Date: Tue, 8 Aug 2017 14:16:40 +0300 Subject: wil6210: protect against invalid length of tx management frame Validate buffer length has the minimum needed size when sending management frame to protect against possible buffer overrun. Signed-off-by: Hamad Kadmany Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 3 +++ drivers/net/wireless/ath/wil6210/debugfs.c | 3 +++ 2 files changed, 6 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 0b5383a62d42..77af7492420d 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -884,6 +884,9 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, wil_hex_dump_misc("mgmt tx frame ", DUMP_PREFIX_OFFSET, 16, 1, buf, len, true); + if (len < sizeof(struct ieee80211_hdr_3addr)) + return -EINVAL; + cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL); if (!cmd) { rc = -ENOMEM; diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index f82506d276d3..a2b5d595aa19 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -801,6 +801,9 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, int rc; void *frame; + if (!len) + return -EINVAL; + frame = memdup_user(buf, len); if (IS_ERR(frame)) return PTR_ERR(frame); -- cgit v1.2.3-55-g7522 From 30868f5d4413759ba82c0703290f2483402fea39 Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Tue, 8 Aug 2017 14:16:43 +0300 Subject: wil6210: support FW RSSI reporting New FW supports reporting RSSI signal in dBm. Report RSSI to kernel in case FW has this capability. 
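[Editor's note] When a wiphy declares CFG80211_SIGNAL_TYPE_MBM, BSS signal values passed to cfg80211 are expressed in mBm, i.e. hundredths of a dBm, which is why the management-frame RX path in the diff below multiplies the firmware's dBm RSSI by 100. A tiny standalone illustration of the conversion (the example value is arbitrary):

        #include <stdio.h>

        /* Convert a signal level in dBm to mBm (1/100 dBm), the unit used
         * when signal_type is CFG80211_SIGNAL_TYPE_MBM. */
        static int dbm_to_mbm(int dbm)
        {
                return dbm * 100;
        }

        int main(void)
        {
                int rssi_dbm = -57;          /* example value from firmware */

                printf("%d dBm -> %d mBm\n", rssi_dbm, dbm_to_mbm(rssi_dbm));
                return 0;
        }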
Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 12 ++++++++---- drivers/net/wireless/ath/wil6210/debugfs.c | 2 ++ drivers/net/wireless/ath/wil6210/pcie_bus.c | 3 +++ drivers/net/wireless/ath/wil6210/wmi.c | 9 ++++++--- drivers/net/wireless/ath/wil6210/wmi.h | 6 ++++-- 5 files changed, 23 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 77af7492420d..5cd91145c079 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -273,12 +273,12 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, wil_dbg_wmi(wil, "Link status for CID %d: {\n" " MCS %d TSF 0x%016llx\n" - " BF status 0x%08x SNR 0x%08x SQI %d%%\n" + " BF status 0x%08x RSSI %d SQI %d%%\n" " Tx Tpt %d goodput %d Rx goodput %d\n" " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n", cid, le16_to_cpu(reply.evt.bf_mcs), le64_to_cpu(reply.evt.tsf), reply.evt.status, - le32_to_cpu(reply.evt.snr_val), + reply.evt.rssi, reply.evt.sqi, le32_to_cpu(reply.evt.tx_tpt), le32_to_cpu(reply.evt.tx_goodput), @@ -311,7 +311,11 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid, if (test_bit(wil_status_fwconnected, wil->status)) { sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); - sinfo->signal = reply.evt.sqi; + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, + wil->fw_capabilities)) + sinfo->signal = reply.evt.rssi; + else + sinfo->signal = reply.evt.sqi; } return rc; @@ -1794,7 +1798,7 @@ static void wil_wiphy_init(struct wiphy *wiphy) wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz; - /* TODO: figure this out */ + /* may change after reading FW capabilities */ wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC; wiphy->cipher_suites = wil_cipher_suites; diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index a2b5d595aa19..21b661158b1a 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1016,6 +1016,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) " TSF = 0x%016llx\n" " TxMCS = %2d TxTpt = %4d\n" " SQI = %4d\n" + " RSSI = %4d\n" " Status = 0x%08x %s\n" " Sectors(rx:tx) my %2d:%2d peer %2d:%2d\n" " Goodput(rx:tx) %4d:%4d\n" @@ -1025,6 +1026,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) le16_to_cpu(reply.evt.bf_mcs), le32_to_cpu(reply.evt.tx_tpt), reply.evt.sqi, + reply.evt.rssi, status, wil_bfstatus_str(status), le16_to_cpu(reply.evt.my_rx_sector), le16_to_cpu(reply.evt.my_tx_sector), diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c index d571feb2370e..6a3ab4bf916d 100644 --- a/drivers/net/wireless/ath/wil6210/pcie_bus.c +++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c @@ -84,6 +84,9 @@ void wil_set_capabilities(struct wil6210_priv *wil) /* extract FW capabilities from file without loading the FW */ wil_request_firmware(wil, wil->wil_fw_name, false); + + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) + wil_to_wiphy(wil)->signal_type = CFG80211_SIGNAL_TYPE_MBM; } void wil_disable_irq(struct wil6210_priv *wil) diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 65ef67321fc0..a9487f2b8d60 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -381,12 +381,15 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int 
id, void *d, int len) ch_no = data->info.channel + 1; freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ); channel = ieee80211_get_channel(wiphy, freq); - signal = data->info.sqi; + if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities)) + signal = 100 * data->info.rssi; + else + signal = data->info.sqi; d_status = le16_to_cpu(data->info.status); fc = rx_mgmt_frame->frame_control; - wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d SNR %d SQI %d%%\n", - data->info.channel, data->info.mcs, data->info.snr, + wil_dbg_wmi(wil, "MGMT Rx: channel %d MCS %d RSSI %d SQI %d%%\n", + data->info.channel, data->info.mcs, data->info.rssi, data->info.sqi); wil_dbg_wmi(wil, "status 0x%04x len %d fc 0x%04x\n", d_status, d_len, le16_to_cpu(fc)); diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 256f63c57da0..4e31c2fd1fc6 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -60,6 +60,7 @@ enum wmi_fw_capability { WMI_FW_CAPABILITY_WMI_ONLY = 5, WMI_FW_CAPABILITY_THERMAL_THROTTLING = 7, WMI_FW_CAPABILITY_D3_SUSPEND = 8, + WMI_FW_CAPABILITY_RSSI_REPORTING = 12, WMI_FW_CAPABILITY_MAX, }; @@ -1306,7 +1307,8 @@ struct wmi_notify_req_done_event { /* beamforming status, 0: fail; 1: OK; 2: retrying */ __le32 status; __le64 tsf; - __le32 snr_val; + s8 rssi; + u8 reserved0[3]; __le32 tx_tpt; __le32 tx_goodput; __le32 rx_goodput; @@ -1602,7 +1604,7 @@ struct wmi_get_ssid_event { /* wmi_rx_mgmt_info */ struct wmi_rx_mgmt_info { u8 mcs; - s8 snr; + s8 rssi; u8 range; u8 sqi; __le16 stype; -- cgit v1.2.3-55-g7522 From c6622116c5ae56aec47dd3d63be49cabf591162a Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 8 Aug 2017 14:16:44 +0300 Subject: wil6210: check no_fw_recovery in resume failure recovery Reset 11ad device on resume failure only if no_fw_recovery is not set. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/pm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index ce1f384e7f8e..45488292a8fc 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -85,7 +85,9 @@ static int wil_resume_keep_radio_on(struct wil6210_priv *wil) /* Send WMI resume request to the device */ rc = wmi_resume(wil); if (rc) { - wil_err(wil, "device failed to resume (%d), resetting\n", rc); + wil_err(wil, "device failed to resume (%d)\n", rc); + if (no_fw_recovery) + goto out; rc = wil_down(wil); if (rc) { wil_err(wil, "wil_down failed (%d)\n", rc); -- cgit v1.2.3-55-g7522 From 262345265e599fe5ccc75ba435147d17df04bfd6 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 8 Aug 2017 14:16:45 +0300 Subject: wil6210: add statistics for suspend time Add statistics for total, min and max suspend time, that calculates the time the 11ad device was in suspend. Those statistics will help to estimate the power impact of d3hot feature. 
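[Editor's note] The statistics added below follow a standard total/min/max accumulation over measured durations, with the minimum pre-seeded to the largest possible value. A self-contained userspace sketch of the same bookkeeping is shown here; it uses clock_gettime() in place of the kernel's ktime helpers, and all names are illustrative.

        #include <limits.h>
        #include <stdio.h>
        #include <time.h>

        /* Running statistics over measured durations. */
        struct duration_stats {
                unsigned long long total_us;
                unsigned long long min_us;
                unsigned long long max_us;
        };

        static unsigned long long now_us(void)
        {
                struct timespec ts;

                clock_gettime(CLOCK_MONOTONIC, &ts);
                return ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
        }

        static void stats_init(struct duration_stats *s)
        {
                s->total_us = 0;
                s->max_us = 0;
                s->min_us = ULLONG_MAX;  /* same trick as seeding min with ULONG_MAX */
        }

        static void stats_update(struct duration_stats *s, unsigned long long dur_us)
        {
                s->total_us += dur_us;
                if (dur_us < s->min_us)
                        s->min_us = dur_us;
                if (dur_us > s->max_us)
                        s->max_us = dur_us;
        }

        int main(void)
        {
                struct duration_stats s;
                unsigned long long start;

                stats_init(&s);
                start = now_us();
                /* ... the "suspended" interval would elapse here ... */
                stats_update(&s, now_us() - start);

                printf("total=%llu min=%llu max=%llu (us)\n",
                       s.total_us, s.min_us, s.max_us);
                return 0;
        }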
Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 17 +++++++++++++++-- drivers/net/wireless/ath/wil6210/main.c | 2 ++ drivers/net/wireless/ath/wil6210/pm.c | 20 ++++++++++++++++++-- drivers/net/wireless/ath/wil6210/wil6210.h | 5 +++++ 4 files changed, 40 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 21b661158b1a..d4e88652fcc6 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1617,6 +1617,8 @@ static ssize_t wil_write_suspend_stats(struct file *file, struct wil6210_priv *wil = file->private_data; memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); + wil->suspend_stats.min_suspend_time = ULONG_MAX; + wil->suspend_stats.collection_start = ktime_get(); return len; } @@ -1628,18 +1630,27 @@ static ssize_t wil_read_suspend_stats(struct file *file, struct wil6210_priv *wil = file->private_data; static char text[400]; int n; + unsigned long long stats_collection_time = + ktime_to_us(ktime_sub(ktime_get(), + wil->suspend_stats.collection_start)); n = snprintf(text, sizeof(text), "Suspend statistics:\n" "successful suspends:%ld failed suspends:%ld\n" "successful resumes:%ld failed resumes:%ld\n" - "rejected by host:%ld rejected by device:%ld\n", + "rejected by host:%ld rejected by device:%ld\n" + "total suspend time:%lld min suspend time:%lld\n" + "max suspend time:%lld stats collection time: %lld\n", wil->suspend_stats.successful_suspends, wil->suspend_stats.failed_suspends, wil->suspend_stats.successful_resumes, wil->suspend_stats.failed_resumes, wil->suspend_stats.rejected_by_host, - wil->suspend_stats.rejected_by_device); + wil->suspend_stats.rejected_by_device, + wil->suspend_stats.total_suspend_time, + wil->suspend_stats.min_suspend_time, + wil->suspend_stats.max_suspend_time, + stats_collection_time); n = min_t(int, n, sizeof(text)); @@ -1795,6 +1806,8 @@ int wil6210_debugfs_init(struct wil6210_priv *wil) wil6210_debugfs_create_ITR_CNT(wil, dbg); + wil->suspend_stats.collection_start = ktime_get(); + return 0; } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index daf944a71901..8968c2c51a1b 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -578,6 +578,8 @@ int wil_priv_init(struct wil6210_priv *wil) wil->wakeup_trigger = WMI_WAKEUP_TRIGGER_UCAST | WMI_WAKEUP_TRIGGER_BCAST; + memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); + wil->suspend_stats.min_suspend_time = ULONG_MAX; return 0; diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 45488292a8fc..820ed17ae2d4 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -300,6 +300,9 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime) wil_dbg_pm(wil, "suspend: %s => %d\n", is_runtime ? "runtime" : "system", rc); + if (!rc) + wil->suspend_stats.suspend_start_time = ktime_get(); + return rc; } @@ -309,6 +312,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) struct net_device *ndev = wil_to_ndev(wil); bool keep_radio_on = ndev->flags & IFF_UP && wil->keep_radio_on_during_sleep; + unsigned long long suspend_time_usec = 0; wil_dbg_pm(wil, "resume: %s\n", is_runtime ? 
"runtime" : "system"); @@ -326,8 +330,20 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime) else rc = wil_resume_radio_off(wil); + if (rc) + goto out; + + suspend_time_usec = + ktime_to_us(ktime_sub(ktime_get(), + wil->suspend_stats.suspend_start_time)); + wil->suspend_stats.total_suspend_time += suspend_time_usec; + if (suspend_time_usec < wil->suspend_stats.min_suspend_time) + wil->suspend_stats.min_suspend_time = suspend_time_usec; + if (suspend_time_usec > wil->suspend_stats.max_suspend_time) + wil->suspend_stats.max_suspend_time = suspend_time_usec; + out: - wil_dbg_pm(wil, "resume: %s => %d\n", - is_runtime ? "runtime" : "system", rc); + wil_dbg_pm(wil, "resume: %s => %d, suspend time %lld usec\n", + is_runtime ? "runtime" : "system", rc, suspend_time_usec); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index d085ccfc7228..45d9385e8a0f 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -90,6 +90,11 @@ struct wil_suspend_stats { unsigned long failed_resumes; unsigned long rejected_by_device; unsigned long rejected_by_host; + unsigned long long total_suspend_time; + unsigned long long min_suspend_time; + unsigned long long max_suspend_time; + ktime_t collection_start; + ktime_t suspend_start_time; }; /* Calculate MAC buffer size for the firmware. It includes all overhead, -- cgit v1.2.3-55-g7522 From d1fbf07540b7d35df693e2e1d7528d8ca9bdb2c2 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Tue, 8 Aug 2017 14:16:46 +0300 Subject: wil6210: notify wiphy on wowlan support Set wowlan to indicate that 11ad device can wake-up on any trigger and disconnect. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 5cd91145c079..9b529ea62bff 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -26,6 +26,12 @@ bool disable_ap_sme; module_param(disable_ap_sme, bool, 0444); MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME"); +#ifdef CONFIG_PM +static struct wiphy_wowlan_support wil_wowlan_support = { + .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT, +}; +#endif + #define CHAN60G(_channel, _flags) { \ .band = NL80211_BAND_60GHZ, \ .center_freq = 56160 + (2160 * (_channel)), \ @@ -1808,6 +1814,10 @@ static void wil_wiphy_init(struct wiphy *wiphy) wiphy->n_vendor_commands = ARRAY_SIZE(wil_nl80211_vendor_commands); wiphy->vendor_commands = wil_nl80211_vendor_commands; + +#ifdef CONFIG_PM + wiphy->wowlan = &wil_wowlan_support; +#endif } struct wireless_dev *wil_cfg80211_init(struct device *dev) -- cgit v1.2.3-55-g7522 From 9b2a4c2d534ca45c74488bace32323b26633ff66 Mon Sep 17 00:00:00 2001 From: Hamad Kadmany Date: Tue, 8 Aug 2017 14:16:47 +0300 Subject: wil6210: fix interface-up check While wil_open is executed, any call to netif_running would return a success. In case there are failures within wil_open, should not treat the device as if it is already opened in relevant functions (like FW recovery and runtime suspend check). Fix that by checking the device up flag instead. 
Signed-off-by: Hamad Kadmany Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 3 ++- drivers/net/wireless/ath/wil6210/pm.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 8968c2c51a1b..b1e281442ba3 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -394,10 +394,11 @@ static void wil_fw_error_worker(struct work_struct *work) struct wil6210_priv *wil = container_of(work, struct wil6210_priv, fw_error_worker); struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev = wil_to_ndev(wil); wil_dbg_misc(wil, "fw error worker\n"); - if (!netif_running(wil_to_ndev(wil))) { + if (!(ndev->flags & IFF_UP)) { wil_info(wil, "No recovery - interface is down\n"); return; } diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c index 820ed17ae2d4..8f5d1b447aaa 100644 --- a/drivers/net/wireless/ath/wil6210/pm.c +++ b/drivers/net/wireless/ath/wil6210/pm.c @@ -21,10 +21,11 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime) { int rc = 0; struct wireless_dev *wdev = wil->wdev; + struct net_device *ndev = wil_to_ndev(wil); wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system"); - if (!netif_running(wil_to_ndev(wil))) { + if (!(ndev->flags & IFF_UP)) { /* can always sleep when down */ wil_dbg_pm(wil, "Interface is down\n"); goto out; -- cgit v1.2.3-55-g7522 From eb4c02155881696ee6abb090d554b765e41d46ed Mon Sep 17 00:00:00 2001 From: Dedy Lansky Date: Tue, 8 Aug 2017 14:16:48 +0300 Subject: wil6210: store FW RF calibration result Store initial FW RF calibration result in driver. Set this calibration result back to FW after each FW reset in order to avoid future calibration procedures. 
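A hedged sketch of the register layout this patch uses (CALIB_RESULT_SIGNATURE and the bit split come from the diff below; the helper name is illustrative): the calibration byte occupies bits 0-7 and the signature byte bits 8-15 of the value written back after each FW reset.

#include <linux/types.h>

#define CALIB_RESULT_SIGNATURE	0x11	/* signature byte, as in wil6210.h */

/* Illustrative only: pack result (b0-7) and signature (b8-15) into the
 * 32-bit value written to RGF_USER_FW_CALIB_RESULT after a FW reset.
 */
static u32 calib_result_reg_val(int fw_calib_result)
{
	return (u32)fw_calib_result | (CALIB_RESULT_SIGNATURE << 8);
}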
Signed-off-by: Dedy Lansky Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 6 ++++++ drivers/net/wireless/ath/wil6210/wil6210.h | 6 ++++++ drivers/net/wireless/ath/wil6210/wmi.c | 5 +++++ drivers/net/wireless/ath/wil6210/wmi.h | 2 ++ 4 files changed, 19 insertions(+) diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index b1e281442ba3..b89d017ec847 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1034,6 +1034,12 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + if (wil->fw_calib_result > 0) { + __le32 val = cpu_to_le32(wil->fw_calib_result | + (CALIB_RESULT_SIGNATURE << 8)); + wil_w(wil, RGF_USER_FW_CALIB_RESULT, (u32 __force)val); + } + wil_release_cpu(wil); } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 45d9385e8a0f..78a9c9f8fc8f 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -171,6 +171,10 @@ struct RGF_ICR { #define RGF_USER_USER_SCRATCH_PAD (0x8802bc) #define RGF_USER_BL (0x880A3C) /* Boot Loader */ #define RGF_USER_FW_REV_ID (0x880a8c) /* chip revision */ +#define RGF_USER_FW_CALIB_RESULT (0x880a90) /* b0-7:result + * b8-15:signature + */ + #define CALIB_RESULT_SIGNATURE (0x11) #define RGF_USER_CLKS_CTL_0 (0x880abc) #define BIT_USER_CLKS_CAR_AHB_SW_SEL BIT(1) /* ref clk/PLL */ #define BIT_USER_CLKS_RST_PWGD BIT(11) /* reset on "power good" */ @@ -724,6 +728,8 @@ struct wil6210_priv { enum wmi_ps_profile_type ps_profile; + int fw_calib_result; + #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP struct notifier_block pm_notify; diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index a9487f2b8d60..ffdd2fa401b1 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -344,6 +344,11 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len) strlcpy(wdev->wiphy->fw_version, wil->fw_version, sizeof(wdev->wiphy->fw_version)); + if (len > offsetof(struct wmi_ready_event, rfc_read_calib_result)) { + wil_dbg_wmi(wil, "rfc calibration result %d\n", + evt->rfc_read_calib_result); + wil->fw_calib_result = evt->rfc_read_calib_result; + } wil_set_recovery_state(wil, fw_recovery_idle); set_bit(wil_status_fwready, wil->status); /* let the reset sequence continue */ diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h index 4e31c2fd1fc6..1b426d7ef81f 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.h +++ b/drivers/net/wireless/ath/wil6210/wmi.h @@ -1300,6 +1300,8 @@ struct wmi_ready_event { /* enum wmi_phy_capability */ u8 phy_capability; u8 numof_additional_mids; + u8 rfc_read_calib_result; + u8 reserved[3]; } __packed; /* WMI_NOTIFY_REQ_DONE_EVENTID */ -- cgit v1.2.3-55-g7522 From 38d16ab2b2132beeb9777de1508ce2150b2dcd35 Mon Sep 17 00:00:00 2001 From: Gidon Studinski Date: Tue, 8 Aug 2017 14:16:50 +0300 Subject: wil6210: move vring_idle_trsh definition to wil6210_priv vring_idle_trsh is used in the operational driver, hence should not be defined as a debugfs variable. 
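The general pattern, sketched with assumed names (only vring_idle_trsh and its default of 16 are taken from the patch): a tunable read on the data path belongs in the per-device private structure, initialised during priv init and exposed through debugfs as a struct field, rather than in a file-scope static shared by every adapter instance.

#include <linux/types.h>

struct example_priv {
	u32 vring_idle_trsh;	/* HW fetches up to 16 descriptors at once */
};

/* Illustrative init: the per-device default replaces the old global */
static void example_priv_init(struct example_priv *priv)
{
	priv->vring_idle_trsh = 16;
}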
Signed-off-by: Gidon Studinski Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 5 +---- drivers/net/wireless/ath/wil6210/main.c | 1 + drivers/net/wireless/ath/wil6210/txrx.c | 6 +++--- drivers/net/wireless/ath/wil6210/wil6210.h | 2 +- 4 files changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index d4e88652fcc6..6db00c167d2e 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -20,7 +20,6 @@ #include #include #include - #include "wil6210.h" #include "wmi.h" #include "txrx.h" @@ -30,7 +29,6 @@ static u32 mem_addr; static u32 dbg_txdesc_index; static u32 dbg_vring_index; /* 24+ for Rx, 0..23 for Tx */ -u32 vring_idle_trsh = 16; /* HW fetches up to 16 descriptors at once */ enum dbg_off_type { doff_u32 = 0, @@ -1763,6 +1761,7 @@ static const struct dbg_off dbg_wil_off[] = { WIL_FIELD(chip_revision, 0444, doff_u8), WIL_FIELD(abft_len, 0644, doff_u8), WIL_FIELD(wakeup_trigger, 0644, doff_u8), + WIL_FIELD(vring_idle_trsh, 0644, doff_u32), {}, }; @@ -1778,8 +1777,6 @@ static const struct dbg_off dbg_statics[] = { {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32}, {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32}, {"mem_addr", 0644, (ulong)&mem_addr, doff_u32}, - {"vring_idle_trsh", 0644, (ulong)&vring_idle_trsh, - doff_u32}, {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, {}, }; diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index b89d017ec847..e2ea49077b78 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -581,6 +581,7 @@ int wil_priv_init(struct wil6210_priv *wil) WMI_WAKEUP_TRIGGER_BCAST; memset(&wil->suspend_stats, 0, sizeof(wil->suspend_stats)); wil->suspend_stats.min_suspend_time = ULONG_MAX; + wil->vring_idle_trsh = 16; return 0; diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index ec57bcce9601..389c718cd257 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1666,7 +1666,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring, /* performance monitoring */ used = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used, used + descs_used)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", @@ -1813,7 +1813,7 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, /* performance monitoring */ used = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used, used + nr_frags + 1)) { txdata->idle += get_cycles() - txdata->last_idle; wil_dbg_txrx(wil, "Ring[%2d] not idle %d -> %d\n", @@ -2175,7 +2175,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid) /* performance monitoring */ used_new = wil_vring_used_tx(vring); - if (wil_val_in_range(vring_idle_trsh, + if (wil_val_in_range(wil->vring_idle_trsh, used_new, used_before_complete)) { wil_dbg_txrx(wil, "Ring[%2d] idle %d -> %d\n", ringid, used_before_complete, used_new); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 78a9c9f8fc8f..73dbd62dfed7 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -30,7 
+30,6 @@ extern bool no_fw_recovery; extern unsigned int mtu_max; extern unsigned short rx_ring_overflow_thrsh; extern int agg_wsize; -extern u32 vring_idle_trsh; extern bool rx_align_2; extern bool rx_large_buf; extern bool debug_fw; @@ -693,6 +692,7 @@ struct wil6210_priv { u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ struct wil_sta_info sta[WIL6210_MAX_CID]; int bcast_vring; + u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */ bool use_extended_dma_addr; /* indicates whether we are using 48 bits */ /* scan */ struct cfg80211_scan_request *scan_request; -- cgit v1.2.3-55-g7522 From 94221ae75c869b28bceb76ae300b55d35c186406 Mon Sep 17 00:00:00 2001 From: Gangfeng Huang Date: Sat, 27 May 2017 09:17:53 +0800 Subject: igb: Fix error of RX network flow classification After add an ethertype filter, if user change the adapter speed several times, the error "ethtool -N: etype filters are all used" is reported by igb driver. In older patch, function igb_nfc_filter_exit() and igb_nfc_filter_restore() is not paried. igb_nfc_filter_restore() exist in igb_up(), but function igb_nfc_filter_exit() is exist in __igb_close(). In the process of speed changing, only igb_nfc_filter_restore() is called, it will take a position of ethertype bitmap. Reproduce steps: Step 1: Add a etype filter by ethtool $ethtool -N eth0 flow-type ether proto 0x88F8 action 1 Step 2: Change the adapter speed to 100M/full duplex $ethtool -s eth0 speed 100 duplex full Step 3: Change the adapter speed to 1000M/full duplex ethtool -s eth0 speed 1000 duplex full Repeat step2 and step3, then dmesg the system log, you can find the error message, add new ethtype filter is also failed. This fixing is move igb_nfc_filter_exit() from __igb_close() to igb_down() to make igb_nfc_filter_restore()/igb_nfc_filter_exit() is paired. Signed-off-by: Gangfeng Huang Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ec62410b035a..6a63ea564a57 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1791,6 +1791,8 @@ void igb_down(struct igb_adapter *adapter) wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); /* flush and sleep below */ + igb_nfc_filter_exit(adapter); + netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); @@ -3317,8 +3319,6 @@ static int __igb_close(struct net_device *netdev, bool suspending) igb_down(adapter); igb_free_irq(adapter); - igb_nfc_filter_exit(adapter); - igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); -- cgit v1.2.3-55-g7522 From 26bd4e2db06be8a367cba076c6600b4d5c3e65af Mon Sep 17 00:00:00 2001 From: Cliff Spradlin Date: Mon, 19 Jun 2017 13:30:43 -0700 Subject: igb: protect TX timestamping from API misuse HW timestamping can only be requested for a packet if the NIC is first setup via ioctl(SIOCSHWTSTAMP). If this step was skipped, then the igb driver still allowed TX packets to request HW timestamping. In this situation, the _IGB_PTP_TX_IN_PROGRESS flag was set and would never clear. This prevented any future HW timestamping requests to succeed. Fix this by checking that the NIC is configured for HW TX timestamping before accepting a HW TX timestamping request. 
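A minimal sketch of the added guard (hwtstamp_config and HWTSTAMP_TX_ON are the standard UAPI definitions; the helper wrapper is illustrative): the single in-flight TX timestamp slot is only claimed when HW TX timestamping was first enabled through SIOCSHWTSTAMP.

#include <linux/net_tstamp.h>
#include <linux/types.h>

/* Illustrative check: honour a SKBTX_HW_TSTAMP request only if the
 * adapter's stored hwtstamp_config enables TX timestamping. The bitwise
 * test mirrors the patch (HWTSTAMP_TX_ON == 1).
 */
static bool tx_tstamp_enabled(const struct hwtstamp_config *cfg)
{
	return cfg->tx_type & HWTSTAMP_TX_ON;
}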
Signed-off-by: Cliff Spradlin Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 6a63ea564a57..5d0a75c1ba0c 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5380,7 +5380,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { struct igb_adapter *adapter = netdev_priv(tx_ring->netdev); - if (!test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, + if (adapter->tstamp_config.tx_type & HWTSTAMP_TX_ON && + !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state)) { skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_flags |= IGB_TX_FLAGS_TSTAMP; -- cgit v1.2.3-55-g7522 From d75372a2daf5dc48207ee9e5592917e893cddb87 Mon Sep 17 00:00:00 2001 From: Gustavo A R Silva Date: Tue, 20 Jun 2017 16:22:34 -0500 Subject: e1000e: add check on e1e_wphy() return value Check return value from call to e1e_wphy(). This value is being checked during previous calls to function e1e_wphy() and it seems a check was missing here. Addresses-Coverity-ID: 1226905 Signed-off-by: Gustavo A R Silva Reviewed-by: Ethan Zhao Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 68ea8b4555ab..d6d4ed7acf03 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2437,6 +2437,8 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) if (hw->phy.revision < 2) { e1000e_phy_sw_reset(hw); ret_val = e1e_wphy(hw, MII_BMCR, 0x3140); + if (ret_val) + return ret_val; } } -- cgit v1.2.3-55-g7522 From 2643e6e90210e16c978919617170089b7c2164f7 Mon Sep 17 00:00:00 2001 From: Corinna Vinschen Date: Fri, 23 Jun 2017 14:26:30 +0200 Subject: igb: Remove incorrect "unexpected SYS WRAP" log message TSAUXC.DisableSystime is never set, so SYSTIM runs into a SYS WRAP every 1100 secs on 80580/i350/i354 (40 bit SYSTIM) and every 35000 secs on 80576 (45 bit SYSTIM). This wrap event sets the TSICR.SysWrap bit unconditionally. However, checking TSIM at interrupt time shows that this event does not actually cause the interrupt. Rather, it's just bycatch while the actual interrupt is caused by, for instance, TSICR.TXTS. The conclusion is that the SYS WRAP is actually expected, so the "unexpected SYS WRAP" message is entirely bogus and just helps to confuse users. Drop it. 
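The quoted wrap intervals are consistent with the stated counter widths if SYSTIM is assumed to advance one nanosecond per count: 2^40 ns is roughly 1099.5 s (about 1100 s) and 2^45 ns is roughly 35184 s (about 35000 s). Under that assumption the wrap is a routine, periodic event even on an idle system, which is why logging it as unexpected only misleads users.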
Signed-off-by: Corinna Vinschen Acked-by: Jacob Keller Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 5d0a75c1ba0c..1a99164d5d11 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5746,8 +5746,6 @@ static void igb_tsync_interrupt(struct igb_adapter *adapter) event.type = PTP_CLOCK_PPS; if (adapter->ptp_caps.pps) ptp_clock_event(adapter->ptp_clock, &event); - else - dev_err(&adapter->pdev->dev, "unexpected SYS WRAP"); ack |= TSINTR_SYS_WRAP; } -- cgit v1.2.3-55-g7522 From 42e6b92de8f8d7c736cd819ff5af6208a6bdf8ae Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:26:40 +0530 Subject: net: irda: irda-usb: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/irda/irda-usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 6f3c805f7211..723e49bc4baa 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -72,7 +72,7 @@ static int qos_mtt_bits = 0; /* These are the currently known IrDA USB dongles. Add new dongles here */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* ACTiSYS Corp., ACT-IR2000U FIR-USB Adapter */ { USB_DEVICE(0x9c4, 0x011), .driver_info = IUC_SPEED_BUG | IUC_NO_WINDOW }, /* Look like ACTiSYS, Report : IBM Corp., IBM UltraPort IrDA */ -- cgit v1.2.3-55-g7522 From fa00f26ff6d53ec7874f9b737acf7ab844a0cc5c Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:26:41 +0530 Subject: net: irda: kingsun: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/irda/kingsun-sir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c index 24c0f169a7b1..4fd4ac2fe09f 100644 --- a/drivers/net/irda/kingsun-sir.c +++ b/drivers/net/irda/kingsun-sir.c @@ -85,7 +85,7 @@ #define KING_PRODUCT_ID 0x4200 /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ { USB_DEVICE(KING_VENDOR_ID, KING_PRODUCT_ID) }, { } -- cgit v1.2.3-55-g7522 From 10ffb422700b865c2549e8506395fc732b8bf773 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:26:42 +0530 Subject: net: irda: ks959: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. 
Miller --- drivers/net/irda/ks959-sir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c index 3affded3e30d..8025741e7586 100644 --- a/drivers/net/irda/ks959-sir.c +++ b/drivers/net/irda/ks959-sir.c @@ -133,7 +133,7 @@ #define KS959_PRODUCT_ID 0x4959 /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ {USB_DEVICE(KS959_VENDOR_ID, KS959_PRODUCT_ID)}, {} -- cgit v1.2.3-55-g7522 From 7f4e87e971c8c0129c179fee053d6fb627573be8 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:26:43 +0530 Subject: net: irda: ksdazzle: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/irda/ksdazzle-sir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c index 741452c7ce35..d2a0755df596 100644 --- a/drivers/net/irda/ksdazzle-sir.c +++ b/drivers/net/irda/ksdazzle-sir.c @@ -97,7 +97,7 @@ #define KSDAZZLE_PRODUCT_ID 0x4100 /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* KingSun Co,Ltd IrDA/USB Bridge */ {USB_DEVICE(KSDAZZLE_VENDOR_ID, KSDAZZLE_PRODUCT_ID)}, {} -- cgit v1.2.3-55-g7522 From 4730279f10a1196ca1f26d545e1728bfb6c5c2c2 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:26:44 +0530 Subject: net: irda: mcs7780: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/irda/mcs7780.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 765de3bedb88..c3f0b254b344 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c @@ -66,7 +66,7 @@ #define MCS_VENDOR_ID 0x9710 #define MCS_PRODUCT_ID 0x7780 -static struct usb_device_id mcs_table[] = { +static const struct usb_device_id mcs_table[] = { /* MosChip Corp., MCS7780 FIR-USB Adapter */ {USB_DEVICE(MCS_VENDOR_ID, MCS_PRODUCT_ID)}, {}, -- cgit v1.2.3-55-g7522 From d1bb5aaa294c8f553a8825a1f20ea800369bf162 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:26:45 +0530 Subject: net: irda: stir4200: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. 
Miller --- drivers/net/irda/stir4200.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index 7ee514879531..ee2cb70b688d 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c @@ -183,7 +183,7 @@ struct stir_cb { /* These are the currently known USB ids */ -static struct usb_device_id dongles[] = { +static const struct usb_device_id dongles[] = { /* SigmaTel, Inc, STIr4200 IrDA/USB Bridge */ { USB_DEVICE(0x066f, 0x4200) }, { } -- cgit v1.2.3-55-g7522 From 2f622c4951aade21d9d8c66dc9d6276012a1d8b5 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:28:01 +0530 Subject: net: usb: catc: constify usb_device_id and fix space before '[' error usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Fix checkpatch.pl error: ERROR: space prohibited before open square bracket '['. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/usb/catc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c index fce92f0e5abd..dbc90313f472 100644 --- a/drivers/net/usb/catc.c +++ b/drivers/net/usb/catc.c @@ -961,7 +961,7 @@ static void catc_disconnect(struct usb_interface *intf) * Module functions and tables. */ -static struct usb_device_id catc_id_table [] = { +static const struct usb_device_id catc_id_table[] = { { USB_DEVICE(0x0423, 0xa) }, /* CATC Netmate, Belkin F5U011 */ { USB_DEVICE(0x0423, 0xc) }, /* CATC Netmate II, Belkin F5U111 */ { USB_DEVICE(0x08d1, 0x1) }, /* smartBridges smartNIC */ -- cgit v1.2.3-55-g7522 From 7f04c61d91c8cec3b89d62d74c5ec4ed1b0919aa Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:28:02 +0530 Subject: net: usb: cdc-phonet: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/usb/cdc-phonet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 2952cb570996..288ecd999171 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -304,7 +304,7 @@ static void usbpn_setup(struct net_device *dev) /* * USB driver callbacks */ -static struct usb_device_id usbpn_ids[] = { +static const struct usb_device_id usbpn_ids[] = { { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS -- cgit v1.2.3-55-g7522 From 9befbe13eba9a6cd8d5dda1d6716bb5163f3b1c2 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:28:03 +0530 Subject: net: usb: ipheth: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. 
Miller --- drivers/net/usb/ipheth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 0f213ea22c75..d49c7103085e 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -87,7 +87,7 @@ #define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ) #define IPHETH_CARRIER_ON 0x04 -static struct usb_device_id ipheth_table[] = { +static const struct usb_device_id ipheth_table[] = { { USB_DEVICE_AND_INTERFACE_INFO( USB_VENDOR_APPLE, USB_PRODUCT_IPHONE, IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, -- cgit v1.2.3-55-g7522 From 06fa59a018f22a59446e54995cd84f0f7ec939af Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:28:04 +0530 Subject: net: usb: kaweth: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/usb/kaweth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index 92e4fd29ae44..f1605833c5cf 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -125,7 +125,7 @@ static int kaweth_resume(struct usb_interface *intf); /**************************************************************** * usb_device_id ****************************************************************/ -static struct usb_device_id usb_klsi_table[] = { +static const struct usb_device_id usb_klsi_table[] = { { USB_DEVICE(0x03e8, 0x0008) }, /* AOX Endpoints USB Ethernet */ { USB_DEVICE(0x04bb, 0x0901) }, /* I-O DATA USB-ET/T */ { USB_DEVICE(0x0506, 0x03e8) }, /* 3Com 3C19250 */ -- cgit v1.2.3-55-g7522 From 9b4355fb039a4c4b77c3ed92c049b012b92dd36b Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:28:05 +0530 Subject: net: usb: r8152: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/usb/r8152.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 6cfffeff6108..ceb78e2ea4f0 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -5303,7 +5303,7 @@ static void rtl8152_disconnect(struct usb_interface *intf) .bInterfaceProtocol = USB_CDC_PROTO_NONE /* table of devices that work with this driver */ -static struct usb_device_id rtl8152_table[] = { +static const struct usb_device_id rtl8152_table[] = { {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152)}, {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, -- cgit v1.2.3-55-g7522 From e1cb90f2b83b6b48deeba0ac9f1920693cbad7e1 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 21:28:41 +0530 Subject: net: usb: rtl8150: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. 
Miller --- drivers/net/usb/rtl8150.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index daaa88a66f40..5f565bd574da 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -112,7 +112,7 @@ #undef EEPROM_WRITE /* table of devices that work with this driver */ -static struct usb_device_id rtl8150_table[] = { +static const struct usb_device_id rtl8150_table[] = { {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8150)}, {USB_DEVICE(VENDOR_ID_MELCO, PRODUCT_ID_LUAKTX)}, {USB_DEVICE(VENDOR_ID_MICRONET, PRODUCT_ID_SP128AR)}, -- cgit v1.2.3-55-g7522 From 09fc97ba3e25d851da9958671e48eee527e87909 Mon Sep 17 00:00:00 2001 From: Greg Edwards Date: Wed, 28 Jun 2017 09:22:24 -0600 Subject: igb: add argument names to mailbox op function declarations Signed-off-by: Greg Edwards Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_hw.h | 15 ++++++++------- drivers/net/ethernet/intel/igb/e1000_mbx.h | 12 ++++++------ 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 2fb2213cd562..fd7865a8d2e3 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -491,13 +491,14 @@ struct e1000_fc_info { struct e1000_mbx_operations { s32 (*init_params)(struct e1000_hw *hw); - s32 (*read)(struct e1000_hw *, u32 *, u16, u16); - s32 (*write)(struct e1000_hw *, u32 *, u16, u16); - s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); - s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); - s32 (*check_for_msg)(struct e1000_hw *, u16); - s32 (*check_for_ack)(struct e1000_hw *, u16); - s32 (*check_for_rst)(struct e1000_hw *, u16); + s32 (*read)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read_posted)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*write_posted)(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id); + s32 (*check_for_msg)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_ack)(struct e1000_hw *hw, u16 mbx_id); + s32 (*check_for_rst)(struct e1000_hw *hw, u16 mbx_id); }; struct e1000_mbx_stats { diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index 3e7fed73df15..73d90aeb48b2 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -67,11 +67,11 @@ #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ -s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_check_for_msg(struct e1000_hw *, u16); -s32 igb_check_for_ack(struct e1000_hw *, u16); -s32 igb_check_for_rst(struct e1000_hw *, u16); -s32 igb_init_mbx_params_pf(struct e1000_hw *); +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id); +s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id); +s32 igb_init_mbx_params_pf(struct e1000_hw *hw); #endif /* _E1000_MBX_H_ */ -- cgit v1.2.3-55-g7522 From 1a6c4a3b1e6f8fa1185457300514cd9f902de1a1 Mon Sep 17 00:00:00 2001 From: Greg Edwards Date: Wed, 28 Jun 2017 09:22:25 -0600 Subject: igb: expose mailbox unlock method Add a mailbox unlock method to 
e1000_mbx_operations, which will be used to unlock the PF/VF mailbox by the PF. Signed-off-by: Greg Edwards Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_hw.h | 1 + drivers/net/ethernet/intel/igb/e1000_mbx.c | 39 ++++++++++++++++++++++++++++++ drivers/net/ethernet/intel/igb/e1000_mbx.h | 1 + 3 files changed, 41 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index fd7865a8d2e3..6076f258a0a5 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -499,6 +499,7 @@ struct e1000_mbx_operations { s32 (*check_for_msg)(struct e1000_hw *hw, u16 mbx_id); s32 (*check_for_ack)(struct e1000_hw *hw, u16 mbx_id); s32 (*check_for_rst)(struct e1000_hw *hw, u16 mbx_id); + s32 (*unlock)(struct e1000_hw *hw, u16 mbx_id); }; struct e1000_mbx_stats { diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 00e263f0c030..6aa44723507b 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -124,6 +124,24 @@ s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) return ret_val; } +/** + * igb_unlock_mbx - unlock the mailbox + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the mailbox was unlocked or else ERR_MBX + **/ +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (mbx->ops.unlock) + ret_val = mbx->ops.unlock(hw, mbx_id); + + return ret_val; +} + /** * igb_poll_for_msg - Wait for message notification * @hw: pointer to the HW structure @@ -340,6 +358,26 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) return ret_val; } +/** + * igb_release_mbx_lock_pf - release mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we released the mailbox lock + **/ +static s32 igb_release_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 p2v_mailbox; + + /* drop PF lock of mailbox, if set */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + wr32(E1000_P2VMAILBOX(vf_number), + p2v_mailbox & ~E1000_P2VMAILBOX_PFU); + + return 0; +} + /** * igb_write_mbx_pf - Places a message in the mailbox * @hw: pointer to the HW structure @@ -437,6 +475,7 @@ s32 igb_init_mbx_params_pf(struct e1000_hw *hw) mbx->ops.check_for_msg = igb_check_for_msg_pf; mbx->ops.check_for_ack = igb_check_for_ack_pf; mbx->ops.check_for_rst = igb_check_for_rst_pf; + mbx->ops.unlock = igb_release_mbx_lock_pf; mbx->stats.msgs_tx = 0; mbx->stats.msgs_rx = 0; diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index 73d90aeb48b2..a98c5dc60afd 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -72,6 +72,7 @@ s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id); s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id); s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id); +s32 igb_unlock_mbx(struct e1000_hw *hw, u16 mbx_id); s32 igb_init_mbx_params_pf(struct e1000_hw *hw); #endif /* _E1000_MBX_H_ */ -- cgit v1.2.3-55-g7522 From f1174f77b50c94eecaa658fdc56fa69b421de4b8 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:26:19 +0100 Subject: bpf/verifier: rework 
value tracking Unifies adjusted and unadjusted register value types (e.g. FRAME_POINTER is now just a PTR_TO_STACK with zero offset). Tracks value alignment by means of tracking known & unknown bits. This also replaces the 'reg->imm' (leading zero bits) calculations for (what were) UNKNOWN_VALUEs. If pointer leaks are allowed, and adjust_ptr_min_max_vals returns -EACCES, treat the pointer as an unknown scalar and try again, because we might be able to conclude something about the result (e.g. pointer & 0x40 is either 0 or 0x40). Verifier hooks in the netronome/nfp driver were changed to match the new data structures. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/bpf/verifier.c | 24 +- include/linux/bpf.h | 34 +- include/linux/bpf_verifier.h | 34 +- include/linux/tnum.h | 79 + kernel/bpf/Makefile | 2 +- kernel/bpf/tnum.c | 164 ++ kernel/bpf/verifier.c | 1780 +++++++++++---------- 7 files changed, 1265 insertions(+), 852 deletions(-) create mode 100644 include/linux/tnum.h create mode 100644 kernel/bpf/tnum.c diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index d696ba46f70a..5b783a91b115 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -79,28 +79,32 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, const struct bpf_verifier_env *env) { const struct bpf_reg_state *reg0 = &env->cur_state.regs[0]; + u64 imm; if (nfp_prog->act == NN_ACT_XDP) return 0; - if (reg0->type != CONST_IMM) { - pr_info("unsupported exit state: %d, imm: %llx\n", - reg0->type, reg0->imm); + if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off); + pr_info("unsupported exit state: %d, var_off: %s\n", + reg0->type, tn_buf); return -EINVAL; } - if (nfp_prog->act != NN_ACT_DIRECT && - reg0->imm != 0 && (reg0->imm & ~0U) != ~0U) { + imm = reg0->var_off.value; + if (nfp_prog->act != NN_ACT_DIRECT && imm != 0 && (imm & ~0U) != ~0U) { pr_info("unsupported exit state: %d, imm: %llx\n", - reg0->type, reg0->imm); + reg0->type, imm); return -EINVAL; } - if (nfp_prog->act == NN_ACT_DIRECT && reg0->imm <= TC_ACT_REDIRECT && - reg0->imm != TC_ACT_SHOT && reg0->imm != TC_ACT_STOLEN && - reg0->imm != TC_ACT_QUEUED) { + if (nfp_prog->act == NN_ACT_DIRECT && imm <= TC_ACT_REDIRECT && + imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN && + imm != TC_ACT_QUEUED) { pr_info("unsupported exit state: %d, imm: %llx\n", - reg0->type, reg0->imm); + reg0->type, imm); return -EINVAL; } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6353c7474dba..39229c455cba 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -117,35 +117,25 @@ enum bpf_access_type { }; /* types of values stored in eBPF registers */ +/* Pointer types represent: + * pointer + * pointer + imm + * pointer + (u16) var + * pointer + (u16) var + imm + * if (range > 0) then [ptr, ptr + range - off) is safe to access + * if (id > 0) means that some 'var' was added + * if (off > 0) means that 'imm' was added + */ enum bpf_reg_type { NOT_INIT = 0, /* nothing was written into register */ - UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */ + SCALAR_VALUE, /* reg doesn't contain a valid pointer */ PTR_TO_CTX, /* reg points to bpf_context */ CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ PTR_TO_MAP_VALUE, /* reg points to map element value */ PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or 
NULL */ - FRAME_PTR, /* reg == frame_pointer */ - PTR_TO_STACK, /* reg == frame_pointer + imm */ - CONST_IMM, /* constant integer value */ - - /* PTR_TO_PACKET represents: - * skb->data - * skb->data + imm - * skb->data + (u16) var - * skb->data + (u16) var + imm - * if (range > 0) then [ptr, ptr + range - off) is safe to access - * if (id > 0) means that some 'var' was added - * if (off > 0) menas that 'imm' was added - */ - PTR_TO_PACKET, + PTR_TO_STACK, /* reg == frame_pointer + offset */ + PTR_TO_PACKET, /* reg points to skb->data */ PTR_TO_PACKET_END, /* skb->data + headlen */ - - /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map - * elem value. We only allow this if we can statically verify that - * access from this register are going to fall within the size of the - * map element. - */ - PTR_TO_MAP_VALUE_ADJ, }; struct bpf_prog; diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 8e5d31f6faef..85936fa92d12 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -9,6 +9,7 @@ #include /* for enum bpf_reg_type */ #include /* for MAX_BPF_STACK */ +#include /* Just some arbitrary values so we can safely do math without overflowing and * are obviously wrong for any sort of memory access. @@ -19,30 +20,37 @@ struct bpf_reg_state { enum bpf_reg_type type; union { - /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ - s64 imm; - - /* valid when type == PTR_TO_PACKET* */ - struct { - u16 off; - u16 range; - }; + /* valid when type == PTR_TO_PACKET */ + u16 range; /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL */ struct bpf_map *map_ptr; }; + /* Fixed part of pointer offset, pointer types only */ + s32 off; + /* For PTR_TO_PACKET, used to find other pointers with the same variable + * offset, so they can share range knowledge. + * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we + * came from, when one is tested for != NULL. + */ u32 id; + /* These three fields must be last. See states_equal() */ + /* For scalar types (SCALAR_VALUE), this represents our knowledge of + * the actual value. + * For pointer types, this represents the variable part of the offset + * from the pointed-to object, and is shared with all bpf_reg_states + * with the same id as us. + */ + struct tnum var_off; /* Used to determine if any memory access using this register will - * result in a bad access. These two fields must be last. - * See states_equal() + * result in a bad access. + * These refer to the same value as var_off, not necessarily the actual + * contents of the register. */ s64 min_value; u64 max_value; - u32 min_align; - u32 aux_off; - u32 aux_off_align; bool value_from_signed; }; diff --git a/include/linux/tnum.h b/include/linux/tnum.h new file mode 100644 index 000000000000..a0b07bf1842b --- /dev/null +++ b/include/linux/tnum.h @@ -0,0 +1,79 @@ +/* tnum: tracked (or tristate) numbers + * + * A tnum tracks knowledge about the bits of a value. Each bit can be either + * known (0 or 1), or unknown (x). Arithmetic operations on tnums will + * propagate the unknown bits such that the tnum result represents all the + * possible results for possible values of the operands. + */ +#include + +struct tnum { + u64 value; + u64 mask; +}; + +/* Constructors */ +/* Represent a known constant as a tnum. 
*/ +struct tnum tnum_const(u64 value); +/* A completely unknown value */ +extern const struct tnum tnum_unknown; + +/* Arithmetic and logical ops */ +/* Shift a tnum left (by a fixed shift) */ +struct tnum tnum_lshift(struct tnum a, u8 shift); +/* Shift a tnum right (by a fixed shift) */ +struct tnum tnum_rshift(struct tnum a, u8 shift); +/* Add two tnums, return @a + @b */ +struct tnum tnum_add(struct tnum a, struct tnum b); +/* Subtract two tnums, return @a - @b */ +struct tnum tnum_sub(struct tnum a, struct tnum b); +/* Bitwise-AND, return @a & @b */ +struct tnum tnum_and(struct tnum a, struct tnum b); +/* Bitwise-OR, return @a | @b */ +struct tnum tnum_or(struct tnum a, struct tnum b); +/* Bitwise-XOR, return @a ^ @b */ +struct tnum tnum_xor(struct tnum a, struct tnum b); +/* Multiply two tnums, return @a * @b */ +struct tnum tnum_mul(struct tnum a, struct tnum b); + +/* Return a tnum representing numbers satisfying both @a and @b */ +struct tnum tnum_intersect(struct tnum a, struct tnum b); + +/* Return @a with all but the lowest @size bytes cleared */ +struct tnum tnum_cast(struct tnum a, u8 size); + +/* Returns true if @a is a known constant */ +static inline bool tnum_is_const(struct tnum a) +{ + return !a.mask; +} + +/* Returns true if @a == tnum_const(@b) */ +static inline bool tnum_equals_const(struct tnum a, u64 b) +{ + return tnum_is_const(a) && a.value == b; +} + +/* Returns true if @a is completely unknown */ +static inline bool tnum_is_unknown(struct tnum a) +{ + return !~a.mask; +} + +/* Returns true if @a is known to be a multiple of @size. + * @size must be a power of two. + */ +bool tnum_is_aligned(struct tnum a, u64 size); + +/* Returns true if @b represents a subset of @a. */ +bool tnum_in(struct tnum a, struct tnum b); + +/* Formatting functions. These have snprintf-like semantics: they will write + * up to @size bytes (including the terminating NUL byte), and return the number + * of bytes (excluding the terminating NUL) which would have been written had + * sufficient space been available. (Thus tnum_sbin always returns 64.) + */ +/* Format a tnum as a pair of hex numbers (value; mask) */ +int tnum_strn(char *str, size_t size, struct tnum a); +/* Format a tnum as tristate binary expansion */ +int tnum_sbin(char *str, size_t size, struct tnum a); diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 48e92705be59..2f0bcda40e90 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -1,6 +1,6 @@ obj-y := core.o -obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o +obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o ifeq ($(CONFIG_NET),y) obj-$(CONFIG_BPF_SYSCALL) += devmap.o diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c new file mode 100644 index 000000000000..92eeeb1974a2 --- /dev/null +++ b/kernel/bpf/tnum.c @@ -0,0 +1,164 @@ +/* tnum: tracked (or tristate) numbers + * + * A tnum tracks knowledge about the bits of a value. Each bit can be either + * known (0 or 1), or unknown (x). Arithmetic operations on tnums will + * propagate the unknown bits such that the tnum result represents all the + * possible results for possible values of the operands. 
+ */ +#include +#include + +#define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m} +/* A completely unknown value */ +const struct tnum tnum_unknown = { .value = 0, .mask = -1 }; + +struct tnum tnum_const(u64 value) +{ + return TNUM(value, 0); +} + +struct tnum tnum_lshift(struct tnum a, u8 shift) +{ + return TNUM(a.value << shift, a.mask << shift); +} + +struct tnum tnum_rshift(struct tnum a, u8 shift) +{ + return TNUM(a.value >> shift, a.mask >> shift); +} + +struct tnum tnum_add(struct tnum a, struct tnum b) +{ + u64 sm, sv, sigma, chi, mu; + + sm = a.mask + b.mask; + sv = a.value + b.value; + sigma = sm + sv; + chi = sigma ^ sv; + mu = chi | a.mask | b.mask; + return TNUM(sv & ~mu, mu); +} + +struct tnum tnum_sub(struct tnum a, struct tnum b) +{ + u64 dv, alpha, beta, chi, mu; + + dv = a.value - b.value; + alpha = dv + a.mask; + beta = dv - b.mask; + chi = alpha ^ beta; + mu = chi | a.mask | b.mask; + return TNUM(dv & ~mu, mu); +} + +struct tnum tnum_and(struct tnum a, struct tnum b) +{ + u64 alpha, beta, v; + + alpha = a.value | a.mask; + beta = b.value | b.mask; + v = a.value & b.value; + return TNUM(v, alpha & beta & ~v); +} + +struct tnum tnum_or(struct tnum a, struct tnum b) +{ + u64 v, mu; + + v = a.value | b.value; + mu = a.mask | b.mask; + return TNUM(v, mu & ~v); +} + +struct tnum tnum_xor(struct tnum a, struct tnum b) +{ + u64 v, mu; + + v = a.value ^ b.value; + mu = a.mask | b.mask; + return TNUM(v & ~mu, mu); +} + +/* half-multiply add: acc += (unknown * mask * value). + * An intermediate step in the multiply algorithm. + */ +static struct tnum hma(struct tnum acc, u64 value, u64 mask) +{ + while (mask) { + if (mask & 1) + acc = tnum_add(acc, TNUM(0, value)); + mask >>= 1; + value <<= 1; + } + return acc; +} + +struct tnum tnum_mul(struct tnum a, struct tnum b) +{ + struct tnum acc; + u64 pi; + + pi = a.value * b.value; + acc = hma(TNUM(pi, 0), a.mask, b.mask | b.value); + return hma(acc, b.mask, a.value); +} + +/* Note that if a and b disagree - i.e. one has a 'known 1' where the other has + * a 'known 0' - this will return a 'known 1' for that bit. + */ +struct tnum tnum_intersect(struct tnum a, struct tnum b) +{ + u64 v, mu; + + v = a.value | b.value; + mu = a.mask & b.mask; + return TNUM(v & ~mu, mu); +} + +struct tnum tnum_cast(struct tnum a, u8 size) +{ + a.value &= (1ULL << (size * 8)) - 1; + a.mask &= (1ULL << (size * 8)) - 1; + return a; +} + +bool tnum_is_aligned(struct tnum a, u64 size) +{ + if (!size) + return true; + return !((a.value | a.mask) & (size - 1)); +} + +bool tnum_in(struct tnum a, struct tnum b) +{ + if (b.mask & ~a.mask) + return false; + b.value &= ~a.mask; + return a.value == b.value; +} + +int tnum_strn(char *str, size_t size, struct tnum a) +{ + return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask); +} +EXPORT_SYMBOL_GPL(tnum_strn); + +int tnum_sbin(char *str, size_t size, struct tnum a) +{ + size_t n; + + for (n = 64; n; n--) { + if (n < size) { + if (a.mask & 1) + str[n - 1] = 'x'; + else if (a.value & 1) + str[n - 1] = '1'; + else + str[n - 1] = '0'; + } + a.mask >>= 1; + a.value >>= 1; + } + str[min(size - 1, (size_t)64)] = 0; + return 64; +} diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f6e8b3887eab..c3f88b466c30 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -61,12 +61,12 @@ * (and -20 constant is saved for further stack bounds checking). * Meaning that this reg is a pointer to stack plus known immediate constant. 
* - * Most of the time the registers have UNKNOWN_VALUE type, which + * Most of the time the registers have SCALAR_VALUE type, which * means the register has some value, but it's not a valid pointer. - * (like pointer plus pointer becomes UNKNOWN_VALUE type) + * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register - * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer + * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer * types recognized by check_mem_access() function. * * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' @@ -180,15 +180,12 @@ static __printf(1, 2) void verbose(const char *fmt, ...) /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", - [UNKNOWN_VALUE] = "inv", + [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", - [PTR_TO_MAP_VALUE_ADJ] = "map_value_adj", - [FRAME_PTR] = "fp", [PTR_TO_STACK] = "fp", - [CONST_IMM] = "imm", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_END] = "pkt_end", }; @@ -221,32 +218,36 @@ static void print_verifier_state(struct bpf_verifier_state *state) if (t == NOT_INIT) continue; verbose(" R%d=%s", i, reg_type_str[t]); - if (t == CONST_IMM || t == PTR_TO_STACK) - verbose("%lld", reg->imm); - else if (t == PTR_TO_PACKET) - verbose("(id=%d,off=%d,r=%d)", - reg->id, reg->off, reg->range); - else if (t == UNKNOWN_VALUE && reg->imm) - verbose("%lld", reg->imm); - else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || - t == PTR_TO_MAP_VALUE_OR_NULL || - t == PTR_TO_MAP_VALUE_ADJ) - verbose("(ks=%d,vs=%d,id=%u)", - reg->map_ptr->key_size, - reg->map_ptr->value_size, - reg->id); - if (reg->min_value != BPF_REGISTER_MIN_RANGE) - verbose(",min_value=%lld", - (long long)reg->min_value); - if (reg->max_value != BPF_REGISTER_MAX_RANGE) - verbose(",max_value=%llu", - (unsigned long long)reg->max_value); - if (reg->min_align) - verbose(",min_align=%u", reg->min_align); - if (reg->aux_off) - verbose(",aux_off=%u", reg->aux_off); - if (reg->aux_off_align) - verbose(",aux_off_align=%u", reg->aux_off_align); + if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && + tnum_is_const(reg->var_off)) { + /* reg->off should be 0 for SCALAR_VALUE */ + verbose("%lld", reg->var_off.value + reg->off); + } else { + verbose("(id=%d", reg->id); + if (t != SCALAR_VALUE) + verbose(",off=%d", reg->off); + if (t == PTR_TO_PACKET) + verbose(",r=%d", reg->range); + else if (t == CONST_PTR_TO_MAP || + t == PTR_TO_MAP_VALUE || + t == PTR_TO_MAP_VALUE_OR_NULL) + verbose(",ks=%d,vs=%d", + reg->map_ptr->key_size, + reg->map_ptr->value_size); + if (reg->min_value != BPF_REGISTER_MIN_RANGE) + verbose(",min_value=%lld", + (long long)reg->min_value); + if (reg->max_value != BPF_REGISTER_MAX_RANGE) + verbose(",max_value=%llu", + (unsigned long long)reg->max_value); + if (!tnum_is_unknown(reg->var_off)) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose(",var_off=%s", tn_buf); + } + verbose(")"); + } } for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { if (state->stack_slot_type[i] == STACK_SPILL) @@ -463,14 +464,69 @@ static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; -static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno) +static void __mark_reg_not_init(struct 
bpf_reg_state *reg); + +/* Mark the 'variable offset' part of a register as zero. This should be + * used only on registers holding a pointer type. + */ +static void __mark_reg_known_zero(struct bpf_reg_state *reg) { - BUG_ON(regno >= MAX_BPF_REG); + reg->var_off = tnum_const(0); + reg->min_value = 0; + reg->max_value = 0; +} - memset(®s[regno], 0, sizeof(regs[regno])); - regs[regno].type = NOT_INIT; - regs[regno].min_value = BPF_REGISTER_MIN_RANGE; - regs[regno].max_value = BPF_REGISTER_MAX_RANGE; +static void mark_reg_known_zero(struct bpf_reg_state *regs, u32 regno) +{ + if (WARN_ON(regno >= MAX_BPF_REG)) { + verbose("mark_reg_known_zero(regs, %u)\n", regno); + /* Something bad happened, let's kill all regs */ + for (regno = 0; regno < MAX_BPF_REG; regno++) + __mark_reg_not_init(regs + regno); + return; + } + __mark_reg_known_zero(regs + regno); +} + +/* Mark a register as having a completely unknown (scalar) value. */ +static void __mark_reg_unknown(struct bpf_reg_state *reg) +{ + reg->type = SCALAR_VALUE; + reg->id = 0; + reg->off = 0; + reg->var_off = tnum_unknown; + reg->min_value = BPF_REGISTER_MIN_RANGE; + reg->max_value = BPF_REGISTER_MAX_RANGE; +} + +static void mark_reg_unknown(struct bpf_reg_state *regs, u32 regno) +{ + if (WARN_ON(regno >= MAX_BPF_REG)) { + verbose("mark_reg_unknown(regs, %u)\n", regno); + /* Something bad happened, let's kill all regs */ + for (regno = 0; regno < MAX_BPF_REG; regno++) + __mark_reg_not_init(regs + regno); + return; + } + __mark_reg_unknown(regs + regno); +} + +static void __mark_reg_not_init(struct bpf_reg_state *reg) +{ + __mark_reg_unknown(reg); + reg->type = NOT_INIT; +} + +static void mark_reg_not_init(struct bpf_reg_state *regs, u32 regno) +{ + if (WARN_ON(regno >= MAX_BPF_REG)) { + verbose("mark_reg_not_init(regs, %u)\n", regno); + /* Something bad happened, let's kill all regs */ + for (regno = 0; regno < MAX_BPF_REG; regno++) + __mark_reg_not_init(regs + regno); + return; + } + __mark_reg_not_init(regs + regno); } static void init_reg_state(struct bpf_reg_state *regs) @@ -481,23 +537,12 @@ static void init_reg_state(struct bpf_reg_state *regs) mark_reg_not_init(regs, i); /* frame pointer */ - regs[BPF_REG_FP].type = FRAME_PTR; + regs[BPF_REG_FP].type = PTR_TO_STACK; + mark_reg_known_zero(regs, BPF_REG_FP); /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; -} - -static void __mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) -{ - regs[regno].type = UNKNOWN_VALUE; - regs[regno].id = 0; - regs[regno].imm = 0; -} - -static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) -{ - BUG_ON(regno >= MAX_BPF_REG); - __mark_reg_unknown_value(regs, regno); + mark_reg_known_zero(regs, BPF_REG_1); } static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) @@ -505,14 +550,6 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) regs[regno].min_value = BPF_REGISTER_MIN_RANGE; regs[regno].max_value = BPF_REGISTER_MAX_RANGE; regs[regno].value_from_signed = false; - regs[regno].min_align = 0; -} - -static void mark_reg_unknown_value_and_range(struct bpf_reg_state *regs, - u32 regno) -{ - mark_reg_unknown_value(regs, regno); - reset_reg_range_values(regs, regno); } enum reg_arg_type { @@ -542,7 +579,7 @@ static int check_reg_arg(struct bpf_reg_state *regs, u32 regno, return -EACCES; } if (t == DST_OP) - mark_reg_unknown_value(regs, regno); + mark_reg_unknown(regs, regno); } return 0; } @@ -552,12 +589,10 @@ static bool is_spillable_regtype(enum bpf_reg_type type) switch (type) 
{ case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: - case PTR_TO_MAP_VALUE_ADJ: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_END: - case FRAME_PTR: case CONST_PTR_TO_MAP: return true; default: @@ -637,14 +672,13 @@ static int check_stack_read(struct bpf_verifier_state *state, int off, int size, } if (value_regno >= 0) /* have read misc data from the stack */ - mark_reg_unknown_value_and_range(state->regs, - value_regno); + mark_reg_unknown(state->regs, value_regno); return 0; } } /* check read/write into map element returned by bpf_map_lookup_elem() */ -static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, +static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size) { struct bpf_map *map = env->cur_state.regs[regno].map_ptr; @@ -657,22 +691,25 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, return 0; } -/* check read/write into an adjusted map element */ -static int check_map_access_adj(struct bpf_verifier_env *env, u32 regno, +/* check read/write into a map element with possible variable offset */ +static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size) { struct bpf_verifier_state *state = &env->cur_state; struct bpf_reg_state *reg = &state->regs[regno]; int err; - /* We adjusted the register to this map value, so we - * need to change off and size to min_value and max_value - * respectively to make sure our theoretical access will be - * safe. + /* We may have adjusted the register to this map value, so we + * need to try adding each of min_value and max_value to off + * to make sure our theoretical access will be safe. */ if (log_level) print_verifier_state(state); - env->varlen_map_value_access = true; + /* If the offset is variable, we will need to be stricter in state + * pruning from now on. + */ + if (!tnum_is_const(reg->var_off)) + env->varlen_map_value_access = true; /* The minimum value is only important with signed * comparisons where we can't assume the floor of a * value is 0. 
If we are using signed variables for our @@ -684,10 +721,9 @@ static int check_map_access_adj(struct bpf_verifier_env *env, u32 regno, regno); return -EACCES; } - err = check_map_access(env, regno, reg->min_value + off, size); + err = __check_map_access(env, regno, reg->min_value + off, size); if (err) { - verbose("R%d min value is outside of the array range\n", - regno); + verbose("R%d min value is outside of the array range\n", regno); return err; } @@ -699,7 +735,10 @@ static int check_map_access_adj(struct bpf_verifier_env *env, u32 regno, regno); return -EACCES; } - return check_map_access(env, regno, reg->max_value + off, size); + err = __check_map_access(env, regno, reg->max_value + off, size); + if (err) + verbose("R%d max value is outside of the array range\n", regno); + return err; } #define MAX_PACKET_OFF 0xffff @@ -729,14 +768,13 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, } } -static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, - int size) +static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, + int off, int size) { struct bpf_reg_state *regs = env->cur_state.regs; struct bpf_reg_state *reg = ®s[regno]; - off += reg->off; - if (off < 0 || size <= 0 || off + size > reg->range) { + if (off < 0 || size <= 0 || (u64)off + size > reg->range) { verbose("invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, reg->off, reg->range); return -EACCES; @@ -744,7 +782,35 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, return 0; } -/* check access to 'struct bpf_context' fields */ +static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, + int size) +{ + struct bpf_reg_state *regs = env->cur_state.regs; + struct bpf_reg_state *reg = ®s[regno]; + int err; + + /* We may have added a variable offset to the packet pointer; but any + * reg->range we have comes after that. We are only checking the fixed + * offset. + */ + + /* We don't allow negative numbers, because we aren't tracking enough + * detail to prove they're safe. + */ + if (reg->min_value < 0) { + verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", + regno); + return -EACCES; + } + err = __check_packet_access(env, regno, off, size); + if (err) { + verbose("R%d offset is outside of the packet\n", regno); + return err; + } + return err; +} + +/* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type) { @@ -784,13 +850,7 @@ static bool __is_pointer_value(bool allow_ptr_leaks, if (allow_ptr_leaks) return false; - switch (reg->type) { - case UNKNOWN_VALUE: - case CONST_IMM: - return false; - default: - return true; - } + return reg->type != SCALAR_VALUE; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) @@ -801,23 +861,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno) static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, int off, int size, bool strict) { + struct tnum reg_off; int ip_align; - int reg_off; /* Byte size accesses are always allowed. 
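/*
 * Sketch of what __check_packet_access() above relies on: reg->range holds
 * the number of bytes an earlier "ptr + N <= pkt_end" comparison proved to
 * be readable, and the sum off + size is done in u64 so a huge offset
 * cannot wrap back under the limit.  Standalone approximation only.
 */
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t u64;

static bool pkt_access_ok(int off, int size, unsigned int range)
{
    if (off < 0 || size <= 0)
        return false;
    return (u64)off + size <= range;
}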
*/ if (!strict || size == 1) return 0; - reg_off = reg->off; - if (reg->id) { - if (reg->aux_off_align % size) { - verbose("Packet access is only %u byte aligned, %d byte access not allowed\n", - reg->aux_off_align, size); - return -EACCES; - } - reg_off += reg->aux_off; - } - /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms @@ -827,20 +877,37 @@ static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg, * unconditional IP align value of '2'. */ ip_align = 2; - if ((ip_align + reg_off + off) % size != 0) { - verbose("misaligned packet access off %d+%d+%d size %d\n", - ip_align, reg_off, off, size); + + reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); + if (!tnum_is_aligned(reg_off, size)) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose("misaligned packet access off %d+%s+%d+%d size %d\n", + ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } -static int check_val_ptr_alignment(const struct bpf_reg_state *reg, - int size, bool strict) +static int check_generic_ptr_alignment(const struct bpf_reg_state *reg, + const char *pointer_desc, + int off, int size, bool strict) { - if (strict && size != 1) { - verbose("Unknown alignment. Only byte-sized access allowed in value access.\n"); + struct tnum reg_off; + + /* Byte size accesses are always allowed. */ + if (!strict || size == 1) + return 0; + + reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); + if (!tnum_is_aligned(reg_off, size)) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose("misaligned %saccess off %s+%d+%d size %d\n", + pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } @@ -852,21 +919,25 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, int off, int size) { bool strict = env->strict_alignment; + const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: + /* special case, because of NET_IP_ALIGN */ return check_pkt_ptr_alignment(reg, off, size, strict); - case PTR_TO_MAP_VALUE_ADJ: - return check_val_ptr_alignment(reg, size, strict); + case PTR_TO_MAP_VALUE: + pointer_desc = "value "; + break; + case PTR_TO_CTX: + pointer_desc = "context "; + break; + case PTR_TO_STACK: + pointer_desc = "stack "; + break; default: - if (off % size != 0) { - verbose("misaligned access off %d size %d\n", - off, size); - return -EACCES; - } - - return 0; + break; } + return check_generic_ptr_alignment(reg, pointer_desc, off, size, strict); } /* check whether memory at (regno + off) is accessible for t = (read | write) @@ -883,52 +954,79 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn struct bpf_reg_state *reg = &state->regs[regno]; int size, err = 0; - if (reg->type == PTR_TO_STACK) - off += reg->imm; - size = bpf_size_to_bytes(bpf_size); if (size < 0) return size; + /* alignment checks will add in reg->off themselves */ err = check_ptr_alignment(env, reg, off, size); if (err) return err; - if (reg->type == PTR_TO_MAP_VALUE || - reg->type == PTR_TO_MAP_VALUE_ADJ) { + /* for access checks, reg->off is just part of off */ + off += reg->off; + + if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose("R%d leaks addr into map\n", value_regno); return -EACCES; } - if (reg->type == PTR_TO_MAP_VALUE_ADJ) - err = check_map_access_adj(env, regno, off, size); - else - err = 
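/*
 * Sketch of the tnum-based alignment test used above.  A tnum tracks a
 * value as a (value, mask) pair where mask bits are unknown; the real
 * helpers live in kernel/bpf/tnum.{h,c} (introduced alongside this patch),
 * so the implementations here are approximations for illustration.
 */
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t u64;

struct tnum { u64 value; u64 mask; };      /* mask bit set => bit unknown */
#define TNUM(v, m) ((struct tnum){ (v), (m) })

static struct tnum tnum_const(u64 value)
{
    return TNUM(value, 0);
}

/* Addition: any carry that might come out of an unknown bit makes the bit
 * it lands in unknown as well.
 */
static struct tnum tnum_add(struct tnum a, struct tnum b)
{
    u64 sm = a.mask + b.mask;              /* worst-case carries of unknowns */
    u64 sv = a.value + b.value;            /* carries of the known parts */
    u64 sigma = sm + sv;
    u64 chi = sigma ^ sv;                  /* positions a carry might reach */
    u64 mu = chi | a.mask | b.mask;

    return TNUM(sv & ~mu, mu);
}

/* Aligned iff every bit below the access size is known to be zero */
static bool tnum_is_aligned(struct tnum a, u64 size)
{
    if (!size)
        return true;
    return !((a.value | a.mask) & (size - 1));
}

/* The packet check above then amounts to: */
static bool pkt_aligned(struct tnum var_off, int ip_align, int reg_off,
                        int off, int size)
{
    struct tnum t = tnum_add(var_off, tnum_const(ip_align + reg_off + off));

    return tnum_is_aligned(t, size);
}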
check_map_access(env, regno, off, size); + err = check_map_access(env, regno, off, size); if (!err && t == BPF_READ && value_regno >= 0) - mark_reg_unknown_value_and_range(state->regs, - value_regno); + mark_reg_unknown(state->regs, value_regno); } else if (reg->type == PTR_TO_CTX) { - enum bpf_reg_type reg_type = UNKNOWN_VALUE; + enum bpf_reg_type reg_type = SCALAR_VALUE; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose("R%d leaks addr into ctx\n", value_regno); return -EACCES; } + /* ctx accesses must be at a fixed offset, so that we can + * determine what type of data were returned. + */ + if (!tnum_is_const(reg->var_off)) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose("variable ctx access var_off=%s off=%d size=%d", + tn_buf, off, size); + return -EACCES; + } + off += reg->var_off.value; err = check_ctx_access(env, insn_idx, off, size, t, ®_type); if (!err && t == BPF_READ && value_regno >= 0) { - mark_reg_unknown_value_and_range(state->regs, - value_regno); - /* note that reg.[id|off|range] == 0 */ + /* ctx access returns either a scalar, or a + * PTR_TO_PACKET[_END]. In the latter case, we know + * the offset is zero. + */ + if (reg_type == SCALAR_VALUE) + mark_reg_unknown(state->regs, value_regno); + else + mark_reg_known_zero(state->regs, value_regno); + state->regs[value_regno].id = 0; + state->regs[value_regno].off = 0; + state->regs[value_regno].range = 0; state->regs[value_regno].type = reg_type; - state->regs[value_regno].aux_off = 0; - state->regs[value_regno].aux_off_align = 0; } - } else if (reg->type == FRAME_PTR || reg->type == PTR_TO_STACK) { + } else if (reg->type == PTR_TO_STACK) { + /* stack accesses must be at a fixed offset, so that we can + * determine what type of data were returned. + * See check_stack_read(). + */ + if (!tnum_is_const(reg->var_off)) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose("variable stack access var_off=%s off=%d size=%d", + tn_buf, off, size); + return -EACCES; + } + off += reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK) { verbose("invalid stack off=%d size=%d\n", off, size); return -EACCES; @@ -948,7 +1046,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } else { err = check_stack_read(state, off, size, value_regno); } - } else if (state->regs[regno].type == PTR_TO_PACKET) { + } else if (reg->type == PTR_TO_PACKET) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose("cannot write into packet\n"); return -EACCES; @@ -960,21 +1058,24 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn } err = check_packet_access(env, regno, off, size); if (!err && t == BPF_READ && value_regno >= 0) - mark_reg_unknown_value_and_range(state->regs, - value_regno); + mark_reg_unknown(state->regs, value_regno); } else { verbose("R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } - if (!err && size <= 2 && value_regno >= 0 && env->allow_ptr_leaks && - state->regs[value_regno].type == UNKNOWN_VALUE) { - /* 1 or 2 byte load zero-extends, determine the number of - * zero upper bits. Not doing it fo 4 byte load, since - * such values cannot be added to ptr_to_packet anyway. 
- */ - state->regs[value_regno].imm = 64 - size * 8; + if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && + state->regs[value_regno].type == SCALAR_VALUE) { + /* b/h/w load zero-extends, mark upper bits as known 0 */ + state->regs[value_regno].var_off = tnum_cast( + state->regs[value_regno].var_off, size); + /* sign bit is known zero, so we can bound the value */ + state->regs[value_regno].min_value = 0; + state->regs[value_regno].max_value = min_t(u64, + state->regs[value_regno].var_off.value | + state->regs[value_regno].var_off.mask, + BPF_REGISTER_MAX_RANGE); } return err; } @@ -1016,9 +1117,17 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins BPF_SIZE(insn->code), BPF_WRITE, -1); } +/* Does this register contain a constant zero? */ +static bool register_is_null(struct bpf_reg_state reg) +{ + return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0); +} + /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary - * and all elements of stack are initialized + * and all elements of stack are initialized. + * Unlike most pointer bounds-checking functions, this one doesn't take an + * 'off' argument, so it has to add in reg->off itself. */ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, @@ -1029,9 +1138,9 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int off, i; if (regs[regno].type != PTR_TO_STACK) { + /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && - regs[regno].type == CONST_IMM && - regs[regno].imm == 0) + register_is_null(regs[regno])) return 0; verbose("R%d type=%s expected=%s\n", regno, @@ -1040,7 +1149,15 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, return -EACCES; } - off = regs[regno].imm; + /* Only allow fixed-offset stack reads */ + if (!tnum_is_const(regs[regno].var_off)) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off); + verbose("invalid variable stack read R%d var_off=%s\n", + regno, tn_buf); + } + off = regs[regno].off + regs[regno].var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size <= 0) { verbose("invalid stack type R%d off=%d access_size=%d\n", @@ -1071,16 +1188,14 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { - struct bpf_reg_state *regs = env->cur_state.regs; + struct bpf_reg_state *regs = env->cur_state.regs, *reg = ®s[regno]; - switch (regs[regno].type) { + switch (reg->type) { case PTR_TO_PACKET: - return check_packet_access(env, regno, 0, access_size); + return check_packet_access(env, regno, reg->off, access_size); case PTR_TO_MAP_VALUE: - return check_map_access(env, regno, 0, access_size); - case PTR_TO_MAP_VALUE_ADJ: - return check_map_access_adj(env, regno, 0, access_size); - default: /* const_imm|ptr_to_stack or invalid ptr */ + return check_map_access(env, regno, reg->off, access_size); + default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); } @@ -1123,11 +1238,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, goto err_type; } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { - expected_type = CONST_IMM; - /* One 
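/*
 * Sketch of the zero-extension trick used for b/h/w loads above: casting
 * the tnum down to the loaded size marks all upper bits as known zero, and
 * "known bits | unknown bits" then gives a free upper bound on the value.
 * Approximation of tnum_cast() from kernel/bpf/tnum.c, not the real thing.
 */
#include <stdint.h>

typedef uint64_t u64;

struct tnum { u64 value; u64 mask; };
#define TNUM(v, m) ((struct tnum){ (v), (m) })

static struct tnum tnum_cast(struct tnum a, int size)   /* size in bytes */
{
    u64 keep;

    if (size >= 8)                  /* nothing to truncate */
        return a;
    keep = (1ULL << (size * 8)) - 1;
    return TNUM(a.value & keep, a.mask & keep);
}

static u64 scalar_upper_bound(struct tnum a)
{
    return a.value | a.mask;        /* every bit that could possibly be set */
}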
exception. Allow UNKNOWN_VALUE registers when the - * boundaries are known and don't cause unsafe memory accesses - */ - if (type != UNKNOWN_VALUE && type != expected_type) + expected_type = SCALAR_VALUE; + if (type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; @@ -1141,13 +1253,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, arg_type == ARG_PTR_TO_UNINIT_MEM) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be - * passed in as argument, it's a CONST_IMM type. Final test + * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ - if (type == CONST_IMM && reg->imm == 0) + if (register_is_null(*reg)) /* final test in check_stack_boundary() */; else if (type != PTR_TO_PACKET && type != PTR_TO_MAP_VALUE && - type != PTR_TO_MAP_VALUE_ADJ && type != expected_type) + type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { @@ -1173,7 +1285,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, return -EACCES; } if (type == PTR_TO_PACKET) - err = check_packet_access(env, regno, 0, + err = check_packet_access(env, regno, reg->off, meta->map_ptr->key_size); else err = check_stack_boundary(env, regno, @@ -1189,7 +1301,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, return -EACCES; } if (type == PTR_TO_PACKET) - err = check_packet_access(env, regno, 0, + err = check_packet_access(env, regno, reg->off, meta->map_ptr->value_size); else err = check_stack_boundary(env, regno, @@ -1209,10 +1321,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, return -EACCES; } - /* If the register is UNKNOWN_VALUE, the access check happens - * using its boundaries. Otherwise, just use its imm + /* The register is SCALAR_VALUE; the access check + * happens using its boundaries. 
*/ - if (type == UNKNOWN_VALUE) { + + if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could @@ -1220,35 +1333,28 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, */ meta = NULL; - if (reg->min_value < 0) { - verbose("R%d min value is negative, either use unsigned or 'var &= const'\n", - regno); - return -EACCES; - } - - if (reg->min_value == 0) { - err = check_helper_mem_access(env, regno - 1, 0, - zero_size_allowed, - meta); - if (err) - return err; - } + if (reg->min_value < 0) { + verbose("R%d min value is negative, either use unsigned or 'var &= const'\n", + regno); + return -EACCES; + } - if (reg->max_value == BPF_REGISTER_MAX_RANGE) { - verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", - regno); - return -EACCES; - } - err = check_helper_mem_access(env, regno - 1, - reg->max_value, - zero_size_allowed, meta); + if (reg->min_value == 0) { + err = check_helper_mem_access(env, regno - 1, 0, + zero_size_allowed, + meta); if (err) return err; - } else { - /* register is CONST_IMM */ - err = check_helper_mem_access(env, regno - 1, reg->imm, - zero_size_allowed, meta); } + + if (reg->max_value == BPF_REGISTER_MAX_RANGE) { + verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", + regno); + return -EACCES; + } + err = check_helper_mem_access(env, regno - 1, + reg->max_value, + zero_size_allowed, meta); } return err; @@ -1352,6 +1458,9 @@ static int check_raw_mode(const struct bpf_func_proto *fn) return count > 1 ? -EINVAL : 0; } +/* Packet data might have moved, any old PTR_TO_PACKET[_END] are now invalid, + * so turn them into unknown SCALAR_VALUE. 
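/*
 * Sketch of how a variable-length helper argument (ARG_CONST_SIZE) is now
 * accepted: the length register only has to be a bounded scalar, and the
 * memory is checked at the extremes of its range.  UNBOUNDED_MAX stands in
 * for the BPF_REGISTER_MAX_RANGE sentinel; buf_size stands in for whatever
 * check_helper_mem_access() validates against.
 */
#include <stdint.h>
#include <stdbool.h>

typedef int64_t s64;
typedef uint64_t u64;

#define UNBOUNDED_MAX ((u64)-1)     /* placeholder sentinel */

static bool variable_size_arg_ok(s64 min_len, u64 max_len, u64 buf_size,
                                 bool zero_size_allowed)
{
    if (min_len < 0)
        return false;               /* could go backwards */
    if (max_len == UNBOUNDED_MAX)
        return false;               /* needs "len &= const" or a bounds check */
    if (min_len == 0 && !zero_size_allowed)
        return false;               /* zero length must be explicitly allowed */
    return max_len <= buf_size;     /* safe at the largest possible length */
}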
+ */ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { struct bpf_verifier_state *state = &env->cur_state; @@ -1361,7 +1470,7 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == PTR_TO_PACKET || regs[i].type == PTR_TO_PACKET_END) - mark_reg_unknown_value(regs, i); + mark_reg_unknown(regs, i); for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { if (state->stack_slot_type[i] != STACK_SPILL) @@ -1370,8 +1479,7 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) if (reg->type != PTR_TO_PACKET && reg->type != PTR_TO_PACKET_END) continue; - __mark_reg_unknown_value(state->spilled_regs, - i / BPF_REG_SIZE); + __mark_reg_unknown(reg); } } @@ -1451,14 +1559,17 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) /* update return register */ if (fn->ret_type == RET_INTEGER) { - regs[BPF_REG_0].type = UNKNOWN_VALUE; + /* sets type to SCALAR_VALUE */ + mark_reg_unknown(regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { struct bpf_insn_aux_data *insn_aux; regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; - regs[BPF_REG_0].max_value = regs[BPF_REG_0].min_value = 0; + /* There is no offset yet applied, variable or fixed */ + mark_reg_known_zero(regs, BPF_REG_0); + regs[BPF_REG_0].off = 0; /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() @@ -1489,456 +1600,337 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) return 0; } -static int check_packet_ptr_add(struct bpf_verifier_env *env, - struct bpf_insn *insn) +static void check_reg_overflow(struct bpf_reg_state *reg) { - struct bpf_reg_state *regs = env->cur_state.regs; - struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; - struct bpf_reg_state *src_reg = ®s[insn->src_reg]; - struct bpf_reg_state tmp_reg; - s32 imm; - - if (BPF_SRC(insn->code) == BPF_K) { - /* pkt_ptr += imm */ - imm = insn->imm; - -add_imm: - if (imm < 0) { - verbose("addition of negative constant to packet pointer is not allowed\n"); - return -EACCES; - } - if (imm >= MAX_PACKET_OFF || - imm + dst_reg->off >= MAX_PACKET_OFF) { - verbose("constant %d is too large to add to packet pointer\n", - imm); - return -EACCES; - } - /* a constant was added to pkt_ptr. - * Remember it while keeping the same 'id' - */ - dst_reg->off += imm; - } else { - bool had_id; - - if (src_reg->type == PTR_TO_PACKET) { - /* R6=pkt(id=0,off=0,r=62) R7=imm22; r7 += r6 */ - tmp_reg = *dst_reg; /* save r7 state */ - *dst_reg = *src_reg; /* copy pkt_ptr state r6 into r7 */ - src_reg = &tmp_reg; /* pretend it's src_reg state */ - /* if the checks below reject it, the copy won't matter, - * since we're rejecting the whole program. 
If all ok, - * then imm22 state will be added to r7 - * and r7 will be pkt(id=0,off=22,r=62) while - * r6 will stay as pkt(id=0,off=0,r=62) - */ - } - - if (src_reg->type == CONST_IMM) { - /* pkt_ptr += reg where reg is known constant */ - imm = src_reg->imm; - goto add_imm; - } - /* disallow pkt_ptr += reg - * if reg is not uknown_value with guaranteed zero upper bits - * otherwise pkt_ptr may overflow and addition will become - * subtraction which is not allowed - */ - if (src_reg->type != UNKNOWN_VALUE) { - verbose("cannot add '%s' to ptr_to_packet\n", - reg_type_str[src_reg->type]); - return -EACCES; - } - if (src_reg->imm < 48) { - verbose("cannot add integer value with %lld upper zero bits to ptr_to_packet\n", - src_reg->imm); - return -EACCES; - } - - had_id = (dst_reg->id != 0); + if (reg->max_value > BPF_REGISTER_MAX_RANGE) + reg->max_value = BPF_REGISTER_MAX_RANGE; + if (reg->min_value < BPF_REGISTER_MIN_RANGE || + reg->min_value > BPF_REGISTER_MAX_RANGE) + reg->min_value = BPF_REGISTER_MIN_RANGE; +} - /* dst_reg stays as pkt_ptr type and since some positive - * integer value was added to the pointer, increment its 'id' - */ - dst_reg->id = ++env->id_gen; - - /* something was added to pkt_ptr, set range to zero */ - dst_reg->aux_off += dst_reg->off; - dst_reg->off = 0; - dst_reg->range = 0; - if (had_id) - dst_reg->aux_off_align = min(dst_reg->aux_off_align, - src_reg->min_align); - else - dst_reg->aux_off_align = src_reg->min_align; +static void coerce_reg_to_32(struct bpf_reg_state *reg) +{ + /* 32-bit values can't be negative as an s64 */ + if (reg->min_value < 0) + reg->min_value = 0; + /* clear high 32 bits */ + reg->var_off = tnum_cast(reg->var_off, 4); + /* Did value become known? Then update bounds */ + if (tnum_is_const(reg->var_off)) { + if ((s64)reg->var_off.value > BPF_REGISTER_MIN_RANGE) + reg->min_value = reg->var_off.value; + if (reg->var_off.value < BPF_REGISTER_MAX_RANGE) + reg->max_value = reg->var_off.value; } - return 0; } -static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn) +/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. + * Caller must check_reg_overflow all argument regs beforehand. + * Caller should also handle BPF_MOV case separately. + * If we return -EACCES, caller may want to try again treating pointer as a + * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. + */ +static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, + struct bpf_insn *insn, + const struct bpf_reg_state *ptr_reg, + const struct bpf_reg_state *off_reg) { - struct bpf_reg_state *regs = env->cur_state.regs; - struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; + struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; + bool known = tnum_is_const(off_reg->var_off); + s64 min_val = off_reg->min_value; + u64 max_val = off_reg->max_value; u8 opcode = BPF_OP(insn->code); - s64 imm_log2; + u32 dst = insn->dst_reg; - /* for type == UNKNOWN_VALUE: - * imm > 0 -> number of zero upper bits - * imm == 0 -> don't track which is the same as all bits can be non-zero - */ + dst_reg = ®s[dst]; - if (BPF_SRC(insn->code) == BPF_X) { - struct bpf_reg_state *src_reg = ®s[insn->src_reg]; - - if (src_reg->type == UNKNOWN_VALUE && src_reg->imm > 0 && - dst_reg->imm && opcode == BPF_ADD) { - /* dreg += sreg - * where both have zero upper bits. Adding them - * can only result making one more bit non-zero - * in the larger value. - * Ex. 
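/*
 * Sketch of coerce_reg_to_32() above: before 32-bit ALU ops, both operands
 * are squeezed into u32 shape: no negative bounds, high 32 bits known
 * zero, and exact bounds if the value has just become fully known.  This
 * standalone version ignores the MIN/MAX_RANGE sentinel handling.
 */
#include <stdint.h>
#include <stdbool.h>

typedef int64_t s64;
typedef uint64_t u64;

struct tnum { u64 value; u64 mask; };
#define TNUM(v, m) ((struct tnum){ (v), (m) })

struct scalar {                     /* trimmed register state */
    struct tnum var_off;
    s64 min_value;
    u64 max_value;
};

static bool tnum_is_const(struct tnum a)
{
    return a.mask == 0;
}

static void coerce_to_32(struct scalar *reg)
{
    if (reg->min_value < 0)
        reg->min_value = 0;         /* a u32 is never negative as an s64 */
    reg->var_off = TNUM(reg->var_off.value & 0xffffffffULL,
                        reg->var_off.mask & 0xffffffffULL);
    if (tnum_is_const(reg->var_off)) {
        reg->min_value = reg->var_off.value;
        reg->max_value = reg->var_off.value;
    }
}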
0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47) - * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47) - */ - dst_reg->imm = min(dst_reg->imm, src_reg->imm); - dst_reg->imm--; - return 0; - } - if (src_reg->type == CONST_IMM && src_reg->imm > 0 && - dst_reg->imm && opcode == BPF_ADD) { - /* dreg += sreg - * where dreg has zero upper bits and sreg is const. - * Adding them can only result making one more bit - * non-zero in the larger value. - */ - imm_log2 = __ilog2_u64((long long)src_reg->imm); - dst_reg->imm = min(dst_reg->imm, 63 - imm_log2); - dst_reg->imm--; - return 0; - } - /* all other cases non supported yet, just mark dst_reg */ - dst_reg->imm = 0; - return 0; + if (WARN_ON_ONCE(known && (min_val != max_val))) { + print_verifier_state(&env->cur_state); + verbose("verifier internal error\n"); + return -EINVAL; + } + + if (BPF_CLASS(insn->code) != BPF_ALU64) { + /* 32-bit ALU ops on pointers produce (meaningless) scalars */ + if (!env->allow_ptr_leaks) + verbose("R%d 32-bit pointer arithmetic prohibited\n", + dst); + return -EACCES; } - /* sign extend 32-bit imm into 64-bit to make sure that - * negative values occupy bit 63. Note ilog2() would have - * been incorrect, since sizeof(insn->imm) == 4 + if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { + if (!env->allow_ptr_leaks) + verbose("R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", + dst); + return -EACCES; + } + if (ptr_reg->type == CONST_PTR_TO_MAP) { + if (!env->allow_ptr_leaks) + verbose("R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", + dst); + return -EACCES; + } + if (ptr_reg->type == PTR_TO_PACKET_END) { + if (!env->allow_ptr_leaks) + verbose("R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", + dst); + return -EACCES; + } + + /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. + * The id may be overwritten later if we create a new variable offset. */ - imm_log2 = __ilog2_u64((long long)insn->imm); + dst_reg->type = ptr_reg->type; + dst_reg->id = ptr_reg->id; - if (dst_reg->imm && opcode == BPF_LSH) { - /* reg <<= imm - * if reg was a result of 2 byte load, then its imm == 48 - * which means that upper 48 bits are zero and shifting this reg - * left by 4 would mean that upper 44 bits are still zero + switch (opcode) { + case BPF_ADD: + /* We can take a fixed offset as long as it doesn't overflow + * the s32 'off' field */ - dst_reg->imm -= insn->imm; - } else if (dst_reg->imm && opcode == BPF_MUL) { - /* reg *= imm - * if multiplying by 14 subtract 4 - * This is conservative calculation of upper zero bits. - * It's not trying to special case insn->imm == 1 or 0 cases + if (known && (ptr_reg->off + min_val == + (s64)(s32)(ptr_reg->off + min_val))) { + /* pointer += K. Accumulate it into fixed offset */ + dst_reg->min_value = ptr_reg->min_value; + dst_reg->max_value = ptr_reg->max_value; + dst_reg->var_off = ptr_reg->var_off; + dst_reg->off = ptr_reg->off + min_val; + dst_reg->range = ptr_reg->range; + break; + } + if (max_val == BPF_REGISTER_MAX_RANGE) { + if (!env->allow_ptr_leaks) + verbose("R%d tried to add unbounded value to pointer\n", + dst); + return -EACCES; + } + /* A new variable offset is created. Note that off_reg->off + * == 0, since it's a scalar. + * dst_reg gets the pointer type and since some positive + * integer value was added to the pointer, give it a new 'id' + * if it's a PTR_TO_PACKET. + * this creates a new 'base' pointer, off_reg (variable) gets + * added into the variable offset, and we copy the fixed offset + * from ptr_reg. 
*/ - dst_reg->imm -= imm_log2 + 1; - } else if (opcode == BPF_AND) { - /* reg &= imm */ - dst_reg->imm = 63 - imm_log2; - } else if (dst_reg->imm && opcode == BPF_ADD) { - /* reg += imm */ - dst_reg->imm = min(dst_reg->imm, 63 - imm_log2); - dst_reg->imm--; - } else if (opcode == BPF_RSH) { - /* reg >>= imm - * which means that after right shift, upper bits will be zero - * note that verifier already checked that - * 0 <= imm < 64 for shift insn + if (min_val <= BPF_REGISTER_MIN_RANGE) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) + dst_reg->min_value += min_val; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) + dst_reg->max_value += max_val; + dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); + dst_reg->off = ptr_reg->off; + if (ptr_reg->type == PTR_TO_PACKET) { + dst_reg->id = ++env->id_gen; + /* something was added to pkt_ptr, set range to zero */ + dst_reg->range = 0; + } + break; + case BPF_SUB: + if (dst_reg == off_reg) { + /* scalar -= pointer. Creates an unknown scalar */ + if (!env->allow_ptr_leaks) + verbose("R%d tried to subtract pointer from scalar\n", + dst); + return -EACCES; + } + /* We don't allow subtraction from FP, because (according to + * test_verifier.c test "invalid fp arithmetic", JITs might not + * be able to deal with it. */ - dst_reg->imm += insn->imm; - if (unlikely(dst_reg->imm > 64)) - /* some dumb code did: - * r2 = *(u32 *)mem; - * r2 >>= 32; - * and all bits are zero now */ - dst_reg->imm = 64; - } else { - /* all other alu ops, means that we don't know what will - * happen to the value, mark it with unknown number of zero bits + if (ptr_reg->type == PTR_TO_STACK) { + if (!env->allow_ptr_leaks) + verbose("R%d subtraction from stack pointer prohibited\n", + dst); + return -EACCES; + } + if (known && (ptr_reg->off - min_val == + (s64)(s32)(ptr_reg->off - min_val))) { + /* pointer -= K. Subtract it from fixed offset */ + dst_reg->min_value = ptr_reg->min_value; + dst_reg->max_value = ptr_reg->max_value; + dst_reg->var_off = ptr_reg->var_off; + dst_reg->id = ptr_reg->id; + dst_reg->off = ptr_reg->off - min_val; + dst_reg->range = ptr_reg->range; + break; + } + /* Subtracting a negative value will just confuse everything. + * This can happen if off_reg is an immediate. */ - dst_reg->imm = 0; - } - - if (dst_reg->imm < 0) { - /* all 64 bits of the register can contain non-zero bits - * and such value cannot be added to ptr_to_packet, since it - * may overflow, mark it as unknown to avoid further eval + if ((s64)max_val < 0) { + if (!env->allow_ptr_leaks) + verbose("R%d tried to subtract negative max_val %lld from pointer\n", + dst, (s64)max_val); + return -EACCES; + } + /* A new variable offset is created. If the subtrahend is known + * nonnegative, then any reg->range we had before is still good. */ - dst_reg->imm = 0; - } - return 0; -} - -static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env, - struct bpf_insn *insn) -{ - struct bpf_reg_state *regs = env->cur_state.regs; - struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; - struct bpf_reg_state *src_reg = ®s[insn->src_reg]; - u8 opcode = BPF_OP(insn->code); - s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm); - - /* BPF_X code with src_reg->type UNKNOWN_VALUE here. */ - if (src_reg->imm > 0 && dst_reg->imm) { - switch (opcode) { - case BPF_ADD: - /* dreg += sreg - * where both have zero upper bits. Adding them - * can only result making one more bit non-zero - * in the larger value. - * Ex. 
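/*
 * Sketch of the overflow test used in both the add and subtract paths
 * above: a known constant is folded into the pointer's fixed offset only
 * when the result still fits the s32 'off' field; otherwise it has to be
 * tracked as a variable offset.  Standalone illustration only.
 */
#include <stdint.h>
#include <stdbool.h>

typedef int32_t s32;
typedef int64_t s64;

static bool fits_fixed_off(s32 cur_off, s64 delta, s32 *new_off)
{
    s64 sum = (s64)cur_off + delta;

    if (sum != (s64)(s32)sum)
        return false;               /* would overflow the s32 field */
    *new_off = (s32)sum;
    return true;
}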
0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47) - * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47) - */ - dst_reg->imm = min(src_reg->imm, 63 - imm_log2); - dst_reg->imm--; - break; - case BPF_AND: - /* dreg &= sreg - * AND can not extend zero bits only shrink - * Ex. 0x00..00ffffff - * & 0x0f..ffffffff - * ---------------- - * 0x00..00ffffff - */ - dst_reg->imm = max(src_reg->imm, 63 - imm_log2); - break; - case BPF_OR: - /* dreg |= sreg - * OR can only extend zero bits - * Ex. 0x00..00ffffff - * | 0x0f..ffffffff - * ---------------- - * 0x0f..00ffffff - */ - dst_reg->imm = min(src_reg->imm, 63 - imm_log2); - break; - case BPF_SUB: - case BPF_MUL: - case BPF_RSH: - case BPF_LSH: - /* These may be flushed out later */ - default: - mark_reg_unknown_value(regs, insn->dst_reg); + if (max_val >= BPF_REGISTER_MAX_RANGE) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) + dst_reg->min_value -= max_val; + if (min_val <= BPF_REGISTER_MIN_RANGE) + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; + if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) + dst_reg->max_value -= min_val; + dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); + dst_reg->off = ptr_reg->off; + if (ptr_reg->type == PTR_TO_PACKET) { + dst_reg->id = ++env->id_gen; + /* something was added to pkt_ptr, set range to zero */ + if (min_val < 0) + dst_reg->range = 0; } - } else { - mark_reg_unknown_value(regs, insn->dst_reg); + break; + case BPF_AND: + case BPF_OR: + case BPF_XOR: + /* bitwise ops on pointers are troublesome, prohibit for now. + * (However, in principle we could allow some cases, e.g. + * ptr &= ~3 which would reduce min_value by 3.) + */ + if (!env->allow_ptr_leaks) + verbose("R%d bitwise operator %s on pointer prohibited\n", + dst, bpf_alu_string[opcode >> 4]); + return -EACCES; + default: + /* other operators (e.g. MUL,LSH) produce non-pointer results */ + if (!env->allow_ptr_leaks) + verbose("R%d pointer arithmetic with %s operator prohibited\n", + dst, bpf_alu_string[opcode >> 4]); + return -EACCES; } - dst_reg->type = UNKNOWN_VALUE; + check_reg_overflow(dst_reg); return 0; } -static int evaluate_reg_imm_alu(struct bpf_verifier_env *env, - struct bpf_insn *insn) +static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, + struct bpf_insn *insn, + struct bpf_reg_state *dst_reg, + struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = env->cur_state.regs; - struct bpf_reg_state *dst_reg = ®s[insn->dst_reg]; - struct bpf_reg_state *src_reg = ®s[insn->src_reg]; - u8 opcode = BPF_OP(insn->code); - u64 dst_imm = dst_reg->imm; - - if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE) - return evaluate_reg_imm_alu_unknown(env, insn); - - /* dst_reg->type == CONST_IMM here. Simulate execution of insns - * containing ALU ops. Don't care about overflow or negative - * values, just add/sub/... them; registers are in u64. 
- */ - if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) { - dst_imm += insn->imm; - } else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X && - src_reg->type == CONST_IMM) { - dst_imm += src_reg->imm; - } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_K) { - dst_imm -= insn->imm; - } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_X && - src_reg->type == CONST_IMM) { - dst_imm -= src_reg->imm; - } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_K) { - dst_imm *= insn->imm; - } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_X && - src_reg->type == CONST_IMM) { - dst_imm *= src_reg->imm; - } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) { - dst_imm |= insn->imm; - } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X && - src_reg->type == CONST_IMM) { - dst_imm |= src_reg->imm; - } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_K) { - dst_imm &= insn->imm; - } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_X && - src_reg->type == CONST_IMM) { - dst_imm &= src_reg->imm; - } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_K) { - dst_imm >>= insn->imm; - } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_X && - src_reg->type == CONST_IMM) { - dst_imm >>= src_reg->imm; - } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_K) { - dst_imm <<= insn->imm; - } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_X && - src_reg->type == CONST_IMM) { - dst_imm <<= src_reg->imm; - } else { - mark_reg_unknown_value(regs, insn->dst_reg); - goto out; - } - - dst_reg->imm = dst_imm; -out: - return 0; -} - -static void check_reg_overflow(struct bpf_reg_state *reg) -{ - if (reg->max_value > BPF_REGISTER_MAX_RANGE) - reg->max_value = BPF_REGISTER_MAX_RANGE; - if (reg->min_value < BPF_REGISTER_MIN_RANGE || - reg->min_value > BPF_REGISTER_MAX_RANGE) - reg->min_value = BPF_REGISTER_MIN_RANGE; -} - -static u32 calc_align(u32 imm) -{ - if (!imm) - return 1U << 31; - return imm - ((imm - 1) & imm); -} - -static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, - struct bpf_insn *insn) -{ - struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; s64 min_val = BPF_REGISTER_MIN_RANGE; u64 max_val = BPF_REGISTER_MAX_RANGE; u8 opcode = BPF_OP(insn->code); - u32 dst_align, src_align; + bool src_known, dst_known; - dst_reg = ®s[insn->dst_reg]; - src_align = 0; - if (BPF_SRC(insn->code) == BPF_X) { - check_reg_overflow(®s[insn->src_reg]); - min_val = regs[insn->src_reg].min_value; - max_val = regs[insn->src_reg].max_value; - - /* If the source register is a random pointer then the - * min_value/max_value values represent the range of the known - * accesses into that value, not the actual min/max value of the - * register itself. In this case we have to reset the reg range - * values so we know it is not safe to look at. - */ - if (regs[insn->src_reg].type != CONST_IMM && - regs[insn->src_reg].type != UNKNOWN_VALUE) { - min_val = BPF_REGISTER_MIN_RANGE; - max_val = BPF_REGISTER_MAX_RANGE; - src_align = 0; - } else { - src_align = regs[insn->src_reg].min_align; - } - } else if (insn->imm < BPF_REGISTER_MAX_RANGE && - (s64)insn->imm > BPF_REGISTER_MIN_RANGE) { - min_val = max_val = insn->imm; - src_align = calc_align(insn->imm); - } - - dst_align = dst_reg->min_align; - - /* We don't know anything about what was done to this register, mark it - * as unknown. 
Also, if both derived bounds came from signed/unsigned - * mixed compares and one side is unbounded, we cannot really do anything - * with them as boundaries cannot be trusted. Thus, arithmetic of two - * regs of such kind will get invalidated bounds on the dst side. - */ - if ((min_val == BPF_REGISTER_MIN_RANGE && - max_val == BPF_REGISTER_MAX_RANGE) || - (BPF_SRC(insn->code) == BPF_X && - ((min_val != BPF_REGISTER_MIN_RANGE && - max_val == BPF_REGISTER_MAX_RANGE) || - (min_val == BPF_REGISTER_MIN_RANGE && - max_val != BPF_REGISTER_MAX_RANGE) || - (dst_reg->min_value != BPF_REGISTER_MIN_RANGE && - dst_reg->max_value == BPF_REGISTER_MAX_RANGE) || - (dst_reg->min_value == BPF_REGISTER_MIN_RANGE && - dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) && - regs[insn->dst_reg].value_from_signed != - regs[insn->src_reg].value_from_signed)) { - reset_reg_range_values(regs, insn->dst_reg); - return; - } - - /* If one of our values was at the end of our ranges then we can't just - * do our normal operations to the register, we need to set the values - * to the min/max since they are undefined. - */ - if (opcode != BPF_SUB) { - if (min_val == BPF_REGISTER_MIN_RANGE) - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - if (max_val == BPF_REGISTER_MAX_RANGE) - dst_reg->max_value = BPF_REGISTER_MAX_RANGE; + if (BPF_CLASS(insn->code) != BPF_ALU64) { + /* 32-bit ALU ops are (32,32)->64 */ + coerce_reg_to_32(dst_reg); + coerce_reg_to_32(&src_reg); } + min_val = src_reg.min_value; + max_val = src_reg.max_value; + src_known = tnum_is_const(src_reg.var_off); + dst_known = tnum_is_const(dst_reg->var_off); switch (opcode) { case BPF_ADD: + if (min_val == BPF_REGISTER_MIN_RANGE) + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) dst_reg->min_value += min_val; + /* if max_val is MAX_RANGE, this will saturate dst->max */ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) dst_reg->max_value += max_val; - dst_reg->min_align = min(src_align, dst_align); + dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: - /* If one of our values was at the end of our ranges, then the - * _opposite_ value in the dst_reg goes to the end of our range. - */ - if (min_val == BPF_REGISTER_MIN_RANGE) - dst_reg->max_value = BPF_REGISTER_MAX_RANGE; if (max_val == BPF_REGISTER_MAX_RANGE) dst_reg->min_value = BPF_REGISTER_MIN_RANGE; if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) dst_reg->min_value -= max_val; + if (min_val == BPF_REGISTER_MIN_RANGE) + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) dst_reg->max_value -= min_val; - dst_reg->min_align = min(src_align, dst_align); + dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: - if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) - dst_reg->min_value *= min_val; + if (min_val < 0 || dst_reg->min_value < 0) { + /* Ain't nobody got time to multiply that sign */ + __mark_reg_unknown(dst_reg); + break; + } + dst_reg->min_value *= min_val; + /* if max_val is MAX_RANGE, this will saturate dst->max. + * We know MAX_RANGE ** 2 won't overflow a u64, because + * MAX_RANGE itself fits in a u32. + */ + BUILD_BUG_ON(BPF_REGISTER_MAX_RANGE > (u32)-1); if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) dst_reg->max_value *= max_val; - dst_reg->min_align = max(src_align, dst_align); + dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); break; case BPF_AND: - /* Disallow AND'ing of negative numbers, ain't nobody got time - * for that. 
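/*
 * Sketch of why the BUILD_BUG_ON above is enough to make the bounds
 * multiplication safe: once the saturation limit fits in 32 bits, two
 * already-clamped bounds can never overflow a u64 product.  Simplified;
 * the real code also gives up when either bound might be negative.
 */
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

static u64 sat_mul_bound(u64 a, u64 b, u32 limit)
{
    u64 p;

    if (a >= limit || b >= limit)
        return limit;
    p = a * b;                      /* a, b < 2^32, so no u64 overflow */
    return p > limit ? limit : p;
}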
Otherwise the minimum is 0 and the max is the max - * value we could AND against. + if (src_known && dst_known) { + u64 value = dst_reg->var_off.value & src_reg.var_off.value; + + dst_reg->var_off = tnum_const(value); + dst_reg->min_value = dst_reg->max_value = min_t(u64, + value, BPF_REGISTER_MAX_RANGE); + break; + } + /* Lose min_value when AND'ing negative numbers, ain't nobody + * got time for that. Otherwise we get our minimum from the + * var_off, since that's inherently bitwise. + * Our maximum is the minimum of the operands' maxima. */ - if (min_val < 0) + dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); + if (min_val < 0 && dst_reg->min_value < 0) dst_reg->min_value = BPF_REGISTER_MIN_RANGE; else - dst_reg->min_value = 0; - dst_reg->max_value = max_val; - dst_reg->min_align = max(src_align, dst_align); + dst_reg->min_value = dst_reg->var_off.value; + dst_reg->max_value = min(dst_reg->max_value, max_val); + break; + case BPF_OR: + if (src_known && dst_known) { + u64 value = dst_reg->var_off.value | src_reg.var_off.value; + + dst_reg->var_off = tnum_const(value); + dst_reg->min_value = dst_reg->max_value = min_t(u64, + value, BPF_REGISTER_MAX_RANGE); + break; + } + /* Lose ranges when OR'ing negative numbers, ain't nobody got + * time for that. Otherwise we get our maximum from the var_off, + * and our minimum is the maximum of the operands' minima. + */ + dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); + if (min_val < 0 || dst_reg->min_value < 0) { + dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + dst_reg->max_value = BPF_REGISTER_MAX_RANGE; + } else { + dst_reg->min_value = max(dst_reg->min_value, min_val); + dst_reg->max_value = dst_reg->var_off.value | dst_reg->var_off.mask; + } break; case BPF_LSH: + if (min_val < 0) { + /* LSH by a negative number is undefined */ + mark_reg_unknown(regs, insn->dst_reg); + break; + } /* Gotta have special overflow logic here, if we're shifting * more than MAX_RANGE then just assume we have an invalid * range. */ if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) { dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - dst_reg->min_align = 1; + dst_reg->var_off = tnum_unknown; } else { if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) dst_reg->min_value <<= min_val; - if (!dst_reg->min_align) - dst_reg->min_align = 1; - dst_reg->min_align <<= min_val; + if (src_known) + dst_reg->var_off = tnum_lshift(dst_reg->var_off, min_val); + else + dst_reg->var_off = tnum_lshift(tnum_unknown, min_val); } if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) dst_reg->max_value = BPF_REGISTER_MAX_RANGE; @@ -1946,37 +1938,139 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, dst_reg->max_value <<= max_val; break; case BPF_RSH: - /* RSH by a negative number is undefined, and the BPF_RSH is an - * unsigned shift, so make the appropriate casts. 
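/*
 * Sketch of the bitwise tnum helpers used above.  AND can only clear bits,
 * so the result's possible 1s are the intersection of each side's possible
 * 1s; OR can only set bits, so known 1s stay known.  Approximations of
 * tnum_and()/tnum_or() from kernel/bpf/tnum.c.
 */
#include <stdint.h>

typedef uint64_t u64;

struct tnum { u64 value; u64 mask; };      /* mask bit set => bit unknown */
#define TNUM(v, m) ((struct tnum){ (v), (m) })

static struct tnum tnum_and(struct tnum a, struct tnum b)
{
    u64 alpha = a.value | a.mask;   /* bits that may be 1 in a */
    u64 beta = b.value | b.mask;    /* bits that may be 1 in b */
    u64 v = a.value & b.value;      /* bits known 1 in both */

    return TNUM(v, alpha & beta & ~v);
}

static struct tnum tnum_or(struct tnum a, struct tnum b)
{
    u64 v = a.value | b.value;      /* bits known 1 in either */
    u64 mu = a.mask | b.mask;       /* bits unknown in either */

    return TNUM(v, mu & ~v);
}

/* After OR, "value | mask" is exactly the max_value bound taken above */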
- */ - if (min_val < 0 || dst_reg->min_value < 0) { - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; + if (min_val < 0) { + /* RSH by a negative number is undefined */ + mark_reg_unknown(regs, insn->dst_reg); + break; + } + /* BPF_RSH is an unsigned shift, so make the appropriate casts */ + if (dst_reg->min_value < 0) { + if (min_val) + /* Sign bit will be cleared */ + dst_reg->min_value = 0; } else { dst_reg->min_value = (u64)(dst_reg->min_value) >> min_val; } - if (min_val < 0) { - dst_reg->min_align = 1; - } else { - dst_reg->min_align >>= (u64) min_val; - if (!dst_reg->min_align) - dst_reg->min_align = 1; - } - if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) - dst_reg->max_value >>= max_val; + if (src_known) + dst_reg->var_off = tnum_rshift(dst_reg->var_off, min_val); + else + dst_reg->var_off = tnum_rshift(tnum_unknown, min_val); + if (dst_reg->max_value == BPF_REGISTER_MAX_RANGE) + dst_reg->max_value = ~0; + dst_reg->max_value >>= max_val; break; default: - reset_reg_range_values(regs, insn->dst_reg); + mark_reg_unknown(regs, insn->dst_reg); break; } check_reg_overflow(dst_reg); + return 0; +} + +/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max + * and var_off. + */ +static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, + struct bpf_insn *insn) +{ + struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg, *src_reg; + struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; + u8 opcode = BPF_OP(insn->code); + int rc; + + dst_reg = ®s[insn->dst_reg]; + check_reg_overflow(dst_reg); + src_reg = NULL; + if (dst_reg->type != SCALAR_VALUE) + ptr_reg = dst_reg; + if (BPF_SRC(insn->code) == BPF_X) { + src_reg = ®s[insn->src_reg]; + check_reg_overflow(src_reg); + + if (src_reg->type != SCALAR_VALUE) { + if (dst_reg->type != SCALAR_VALUE) { + /* Combining two pointers by any ALU op yields + * an arbitrary scalar. + */ + if (!env->allow_ptr_leaks) { + verbose("R%d pointer %s pointer prohibited\n", + insn->dst_reg, + bpf_alu_string[opcode >> 4]); + return -EACCES; + } + mark_reg_unknown(regs, insn->dst_reg); + return 0; + } else { + /* scalar += pointer + * This is legal, but we have to reverse our + * src/dest handling in computing the range + */ + rc = adjust_ptr_min_max_vals(env, insn, + src_reg, dst_reg); + if (rc == -EACCES && env->allow_ptr_leaks) { + /* scalar += unknown scalar */ + __mark_reg_unknown(&off_reg); + return adjust_scalar_min_max_vals( + env, insn, + dst_reg, off_reg); + } + return rc; + } + } else if (ptr_reg) { + /* pointer += scalar */ + rc = adjust_ptr_min_max_vals(env, insn, + dst_reg, src_reg); + if (rc == -EACCES && env->allow_ptr_leaks) { + /* unknown scalar += scalar */ + __mark_reg_unknown(dst_reg); + return adjust_scalar_min_max_vals( + env, insn, dst_reg, *src_reg); + } + return rc; + } + } else { + /* Pretend the src is a reg with a known value, since we only + * need to be able to read from this state. 
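/*
 * Sketch of the shift helpers used above: shifting by a *known* amount
 * shifts both halves of the pair, so known bits stay known and vacated
 * bits become known zeros; shifts by unknown or out-of-range amounts are
 * handled by falling back to tnum_unknown.  Approximations of
 * tnum_lshift()/tnum_rshift(); shift is assumed to be < 64.
 */
#include <stdint.h>

typedef uint64_t u64;

struct tnum { u64 value; u64 mask; };
#define TNUM(v, m) ((struct tnum){ (v), (m) })

static struct tnum tnum_lshift(struct tnum a, unsigned int shift)
{
    return TNUM(a.value << shift, a.mask << shift);
}

static struct tnum tnum_rshift(struct tnum a, unsigned int shift)
{
    return TNUM(a.value >> shift, a.mask >> shift);
}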
+ */ + off_reg.type = SCALAR_VALUE; + off_reg.var_off = tnum_const(insn->imm); + off_reg.min_value = insn->imm; + off_reg.max_value = insn->imm; + src_reg = &off_reg; + check_reg_overflow(src_reg); + if (ptr_reg) { /* pointer += K */ + rc = adjust_ptr_min_max_vals(env, insn, + ptr_reg, src_reg); + if (rc == -EACCES && env->allow_ptr_leaks) { + /* unknown scalar += K */ + __mark_reg_unknown(dst_reg); + return adjust_scalar_min_max_vals( + env, insn, dst_reg, off_reg); + } + return rc; + } + } + + /* Got here implies adding two SCALAR_VALUEs */ + if (WARN_ON_ONCE(ptr_reg)) { + print_verifier_state(&env->cur_state); + verbose("verifier internal error: unexpected ptr_reg\n"); + return -EINVAL; + } + if (WARN_ON(!src_reg)) { + print_verifier_state(&env->cur_state); + verbose("verifier internal error: no src_reg\n"); + return -EINVAL; + } + return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { - struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; + struct bpf_reg_state *regs = env->cur_state.regs; u8 opcode = BPF_OP(insn->code); int err; @@ -2036,11 +2130,6 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) if (err) return err; - /* we are setting our register to something new, we need to - * reset its range values. - */ - reset_reg_range_values(regs, insn->dst_reg); - if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 @@ -2048,24 +2137,29 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) */ regs[insn->dst_reg] = regs[insn->src_reg]; } else { + /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose("R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } - mark_reg_unknown_value(regs, insn->dst_reg); + mark_reg_unknown(regs, insn->dst_reg); + /* high 32 bits are known zero. But this is + * still out of range for max_value, so leave + * that. + */ + regs[insn->dst_reg].var_off = tnum_cast( + regs[insn->dst_reg].var_off, 4); } } else { /* case: R = imm * remember the value we stored into this reg */ - regs[insn->dst_reg].type = CONST_IMM; - regs[insn->dst_reg].imm = insn->imm; - regs[insn->dst_reg].id = 0; + regs[insn->dst_reg].type = SCALAR_VALUE; + regs[insn->dst_reg].var_off = tnum_const(insn->imm); regs[insn->dst_reg].max_value = insn->imm; regs[insn->dst_reg].min_value = insn->imm; - regs[insn->dst_reg].min_align = calc_align(insn->imm); - regs[insn->dst_reg].value_from_signed = false; + regs[insn->dst_reg].id = 0; } } else if (opcode > BPF_END) { @@ -2116,68 +2210,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) if (err) return err; - dst_reg = ®s[insn->dst_reg]; - - /* first we want to adjust our ranges. 
*/ - adjust_reg_min_max_vals(env, insn); - - /* pattern match 'bpf_add Rx, imm' instruction */ - if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 && - dst_reg->type == FRAME_PTR && BPF_SRC(insn->code) == BPF_K) { - dst_reg->type = PTR_TO_STACK; - dst_reg->imm = insn->imm; - return 0; - } else if (opcode == BPF_ADD && - BPF_CLASS(insn->code) == BPF_ALU64 && - dst_reg->type == PTR_TO_STACK && - ((BPF_SRC(insn->code) == BPF_X && - regs[insn->src_reg].type == CONST_IMM) || - BPF_SRC(insn->code) == BPF_K)) { - if (BPF_SRC(insn->code) == BPF_X) - dst_reg->imm += regs[insn->src_reg].imm; - else - dst_reg->imm += insn->imm; - return 0; - } else if (opcode == BPF_ADD && - BPF_CLASS(insn->code) == BPF_ALU64 && - (dst_reg->type == PTR_TO_PACKET || - (BPF_SRC(insn->code) == BPF_X && - regs[insn->src_reg].type == PTR_TO_PACKET))) { - /* ptr_to_packet += K|X */ - return check_packet_ptr_add(env, insn); - } else if (BPF_CLASS(insn->code) == BPF_ALU64 && - dst_reg->type == UNKNOWN_VALUE && - env->allow_ptr_leaks) { - /* unknown += K|X */ - return evaluate_reg_alu(env, insn); - } else if (BPF_CLASS(insn->code) == BPF_ALU64 && - dst_reg->type == CONST_IMM && - env->allow_ptr_leaks) { - /* reg_imm += K|X */ - return evaluate_reg_imm_alu(env, insn); - } else if (is_pointer_value(env, insn->dst_reg)) { - verbose("R%d pointer arithmetic prohibited\n", - insn->dst_reg); - return -EACCES; - } else if (BPF_SRC(insn->code) == BPF_X && - is_pointer_value(env, insn->src_reg)) { - verbose("R%d pointer arithmetic prohibited\n", - insn->src_reg); - return -EACCES; - } - - /* If we did pointer math on a map value then just set it to our - * PTR_TO_MAP_VALUE_ADJ type so we can deal with any stores or - * loads to this register appropriately, otherwise just mark the - * register as unknown. - */ - if (env->allow_ptr_leaks && - BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD && - (dst_reg->type == PTR_TO_MAP_VALUE || - dst_reg->type == PTR_TO_MAP_VALUE_ADJ)) - dst_reg->type = PTR_TO_MAP_VALUE_ADJ; - else - mark_reg_unknown_value(regs, insn->dst_reg); + return adjust_reg_min_max_vals(env, insn); } return 0; @@ -2189,6 +2222,17 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, struct bpf_reg_state *regs = state->regs, *reg; int i; + if (dst_reg->off < 0) + /* This doesn't give us any range */ + return; + + if (dst_reg->max_value > MAX_PACKET_OFF || + dst_reg->max_value + dst_reg->off > MAX_PACKET_OFF) + /* Risk of overflow. For instance, ptr + (1<<63) may be less + * than pkt_end, but that's because it's also less than pkt. + */ + return; + /* LLVM can generate two kind of checks: * * Type 1: @@ -2219,30 +2263,44 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, * so that range of bytes [r3, r3 + 8) is safe to access. */ + /* If our ids match, then we must have the same max_value. And we + * don't care about the other reg's fixed offset, since if it's too big + * the range won't allow anything. + * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 
+ */ for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) /* keep the maximum range already checked */ - regs[i].range = max(regs[i].range, dst_reg->off); + regs[i].range = max_t(u16, regs[i].range, dst_reg->off); for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { if (state->stack_slot_type[i] != STACK_SPILL) continue; reg = &state->spilled_regs[i / BPF_REG_SIZE]; if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) - reg->range = max(reg->range, dst_reg->off); + reg->range = max_t(u16, reg->range, dst_reg->off); } } /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. + * In JEQ/JNE cases we also adjust the var_off values. */ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { bool value_from_signed = true; - bool is_range = true; + + /* If the dst_reg is a pointer, we can't learn anything about its + * variable offset from the compare (unless src_reg were a pointer into + * the same object, but we don't bother with that. + * Since false_reg and true_reg have the same type by construction, we + * only need to check one of them for pointerness. + */ + if (__is_pointer_value(false, false_reg)) + return; switch (opcode) { case BPF_JEQ: @@ -2250,14 +2308,14 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, * true then we know for sure. */ true_reg->max_value = true_reg->min_value = val; - is_range = false; + true_reg->var_off = tnum_const(val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ false_reg->max_value = false_reg->min_value = val; - is_range = false; + false_reg->var_off = tnum_const(val); break; case BPF_JGT: value_from_signed = false; @@ -2305,23 +2363,19 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, check_reg_overflow(false_reg); check_reg_overflow(true_reg); - if (is_range) { - if (__is_pointer_value(false, false_reg)) - reset_reg_range_values(false_reg, 0); - if (__is_pointer_value(false, true_reg)) - reset_reg_range_values(true_reg, 0); - } } -/* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg - * is the variable reg. +/* Same as above, but for the case that dst_reg holds a constant and src_reg is + * the variable reg. */ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { bool value_from_signed = true; - bool is_range = true; + + if (__is_pointer_value(false, false_reg)) + return; switch (opcode) { case BPF_JEQ: @@ -2329,14 +2383,14 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, * true then we know for sure. 
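/*
 * Sketch of what a conditional jump against a constant teaches each
 * branch, for the unsigned cases only (the signed/unsigned juggling via
 * value_from_signed is elided).  On the JEQ-taken branch the value becomes
 * fully known, which is why the code above also pins var_off there.
 */
#include <stdint.h>

typedef uint64_t u64;

struct ubounds { u64 min; u64 max; };      /* unsigned bounds only */

static void refine_jeq_taken(struct ubounds *taken, u64 val)
{
    taken->min = val;               /* reg == val on this branch */
    taken->max = val;
}

/* "if (reg > val) goto ...": taken knows reg >= val + 1, fall-through
 * knows reg <= val.
 */
static void refine_jgt(struct ubounds *taken, struct ubounds *fallthru, u64 val)
{
    if (fallthru->max > val)
        fallthru->max = val;
    if (val != (u64)-1 && taken->min < val + 1)
        taken->min = val + 1;       /* reg > U64_MAX cannot happen anyway */
}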
*/ true_reg->max_value = true_reg->min_value = val; - is_range = false; + true_reg->var_off = tnum_const(val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ false_reg->max_value = false_reg->min_value = val; - is_range = false; + false_reg->var_off = tnum_const(val); break; case BPF_JGT: value_from_signed = false; @@ -2385,27 +2439,60 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, check_reg_overflow(false_reg); check_reg_overflow(true_reg); - if (is_range) { - if (__is_pointer_value(false, false_reg)) - reset_reg_range_values(false_reg, 0); - if (__is_pointer_value(false, true_reg)) - reset_reg_range_values(true_reg, 0); +} + +/* Regs are known to be equal, so intersect their min/max/var_off */ +static void __reg_combine_min_max(struct bpf_reg_state *src_reg, + struct bpf_reg_state *dst_reg) +{ + src_reg->min_value = dst_reg->min_value = max(src_reg->min_value, + dst_reg->min_value); + src_reg->max_value = dst_reg->max_value = min(src_reg->max_value, + dst_reg->max_value); + src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, + dst_reg->var_off); + check_reg_overflow(src_reg); + check_reg_overflow(dst_reg); +} + +static void reg_combine_min_max(struct bpf_reg_state *true_src, + struct bpf_reg_state *true_dst, + struct bpf_reg_state *false_src, + struct bpf_reg_state *false_dst, + u8 opcode) +{ + switch (opcode) { + case BPF_JEQ: + __reg_combine_min_max(true_src, true_dst); + break; + case BPF_JNE: + __reg_combine_min_max(false_src, false_dst); } } static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, - enum bpf_reg_type type) + bool is_null) { struct bpf_reg_state *reg = ®s[regno]; if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { - if (type == UNKNOWN_VALUE) { - __mark_reg_unknown_value(regs, regno); + /* Old offset (both fixed and variable parts) should + * have been known-zero, because we don't allow pointer + * arithmetic on pointers that might be NULL. + */ + if (WARN_ON_ONCE(reg->min_value || reg->max_value || + reg->var_off.value || reg->var_off.mask || + reg->off)) { + reg->min_value = reg->max_value = reg->off = 0; + reg->var_off = tnum_const(0); + } + if (is_null) { + reg->type = SCALAR_VALUE; } else if (reg->map_ptr->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = reg->map_ptr->inner_map_meta; } else { - reg->type = type; + reg->type = PTR_TO_MAP_VALUE; } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances @@ -2419,19 +2506,19 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, * be folded together at some point. 
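/*
 * Sketch of tnum_intersect(), which __reg_combine_min_max() above uses
 * when two registers are known equal: each side may know different bits,
 * and a bit stays unknown only if neither side knew it.  Approximation of
 * the helper from kernel/bpf/tnum.c.
 */
#include <stdint.h>

typedef uint64_t u64;

struct tnum { u64 value; u64 mask; };
#define TNUM(v, m) ((struct tnum){ (v), (m) })

static struct tnum tnum_intersect(struct tnum a, struct tnum b)
{
    u64 v = a.value | b.value;      /* bits known (as 1) on either side */
    u64 mu = a.mask & b.mask;       /* unknown on both sides */

    return TNUM(v & ~mu, mu);
}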
*/ static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, - enum bpf_reg_type type) + bool is_null) { struct bpf_reg_state *regs = state->regs; u32 id = regs[regno].id; int i; for (i = 0; i < MAX_BPF_REG; i++) - mark_map_reg(regs, i, id, type); + mark_map_reg(regs, i, id, is_null); for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { if (state->stack_slot_type[i] != STACK_SPILL) continue; - mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, type); + mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, id, is_null); } } @@ -2481,7 +2568,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, /* detect if R == 0 where R was initialized to zero earlier */ if (BPF_SRC(insn->code) == BPF_K && (opcode == BPF_JEQ || opcode == BPF_JNE) && - dst_reg->type == CONST_IMM && dst_reg->imm == insn->imm) { + dst_reg->type == SCALAR_VALUE && + tnum_equals_const(dst_reg->var_off, insn->imm)) { if (opcode == BPF_JEQ) { /* if (imm == imm) goto pc+off; * only follow the goto, ignore fall-through @@ -2503,17 +2591,30 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. + * this is only legit if both are scalars (or pointers to the same + * object, I suppose, but we don't support that right now), because + * otherwise the different base pointers mean the offsets aren't + * comparable. */ if (BPF_SRC(insn->code) == BPF_X) { - if (regs[insn->src_reg].type == CONST_IMM) - reg_set_min_max(&other_branch->regs[insn->dst_reg], - dst_reg, regs[insn->src_reg].imm, - opcode); - else if (dst_reg->type == CONST_IMM) - reg_set_min_max_inv(&other_branch->regs[insn->src_reg], - ®s[insn->src_reg], dst_reg->imm, - opcode); - } else { + if (dst_reg->type == SCALAR_VALUE && + regs[insn->src_reg].type == SCALAR_VALUE) { + if (tnum_is_const(regs[insn->src_reg].var_off)) + reg_set_min_max(&other_branch->regs[insn->dst_reg], + dst_reg, regs[insn->src_reg].var_off.value, + opcode); + else if (tnum_is_const(dst_reg->var_off)) + reg_set_min_max_inv(&other_branch->regs[insn->src_reg], + ®s[insn->src_reg], + dst_reg->var_off.value, opcode); + else if (opcode == BPF_JEQ || opcode == BPF_JNE) + /* Comparing for equality, we can combine knowledge */ + reg_combine_min_max(&other_branch->regs[insn->src_reg], + &other_branch->regs[insn->dst_reg], + ®s[insn->src_reg], + ®s[insn->dst_reg], opcode); + } + } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, insn->imm, opcode); } @@ -2525,10 +2626,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, /* Mark all identical map registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ - mark_map_regs(this_branch, insn->dst_reg, - opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE); - mark_map_regs(other_branch, insn->dst_reg, - opcode == BPF_JEQ ? 
UNKNOWN_VALUE : PTR_TO_MAP_VALUE); + mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); + mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && dst_reg->type == PTR_TO_PACKET && regs[insn->src_reg].type == PTR_TO_PACKET_END) { @@ -2576,8 +2675,11 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; - regs[insn->dst_reg].type = CONST_IMM; - regs[insn->dst_reg].imm = imm; + regs[insn->dst_reg].type = SCALAR_VALUE; + regs[insn->dst_reg].min_value = imm; + regs[insn->dst_reg].max_value = imm; + check_reg_overflow(®s[insn->dst_reg]); + regs[insn->dst_reg].var_off = tnum_const(imm); regs[insn->dst_reg].id = 0; return 0; } @@ -2659,7 +2761,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) /* mark destination R0 register as readable, since it contains * the value fetched from the packet */ - regs[BPF_REG_0].type = UNKNOWN_VALUE; + mark_reg_unknown(regs, BPF_REG_0); return 0; } @@ -2862,57 +2964,145 @@ err_free: return ret; } -/* the following conditions reduce the number of explored insns - * from ~140k to ~80k for ultra large programs that use a lot of ptr_to_packet +/* check %cur's range satisfies %old's */ +static bool range_within(struct bpf_reg_state *old, + struct bpf_reg_state *cur) +{ + return old->min_value <= cur->min_value && + old->max_value >= cur->max_value; +} + +/* Maximum number of register states that can exist at once */ +#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) +struct idpair { + u32 old; + u32 cur; +}; + +/* If in the old state two registers had the same id, then they need to have + * the same id in the new state as well. But that id could be different from + * the old state, so we need to track the mapping from old to new ids. + * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent + * regs with old id 5 must also have new id 9 for the new state to be safe. But + * regs with a different old id could still have new id 9, we don't care about + * that. + * So we look through our idmap to see if this old id has been seen before. If + * so, we require the new id to match; otherwise, we add the id pair to the map. */ -static bool compare_ptrs_to_packet(struct bpf_verifier_env *env, - struct bpf_reg_state *old, - struct bpf_reg_state *cur) +static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) { - if (old->id != cur->id) - return false; + unsigned int i; + + for (i = 0; i < ID_MAP_SIZE; i++) { + if (!idmap[i].old) { + /* Reached an empty slot; haven't seen this id before */ + idmap[i].old = old_id; + idmap[i].cur = cur_id; + return true; + } + if (idmap[i].old == old_id) + return idmap[i].cur == cur_id; + } + /* We ran out of idmap slots, which should be impossible */ + WARN_ON_ONCE(1); + return false; +} - /* old ptr_to_packet is more conservative, since it allows smaller - * range. Ex: - * old(off=0,r=10) is equal to cur(off=0,r=20), because - * old(off=0,r=10) means that with range=10 the verifier proceeded - * further and found no issues with the program. Now we're in the same - * spot with cur(off=0,r=20), so we're safe too, since anything further - * will only be looking at most 10 bytes after this pointer. 
- */ - if (old->off == cur->off && old->range < cur->range) +/* Returns true if (rold safe implies rcur safe) */ +static bool regsafe(struct bpf_reg_state *rold, + struct bpf_reg_state *rcur, + bool varlen_map_access, struct idpair *idmap) +{ + if (memcmp(rold, rcur, sizeof(*rold)) == 0) return true; - /* old(off=20,r=10) is equal to cur(off=22,re=22 or 5 or 0) - * since both cannot be used for packet access and safe(old) - * pointer has smaller off that could be used for further - * 'if (ptr > data_end)' check - * Ex: - * old(off=20,r=10) and cur(off=22,r=22) and cur(off=22,r=0) mean - * that we cannot access the packet. - * The safe range is: - * [ptr, ptr + range - off) - * so whenever off >=range, it means no safe bytes from this pointer. - * When comparing old->off <= cur->off, it means that older code - * went with smaller offset and that offset was later - * used to figure out the safe range after 'if (ptr > data_end)' check - * Say, 'old' state was explored like: - * ... R3(off=0, r=0) - * R4 = R3 + 20 - * ... now R4(off=20,r=0) <-- here - * if (R4 > data_end) - * ... R4(off=20,r=20), R3(off=0,r=20) and R3 can be used to access. - * ... the code further went all the way to bpf_exit. - * Now the 'cur' state at the mark 'here' has R4(off=30,r=0). - * old_R4(off=20,r=0) equal to cur_R4(off=30,r=0), since if the verifier - * goes further, such cur_R4 will give larger safe packet range after - * 'if (R4 > data_end)' and all further insn were already good with r=20, - * so they will be good with r=30 and we can prune the search. - */ - if (!env->strict_alignment && old->off <= cur->off && - old->off >= old->range && cur->off >= cur->range) + if (rold->type == NOT_INIT) + /* explored state can't have used this */ return true; + if (rcur->type == NOT_INIT) + return false; + switch (rold->type) { + case SCALAR_VALUE: + if (rcur->type == SCALAR_VALUE) { + /* new val must satisfy old val knowledge */ + return range_within(rold, rcur) && + tnum_in(rold->var_off, rcur->var_off); + } else { + /* if we knew anything about the old value, we're not + * equal, because we can't know anything about the + * scalar value of the pointer in the new value. + */ + return rold->min_value == BPF_REGISTER_MIN_RANGE && + rold->max_value == BPF_REGISTER_MAX_RANGE && + tnum_is_unknown(rold->var_off); + } + case PTR_TO_MAP_VALUE: + if (varlen_map_access) { + /* If the new min/max/var_off satisfy the old ones and + * everything else matches, we are OK. + * We don't care about the 'id' value, because nothing + * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) + */ + return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && + range_within(rold, rcur) && + tnum_in(rold->var_off, rcur->var_off); + } else { + /* If the ranges/var_off were not the same, but + * everything else was and we didn't do a variable + * access into a map then we are a-ok. + */ + return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0; + } + case PTR_TO_MAP_VALUE_OR_NULL: + /* a PTR_TO_MAP_VALUE could be safe to use as a + * PTR_TO_MAP_VALUE_OR_NULL into the same map. + * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- + * checked, doing so could have affected others with the same + * id, and we can't check for that because we lost the id when + * we converted to a PTR_TO_MAP_VALUE. 
+ */ + if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) + return false; + if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) + return false; + /* Check our ids match any regs they're supposed to */ + return check_ids(rold->id, rcur->id, idmap); + case PTR_TO_PACKET: + if (rcur->type != PTR_TO_PACKET) + return false; + /* We must have at least as much range as the old ptr + * did, so that any accesses which were safe before are + * still safe. This is true even if old range < old off, + * since someone could have accessed through (ptr - k), or + * even done ptr -= k in a register, to get a safe access. + */ + if (rold->range > rcur->range) + return false; + /* If the offsets don't match, we can't trust our alignment; + * nor can we be sure that we won't fall out of range. + */ + if (rold->off != rcur->off) + return false; + /* id relations must be preserved */ + if (rold->id && !check_ids(rold->id, rcur->id, idmap)) + return false; + /* new val must satisfy old val knowledge */ + return range_within(rold, rcur) && + tnum_in(rold->var_off, rcur->var_off); + case PTR_TO_CTX: + case CONST_PTR_TO_MAP: + case PTR_TO_STACK: + case PTR_TO_PACKET_END: + /* Only valid matches are exact, which memcmp() above + * would have accepted + */ + default: + /* Don't know what's going on, just say it's not safe */ + return false; + } + /* Shouldn't get here; if we do, say it's not safe */ + WARN_ON_ONCE(1); return false; } @@ -2947,43 +3137,19 @@ static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *cur) { bool varlen_map_access = env->varlen_map_value_access; - struct bpf_reg_state *rold, *rcur; + struct idpair *idmap; + bool ret = false; int i; - for (i = 0; i < MAX_BPF_REG; i++) { - rold = &old->regs[i]; - rcur = &cur->regs[i]; - - if (memcmp(rold, rcur, sizeof(*rold)) == 0) - continue; - - /* If the ranges were not the same, but everything else was and - * we didn't do a variable access into a map then we are a-ok. - */ - if (!varlen_map_access && - memcmp(rold, rcur, offsetofend(struct bpf_reg_state, id)) == 0) - continue; - - /* If we didn't map access then again we don't care about the - * mismatched range values and it's ok if our old type was - * UNKNOWN and we didn't go to a NOT_INIT'ed reg. - */ - if (rold->type == NOT_INIT || - (!varlen_map_access && rold->type == UNKNOWN_VALUE && - rcur->type != NOT_INIT)) - continue; - - /* Don't care about the reg->id in this case. 
*/ - if (rold->type == PTR_TO_MAP_VALUE_OR_NULL && - rcur->type == PTR_TO_MAP_VALUE_OR_NULL && - rold->map_ptr == rcur->map_ptr) - continue; - - if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && - compare_ptrs_to_packet(env, rold, rcur)) - continue; - + idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); + /* If we failed to allocate the idmap, just say it's not safe */ + if (!idmap) return false; + + for (i = 0; i < MAX_BPF_REG; i++) { + if (!regsafe(&old->regs[i], &cur->regs[i], varlen_map_access, + idmap)) + goto out_free; } for (i = 0; i < MAX_BPF_STACK; i++) { @@ -2995,29 +3161,32 @@ static bool states_equal(struct bpf_verifier_env *env, * this verifier states are not equivalent, * return false to continue verification of this path */ - return false; + goto out_free; if (i % BPF_REG_SIZE) continue; if (old->stack_slot_type[i] != STACK_SPILL) continue; - if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE], - &cur->spilled_regs[i / BPF_REG_SIZE], - sizeof(old->spilled_regs[0]))) - /* when explored and current stack slot types are - * the same, check that stored pointers types + if (!regsafe(&old->spilled_regs[i / BPF_REG_SIZE], + &cur->spilled_regs[i / BPF_REG_SIZE], + varlen_map_access, idmap)) + /* when explored and current stack slot are both storing + * spilled registers, check that stored pointers types * are the same as well. * Ex: explored safe path could have stored - * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -8} + * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} * but current path has stored: - * (bpf_reg_state) {.type = PTR_TO_STACK, .imm = -16} + * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} * such verifier states are not equivalent. * return false to continue verification of this path */ - return false; + goto out_free; else continue; } - return true; + ret = true; +out_free: + kfree(idmap); + return ret; } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) @@ -3331,7 +3500,6 @@ process_bpf_exit: verbose("invalid BPF_LD mode\n"); return -EINVAL; } - reset_reg_range_values(regs, insn->dst_reg); } else { verbose("unknown insn class %d\n", class); return -EINVAL; -- cgit v1.2.3-55-g7522 From b03c9f9fdc37dab81ea04d5dacdc5995d4c224c2 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:26:36 +0100 Subject: bpf/verifier: track signed and unsigned min/max values Allows us to, sometimes, combine information from a signed check of one bound and an unsigned check of the other. We now track the full range of possible values, rather than restricting ourselves to [0, 1<<30) and considering anything beyond that as unknown. While this is probably not necessary, it makes the code more straightforward and symmetrical between signed and unsigned bounds. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- include/linux/bpf_verifier.h | 23 +- include/linux/tnum.h | 2 + kernel/bpf/tnum.c | 16 + kernel/bpf/verifier.c | 737 +++++++++++++++++++++++++------------------ 4 files changed, 461 insertions(+), 317 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 85936fa92d12..c61c3033522e 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -11,11 +11,15 @@ #include /* for MAX_BPF_STACK */ #include - /* Just some arbitrary values so we can safely do math without overflowing and - * are obviously wrong for any sort of memory access. 
- */ -#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) -#define BPF_REGISTER_MIN_RANGE -1 +/* Maximum variable offset umax_value permitted when resolving memory accesses. + * In practice this is far bigger than any realistic pointer offset; this limit + * ensures that umax_value + (int)off + (int)size cannot overflow a u64. + */ +#define BPF_MAX_VAR_OFF (1ULL << 31) +/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures + * that converting umax_value to int cannot overflow. + */ +#define BPF_MAX_VAR_SIZ INT_MAX struct bpf_reg_state { enum bpf_reg_type type; @@ -36,7 +40,7 @@ struct bpf_reg_state { * came from, when one is tested for != NULL. */ u32 id; - /* These three fields must be last. See states_equal() */ + /* These five fields must be last. See states_equal() */ /* For scalar types (SCALAR_VALUE), this represents our knowledge of * the actual value. * For pointer types, this represents the variable part of the offset @@ -49,9 +53,10 @@ struct bpf_reg_state { * These refer to the same value as var_off, not necessarily the actual * contents of the register. */ - s64 min_value; - u64 max_value; - bool value_from_signed; + s64 smin_value; /* minimum possible (s64)value */ + s64 smax_value; /* maximum possible (s64)value */ + u64 umin_value; /* minimum possible (u64)value */ + u64 umax_value; /* maximum possible (u64)value */ }; enum bpf_stack_slot_type { diff --git a/include/linux/tnum.h b/include/linux/tnum.h index a0b07bf1842b..0d2d3da46139 100644 --- a/include/linux/tnum.h +++ b/include/linux/tnum.h @@ -17,6 +17,8 @@ struct tnum { struct tnum tnum_const(u64 value); /* A completely unknown value */ extern const struct tnum tnum_unknown; +/* A value that's unknown except that @min <= value <= @max */ +struct tnum tnum_range(u64 min, u64 max); /* Arithmetic and logical ops */ /* Shift a tnum left (by a fixed shift) */ diff --git a/kernel/bpf/tnum.c b/kernel/bpf/tnum.c index 92eeeb1974a2..1f4bf68c12db 100644 --- a/kernel/bpf/tnum.c +++ b/kernel/bpf/tnum.c @@ -17,6 +17,22 @@ struct tnum tnum_const(u64 value) return TNUM(value, 0); } +struct tnum tnum_range(u64 min, u64 max) +{ + u64 chi = min ^ max, delta; + u8 bits = fls64(chi); + + /* special case, needed because 1ULL << 64 is undefined */ + if (bits > 63) + return tnum_unknown; + /* e.g. if chi = 4, bits = 3, delta = (1<<3) - 1 = 7. + * if chi = 0, bits = 0, delta = (1<<0) - 1 = 0, so we return + * constant min (since min == max). 
+ */ + delta = (1ULL << bits) - 1; + return TNUM(min & ~delta, delta); +} + struct tnum tnum_lshift(struct tnum a, u8 shift) { return TNUM(a.value << shift, a.mask << shift); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c3f88b466c30..7557800bf7a7 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -234,12 +234,20 @@ static void print_verifier_state(struct bpf_verifier_state *state) verbose(",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); - if (reg->min_value != BPF_REGISTER_MIN_RANGE) - verbose(",min_value=%lld", - (long long)reg->min_value); - if (reg->max_value != BPF_REGISTER_MAX_RANGE) - verbose(",max_value=%llu", - (unsigned long long)reg->max_value); + if (reg->smin_value != reg->umin_value && + reg->smin_value != S64_MIN) + verbose(",smin_value=%lld", + (long long)reg->smin_value); + if (reg->smax_value != reg->umax_value && + reg->smax_value != S64_MAX) + verbose(",smax_value=%lld", + (long long)reg->smax_value); + if (reg->umin_value != 0) + verbose(",umin_value=%llu", + (unsigned long long)reg->umin_value); + if (reg->umax_value != U64_MAX) + verbose(",umax_value=%llu", + (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; @@ -466,14 +474,25 @@ static const int caller_saved[CALLER_SAVED_REGS] = { static void __mark_reg_not_init(struct bpf_reg_state *reg); +/* Mark the unknown part of a register (variable offset or scalar value) as + * known to have the value @imm. + */ +static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) +{ + reg->id = 0; + reg->var_off = tnum_const(imm); + reg->smin_value = (s64)imm; + reg->smax_value = (s64)imm; + reg->umin_value = imm; + reg->umax_value = imm; +} + /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { - reg->var_off = tnum_const(0); - reg->min_value = 0; - reg->max_value = 0; + __mark_reg_known(reg, 0); } static void mark_reg_known_zero(struct bpf_reg_state *regs, u32 regno) @@ -488,6 +507,72 @@ static void mark_reg_known_zero(struct bpf_reg_state *regs, u32 regno) __mark_reg_known_zero(regs + regno); } +/* Attempts to improve min/max values based on var_off information */ +static void __update_reg_bounds(struct bpf_reg_state *reg) +{ + /* min signed is max(sign bit) | min(other bits) */ + reg->smin_value = max_t(s64, reg->smin_value, + reg->var_off.value | (reg->var_off.mask & S64_MIN)); + /* max signed is min(sign bit) | max(other bits) */ + reg->smax_value = min_t(s64, reg->smax_value, + reg->var_off.value | (reg->var_off.mask & S64_MAX)); + reg->umin_value = max(reg->umin_value, reg->var_off.value); + reg->umax_value = min(reg->umax_value, + reg->var_off.value | reg->var_off.mask); +} + +/* Uses signed min/max values to inform unsigned, and vice-versa */ +static void __reg_deduce_bounds(struct bpf_reg_state *reg) +{ + /* Learn sign from signed bounds. + * If we cannot cross the sign boundary, then signed and unsigned bounds + * are the same, so combine. This works even in the negative case, e.g. + * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. + */ + if (reg->smin_value >= 0 || reg->smax_value < 0) { + reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, + reg->umin_value); + reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, + reg->umax_value); + return; + } + /* Learn sign from unsigned bounds. Signed bounds cross the sign + * boundary, so we must be careful. 
+ */ + if ((s64)reg->umax_value >= 0) { + /* Positive. We can't learn anything from the smin, but smax + * is positive, hence safe. + */ + reg->smin_value = reg->umin_value; + reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, + reg->umax_value); + } else if ((s64)reg->umin_value < 0) { + /* Negative. We can't learn anything from the smax, but smin + * is negative, hence safe. + */ + reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, + reg->umin_value); + reg->smax_value = reg->umax_value; + } +} + +/* Attempts to improve var_off based on unsigned min/max information */ +static void __reg_bound_offset(struct bpf_reg_state *reg) +{ + reg->var_off = tnum_intersect(reg->var_off, + tnum_range(reg->umin_value, + reg->umax_value)); +} + +/* Reset the min/max bounds of a register */ +static void __mark_reg_unbounded(struct bpf_reg_state *reg) +{ + reg->smin_value = S64_MIN; + reg->smax_value = S64_MAX; + reg->umin_value = 0; + reg->umax_value = U64_MAX; +} + /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(struct bpf_reg_state *reg) { @@ -495,8 +580,7 @@ static void __mark_reg_unknown(struct bpf_reg_state *reg) reg->id = 0; reg->off = 0; reg->var_off = tnum_unknown; - reg->min_value = BPF_REGISTER_MIN_RANGE; - reg->max_value = BPF_REGISTER_MAX_RANGE; + __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_reg_state *regs, u32 regno) @@ -545,13 +629,6 @@ static void init_reg_state(struct bpf_reg_state *regs) mark_reg_known_zero(regs, BPF_REG_1); } -static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno) -{ - regs[regno].min_value = BPF_REGISTER_MIN_RANGE; - regs[regno].max_value = BPF_REGISTER_MAX_RANGE; - regs[regno].value_from_signed = false; -} - enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ @@ -716,26 +793,27 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, * index'es we need to make sure that whatever we use * will have a set floor within our range. */ - if (reg->min_value < 0) { + if (reg->smin_value < 0) { verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } - err = __check_map_access(env, regno, reg->min_value + off, size); + err = __check_map_access(env, regno, reg->smin_value + off, size); if (err) { verbose("R%d min value is outside of the array range\n", regno); return err; } - /* If we haven't set a max value then we need to bail - * since we can't be sure we won't do bad things. + /* If we haven't set a max value then we need to bail since we can't be + * sure we won't do bad things. + * If reg->umax_value + off could overflow, treat that as unbounded too. */ - if (reg->max_value == BPF_REGISTER_MAX_RANGE) { + if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose("R%d unbounded memory access, make sure to bounds check any array access into a map\n", regno); return -EACCES; } - err = __check_map_access(env, regno, reg->max_value + off, size); + err = __check_map_access(env, regno, reg->umax_value + off, size); if (err) verbose("R%d max value is outside of the array range\n", regno); return err; @@ -797,7 +875,7 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. 
*/ - if (reg->min_value < 0) { + if (reg->smin_value < 0) { verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; @@ -1070,12 +1148,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn /* b/h/w load zero-extends, mark upper bits as known 0 */ state->regs[value_regno].var_off = tnum_cast( state->regs[value_regno].var_off, size); - /* sign bit is known zero, so we can bound the value */ - state->regs[value_regno].min_value = 0; - state->regs[value_regno].max_value = min_t(u64, - state->regs[value_regno].var_off.value | - state->regs[value_regno].var_off.mask, - BPF_REGISTER_MAX_RANGE); + __update_reg_bounds(&state->regs[value_regno]); } return err; } @@ -1333,13 +1406,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, */ meta = NULL; - if (reg->min_value < 0) { + if (reg->smin_value < 0) { verbose("R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } - if (reg->min_value == 0) { + if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); @@ -1347,13 +1420,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, return err; } - if (reg->max_value == BPF_REGISTER_MAX_RANGE) { + if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose("R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, - reg->max_value, + reg->umax_value, zero_size_allowed, meta); } @@ -1600,33 +1673,35 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) return 0; } -static void check_reg_overflow(struct bpf_reg_state *reg) -{ - if (reg->max_value > BPF_REGISTER_MAX_RANGE) - reg->max_value = BPF_REGISTER_MAX_RANGE; - if (reg->min_value < BPF_REGISTER_MIN_RANGE || - reg->min_value > BPF_REGISTER_MAX_RANGE) - reg->min_value = BPF_REGISTER_MIN_RANGE; -} - static void coerce_reg_to_32(struct bpf_reg_state *reg) { - /* 32-bit values can't be negative as an s64 */ - if (reg->min_value < 0) - reg->min_value = 0; /* clear high 32 bits */ reg->var_off = tnum_cast(reg->var_off, 4); - /* Did value become known? Then update bounds */ - if (tnum_is_const(reg->var_off)) { - if ((s64)reg->var_off.value > BPF_REGISTER_MIN_RANGE) - reg->min_value = reg->var_off.value; - if (reg->var_off.value < BPF_REGISTER_MAX_RANGE) - reg->max_value = reg->var_off.value; - } + /* Update bounds */ + __update_reg_bounds(reg); +} + +static bool signed_add_overflows(s64 a, s64 b) +{ + /* Do the add in u64, where overflow is well-defined */ + s64 res = (s64)((u64)a + (u64)b); + + if (b < 0) + return res > a; + return res < a; +} + +static bool signed_sub_overflows(s64 a, s64 b) +{ + /* Do the sub in u64, where overflow is well-defined */ + s64 res = (s64)((u64)a - (u64)b); + + if (b < 0) + return res < a; + return res > a; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. - * Caller must check_reg_overflow all argument regs beforehand. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
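[Editor's aside between hunks: the newly added signed_add_overflows()/signed_sub_overflows() helpers in the hunk above avoid C's undefined signed overflow by doing the arithmetic in u64, where wraparound is well defined, and then checking whether the result moved in a direction that is impossible for the sign of the second operand. Below is a minimal userspace sketch of the same check; the demo names and the main() harness are illustrative and not part of the patch, and, like the kernel code, it assumes the usual two's-complement conversion when casting the u64 result back to s64.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the patch's signed_add_overflows(): add in u64 (well-defined
 * wraparound), then check whether the result moved the "wrong" way for
 * the sign of b. Conversion back to int64_t is implementation-defined
 * but is two's-complement on the compilers the kernel cares about.
 */
static bool signed_add_overflows_demo(int64_t a, int64_t b)
{
	int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);

	if (b < 0)
		return res > a;
	return res < a;
}

int main(void)
{
	printf("%d\n", signed_add_overflows_demo(INT64_MAX, 1));  /* 1: wraps past S64_MAX */
	printf("%d\n", signed_add_overflows_demo(INT64_MIN, -1)); /* 1: wraps past S64_MIN */
	printf("%d\n", signed_add_overflows_demo(3, 4));          /* 0: no overflow */
	return 0;
}

[In the hunks that follow, adjust_ptr_min_max_vals() and adjust_scalar_min_max_vals() call these helpers and, whenever an add or subtract could wrap, widen the signed bounds to [S64_MIN, S64_MAX] rather than tracking a wrong range.]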
@@ -1638,16 +1713,23 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, { struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; bool known = tnum_is_const(off_reg->var_off); - s64 min_val = off_reg->min_value; - u64 max_val = off_reg->max_value; + s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, + smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; + u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, + umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u8 opcode = BPF_OP(insn->code); u32 dst = insn->dst_reg; dst_reg = ®s[dst]; - if (WARN_ON_ONCE(known && (min_val != max_val))) { + if (WARN_ON_ONCE(known && (smin_val != smax_val))) { + print_verifier_state(&env->cur_state); + verbose("verifier internal error: known but bad sbounds\n"); + return -EINVAL; + } + if (WARN_ON_ONCE(known && (umin_val != umax_val))) { print_verifier_state(&env->cur_state); - verbose("verifier internal error\n"); + verbose("verifier internal error: known but bad ubounds\n"); return -EINVAL; } @@ -1689,22 +1771,18 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ - if (known && (ptr_reg->off + min_val == - (s64)(s32)(ptr_reg->off + min_val))) { + if (known && (ptr_reg->off + smin_val == + (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ - dst_reg->min_value = ptr_reg->min_value; - dst_reg->max_value = ptr_reg->max_value; + dst_reg->smin_value = smin_ptr; + dst_reg->smax_value = smax_ptr; + dst_reg->umin_value = umin_ptr; + dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; - dst_reg->off = ptr_reg->off + min_val; + dst_reg->off = ptr_reg->off + smin_val; dst_reg->range = ptr_reg->range; break; } - if (max_val == BPF_REGISTER_MAX_RANGE) { - if (!env->allow_ptr_leaks) - verbose("R%d tried to add unbounded value to pointer\n", - dst); - return -EACCES; - } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive @@ -1714,12 +1792,22 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, * added into the variable offset, and we copy the fixed offset * from ptr_reg. */ - if (min_val <= BPF_REGISTER_MIN_RANGE) - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) - dst_reg->min_value += min_val; - if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) - dst_reg->max_value += max_val; + if (signed_add_overflows(smin_ptr, smin_val) || + signed_add_overflows(smax_ptr, smax_val)) { + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; + } else { + dst_reg->smin_value = smin_ptr + smin_val; + dst_reg->smax_value = smax_ptr + smax_val; + } + if (umin_ptr + umin_val < umin_ptr || + umax_ptr + umax_val < umax_ptr) { + dst_reg->umin_value = 0; + dst_reg->umax_value = U64_MAX; + } else { + dst_reg->umin_value = umin_ptr + umin_val; + dst_reg->umax_value = umax_ptr + umax_val; + } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (ptr_reg->type == PTR_TO_PACKET) { @@ -1746,43 +1834,46 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, dst); return -EACCES; } - if (known && (ptr_reg->off - min_val == - (s64)(s32)(ptr_reg->off - min_val))) { + if (known && (ptr_reg->off - smin_val == + (s64)(s32)(ptr_reg->off - smin_val))) { /* pointer -= K. 
Subtract it from fixed offset */ - dst_reg->min_value = ptr_reg->min_value; - dst_reg->max_value = ptr_reg->max_value; + dst_reg->smin_value = smin_ptr; + dst_reg->smax_value = smax_ptr; + dst_reg->umin_value = umin_ptr; + dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; - dst_reg->off = ptr_reg->off - min_val; + dst_reg->off = ptr_reg->off - smin_val; dst_reg->range = ptr_reg->range; break; } - /* Subtracting a negative value will just confuse everything. - * This can happen if off_reg is an immediate. - */ - if ((s64)max_val < 0) { - if (!env->allow_ptr_leaks) - verbose("R%d tried to subtract negative max_val %lld from pointer\n", - dst, (s64)max_val); - return -EACCES; - } /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. */ - if (max_val >= BPF_REGISTER_MAX_RANGE) - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) - dst_reg->min_value -= max_val; - if (min_val <= BPF_REGISTER_MIN_RANGE) - dst_reg->max_value = BPF_REGISTER_MAX_RANGE; - if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) - dst_reg->max_value -= min_val; + if (signed_sub_overflows(smin_ptr, smax_val) || + signed_sub_overflows(smax_ptr, smin_val)) { + /* Overflow possible, we know nothing */ + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; + } else { + dst_reg->smin_value = smin_ptr - smax_val; + dst_reg->smax_value = smax_ptr - smin_val; + } + if (umin_ptr < umax_val) { + /* Overflow possible, we know nothing */ + dst_reg->umin_value = 0; + dst_reg->umax_value = U64_MAX; + } else { + /* Cannot overflow (as long as bounds are consistent) */ + dst_reg->umin_value = umin_ptr - umax_val; + dst_reg->umax_value = umax_ptr - umin_val; + } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (ptr_reg->type == PTR_TO_PACKET) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ - if (min_val < 0) + if (smin_val < 0) dst_reg->range = 0; } break; @@ -1805,7 +1896,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, return -EACCES; } - check_reg_overflow(dst_reg); + __update_reg_bounds(dst_reg); + __reg_deduce_bounds(dst_reg); + __reg_bound_offset(dst_reg); return 0; } @@ -1815,157 +1908,217 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = env->cur_state.regs; - s64 min_val = BPF_REGISTER_MIN_RANGE; - u64 max_val = BPF_REGISTER_MAX_RANGE; u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; + s64 smin_val, smax_val; + u64 umin_val, umax_val; if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->64 */ coerce_reg_to_32(dst_reg); coerce_reg_to_32(&src_reg); } - min_val = src_reg.min_value; - max_val = src_reg.max_value; + smin_val = src_reg.smin_value; + smax_val = src_reg.smax_value; + umin_val = src_reg.umin_value; + umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); switch (opcode) { case BPF_ADD: - if (min_val == BPF_REGISTER_MIN_RANGE) - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) - dst_reg->min_value += min_val; - /* if max_val is MAX_RANGE, this will saturate dst->max */ - if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) - dst_reg->max_value += max_val; + if (signed_add_overflows(dst_reg->smin_value, smin_val) || + 
signed_add_overflows(dst_reg->smax_value, smax_val)) { + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; + } else { + dst_reg->smin_value += smin_val; + dst_reg->smax_value += smax_val; + } + if (dst_reg->umin_value + umin_val < umin_val || + dst_reg->umax_value + umax_val < umax_val) { + dst_reg->umin_value = 0; + dst_reg->umax_value = U64_MAX; + } else { + dst_reg->umin_value += umin_val; + dst_reg->umax_value += umax_val; + } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: - if (max_val == BPF_REGISTER_MAX_RANGE) - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) - dst_reg->min_value -= max_val; - if (min_val == BPF_REGISTER_MIN_RANGE) - dst_reg->max_value = BPF_REGISTER_MAX_RANGE; - if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) - dst_reg->max_value -= min_val; + if (signed_sub_overflows(dst_reg->smin_value, smax_val) || + signed_sub_overflows(dst_reg->smax_value, smin_val)) { + /* Overflow possible, we know nothing */ + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; + } else { + dst_reg->smin_value -= smax_val; + dst_reg->smax_value -= smin_val; + } + if (dst_reg->umin_value < umax_val) { + /* Overflow possible, we know nothing */ + dst_reg->umin_value = 0; + dst_reg->umax_value = U64_MAX; + } else { + /* Cannot overflow (as long as bounds are consistent) */ + dst_reg->umin_value -= umax_val; + dst_reg->umax_value -= umin_val; + } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: - if (min_val < 0 || dst_reg->min_value < 0) { + dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); + if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ - __mark_reg_unknown(dst_reg); + __mark_reg_unbounded(dst_reg); + __update_reg_bounds(dst_reg); break; } - dst_reg->min_value *= min_val; - /* if max_val is MAX_RANGE, this will saturate dst->max. - * We know MAX_RANGE ** 2 won't overflow a u64, because - * MAX_RANGE itself fits in a u32. + /* Both values are positive, so we can work with unsigned and + * copy the result to signed (unless it exceeds S64_MAX). */ - BUILD_BUG_ON(BPF_REGISTER_MAX_RANGE > (u32)-1); - if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) - dst_reg->max_value *= max_val; - dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); + if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { + /* Potential overflow, we know nothing */ + __mark_reg_unbounded(dst_reg); + /* (except what we can learn from the var_off) */ + __update_reg_bounds(dst_reg); + break; + } + dst_reg->umin_value *= umin_val; + dst_reg->umax_value *= umax_val; + if (dst_reg->umax_value > S64_MAX) { + /* Overflow possible, we know nothing */ + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; + } else { + dst_reg->smin_value = dst_reg->umin_value; + dst_reg->smax_value = dst_reg->umax_value; + } break; case BPF_AND: if (src_known && dst_known) { - u64 value = dst_reg->var_off.value & src_reg.var_off.value; - - dst_reg->var_off = tnum_const(value); - dst_reg->min_value = dst_reg->max_value = min_t(u64, - value, BPF_REGISTER_MAX_RANGE); + __mark_reg_known(dst_reg, dst_reg->var_off.value & + src_reg.var_off.value); break; } - /* Lose min_value when AND'ing negative numbers, ain't nobody - * got time for that. Otherwise we get our minimum from the - * var_off, since that's inherently bitwise. - * Our maximum is the minimum of the operands' maxima. 
+ /* We get our minimum from the var_off, since that's inherently + * bitwise. Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); - if (min_val < 0 && dst_reg->min_value < 0) - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - else - dst_reg->min_value = dst_reg->var_off.value; - dst_reg->max_value = min(dst_reg->max_value, max_val); + dst_reg->umin_value = dst_reg->var_off.value; + dst_reg->umax_value = min(dst_reg->umax_value, umax_val); + if (dst_reg->smin_value < 0 || smin_val < 0) { + /* Lose signed bounds when ANDing negative numbers, + * ain't nobody got time for that. + */ + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; + } else { + /* ANDing two positives gives a positive, so safe to + * cast result into s64. + */ + dst_reg->smin_value = dst_reg->umin_value; + dst_reg->smax_value = dst_reg->umax_value; + } + /* We may learn something more from the var_off */ + __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { - u64 value = dst_reg->var_off.value | src_reg.var_off.value; - - dst_reg->var_off = tnum_const(value); - dst_reg->min_value = dst_reg->max_value = min_t(u64, - value, BPF_REGISTER_MAX_RANGE); + __mark_reg_known(dst_reg, dst_reg->var_off.value | + src_reg.var_off.value); break; } - /* Lose ranges when OR'ing negative numbers, ain't nobody got - * time for that. Otherwise we get our maximum from the var_off, - * and our minimum is the maximum of the operands' minima. + /* We get our maximum from the var_off, and our minimum is the + * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); - if (min_val < 0 || dst_reg->min_value < 0) { - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - dst_reg->max_value = BPF_REGISTER_MAX_RANGE; + dst_reg->umin_value = max(dst_reg->umin_value, umin_val); + dst_reg->umax_value = dst_reg->var_off.value | + dst_reg->var_off.mask; + if (dst_reg->smin_value < 0 || smin_val < 0) { + /* Lose signed bounds when ORing negative numbers, + * ain't nobody got time for that. + */ + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; } else { - dst_reg->min_value = max(dst_reg->min_value, min_val); - dst_reg->max_value = dst_reg->var_off.value | dst_reg->var_off.mask; + /* ORing two positives gives a positive, so safe to + * cast result into s64. + */ + dst_reg->smin_value = dst_reg->umin_value; + dst_reg->smax_value = dst_reg->umax_value; } + /* We may learn something more from the var_off */ + __update_reg_bounds(dst_reg); break; case BPF_LSH: - if (min_val < 0) { - /* LSH by a negative number is undefined */ + if (umax_val > 63) { + /* Shifts greater than 63 are undefined. This includes + * shifts by a negative number. + */ mark_reg_unknown(regs, insn->dst_reg); break; } - /* Gotta have special overflow logic here, if we're shifting - * more than MAX_RANGE then just assume we have an invalid - * range. 
+ /* We lose all sign bit information (except what we can pick + * up from var_off) */ - if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) { - dst_reg->min_value = BPF_REGISTER_MIN_RANGE; - dst_reg->var_off = tnum_unknown; + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; + /* If we might shift our top bit out, then we know nothing */ + if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { + dst_reg->umin_value = 0; + dst_reg->umax_value = U64_MAX; } else { - if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) - dst_reg->min_value <<= min_val; - if (src_known) - dst_reg->var_off = tnum_lshift(dst_reg->var_off, min_val); - else - dst_reg->var_off = tnum_lshift(tnum_unknown, min_val); + dst_reg->umin_value <<= umin_val; + dst_reg->umax_value <<= umax_val; } - if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) - dst_reg->max_value = BPF_REGISTER_MAX_RANGE; - else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) - dst_reg->max_value <<= max_val; + if (src_known) + dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); + else + dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val); + /* We may learn something more from the var_off */ + __update_reg_bounds(dst_reg); break; case BPF_RSH: - if (min_val < 0) { - /* RSH by a negative number is undefined */ + if (umax_val > 63) { + /* Shifts greater than 63 are undefined. This includes + * shifts by a negative number. + */ mark_reg_unknown(regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift, so make the appropriate casts */ - if (dst_reg->min_value < 0) { - if (min_val) + if (dst_reg->smin_value < 0) { + if (umin_val) { /* Sign bit will be cleared */ - dst_reg->min_value = 0; + dst_reg->smin_value = 0; + } else { + /* Lost sign bit information */ + dst_reg->smin_value = S64_MIN; + dst_reg->smax_value = S64_MAX; + } } else { - dst_reg->min_value = - (u64)(dst_reg->min_value) >> min_val; + dst_reg->smin_value = + (u64)(dst_reg->smin_value) >> umax_val; } if (src_known) - dst_reg->var_off = tnum_rshift(dst_reg->var_off, min_val); + dst_reg->var_off = tnum_rshift(dst_reg->var_off, + umin_val); else - dst_reg->var_off = tnum_rshift(tnum_unknown, min_val); - if (dst_reg->max_value == BPF_REGISTER_MAX_RANGE) - dst_reg->max_value = ~0; - dst_reg->max_value >>= max_val; + dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val); + dst_reg->umin_value >>= umax_val; + dst_reg->umax_value >>= umin_val; + /* We may learn something more from the var_off */ + __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(regs, insn->dst_reg); break; } - check_reg_overflow(dst_reg); + __reg_deduce_bounds(dst_reg); + __reg_bound_offset(dst_reg); return 0; } @@ -1981,14 +2134,11 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, int rc; dst_reg = ®s[insn->dst_reg]; - check_reg_overflow(dst_reg); src_reg = NULL; if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; if (BPF_SRC(insn->code) == BPF_X) { src_reg = ®s[insn->src_reg]; - check_reg_overflow(src_reg); - if (src_reg->type != SCALAR_VALUE) { if (dst_reg->type != SCALAR_VALUE) { /* Combining two pointers by any ALU op yields @@ -2035,11 +2185,8 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, * need to be able to read from this state. 
*/ off_reg.type = SCALAR_VALUE; - off_reg.var_off = tnum_const(insn->imm); - off_reg.min_value = insn->imm; - off_reg.max_value = insn->imm; + __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; - check_reg_overflow(src_reg); if (ptr_reg) { /* pointer += K */ rc = adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); @@ -2144,22 +2291,17 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EACCES; } mark_reg_unknown(regs, insn->dst_reg); - /* high 32 bits are known zero. But this is - * still out of range for max_value, so leave - * that. - */ + /* high 32 bits are known zero. */ regs[insn->dst_reg].var_off = tnum_cast( regs[insn->dst_reg].var_off, 4); + __update_reg_bounds(®s[insn->dst_reg]); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; - regs[insn->dst_reg].var_off = tnum_const(insn->imm); - regs[insn->dst_reg].max_value = insn->imm; - regs[insn->dst_reg].min_value = insn->imm; - regs[insn->dst_reg].id = 0; + __mark_reg_known(regs + insn->dst_reg, insn->imm); } } else if (opcode > BPF_END) { @@ -2226,8 +2368,8 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, /* This doesn't give us any range */ return; - if (dst_reg->max_value > MAX_PACKET_OFF || - dst_reg->max_value + dst_reg->off > MAX_PACKET_OFF) + if (dst_reg->umax_value > MAX_PACKET_OFF || + dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. */ @@ -2291,8 +2433,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { - bool value_from_signed = true; - /* If the dst_reg is a pointer, we can't learn anything about its * variable offset from the compare (unless src_reg were a pointer into * the same object, but we don't bother with that. @@ -2307,62 +2447,45 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. */ - true_reg->max_value = true_reg->min_value = val; - true_reg->var_off = tnum_const(val); + __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ - false_reg->max_value = false_reg->min_value = val; - false_reg->var_off = tnum_const(val); + __mark_reg_known(false_reg, val); break; case BPF_JGT: - value_from_signed = false; - /* fallthrough */ + false_reg->umax_value = min(false_reg->umax_value, val); + true_reg->umin_value = max(true_reg->umin_value, val + 1); + break; case BPF_JSGT: - if (true_reg->value_from_signed != value_from_signed) - reset_reg_range_values(true_reg, 0); - if (false_reg->value_from_signed != value_from_signed) - reset_reg_range_values(false_reg, 0); - if (opcode == BPF_JGT) { - /* Unsigned comparison, the minimum value is 0. */ - false_reg->min_value = 0; - } - /* If this is false then we know the maximum val is val, - * otherwise we know the min val is val+1. 
- */ - false_reg->max_value = val; - false_reg->value_from_signed = value_from_signed; - true_reg->min_value = val + 1; - true_reg->value_from_signed = value_from_signed; + false_reg->smax_value = min_t(s64, false_reg->smax_value, val); + true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); break; case BPF_JGE: - value_from_signed = false; - /* fallthrough */ + false_reg->umax_value = min(false_reg->umax_value, val - 1); + true_reg->umin_value = max(true_reg->umin_value, val); + break; case BPF_JSGE: - if (true_reg->value_from_signed != value_from_signed) - reset_reg_range_values(true_reg, 0); - if (false_reg->value_from_signed != value_from_signed) - reset_reg_range_values(false_reg, 0); - if (opcode == BPF_JGE) { - /* Unsigned comparison, the minimum value is 0. */ - false_reg->min_value = 0; - } - /* If this is false then we know the maximum value is val - 1, - * otherwise we know the mimimum value is val. - */ - false_reg->max_value = val - 1; - false_reg->value_from_signed = value_from_signed; - true_reg->min_value = val; - true_reg->value_from_signed = value_from_signed; + false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); + true_reg->smin_value = max_t(s64, true_reg->smin_value, val); break; default: break; } - check_reg_overflow(false_reg); - check_reg_overflow(true_reg); + __reg_deduce_bounds(false_reg); + __reg_deduce_bounds(true_reg); + /* We might have learned some bits from the bounds. */ + __reg_bound_offset(false_reg); + __reg_bound_offset(true_reg); + /* Intersecting with the old var_off might have improved our bounds + * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), + * then new var_off is (0; 0x7f...fc) which improves our umax. + */ + __update_reg_bounds(false_reg); + __update_reg_bounds(true_reg); } /* Same as above, but for the case that dst_reg holds a constant and src_reg is @@ -2372,8 +2495,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { - bool value_from_signed = true; - if (__is_pointer_value(false, false_reg)) return; @@ -2382,77 +2503,76 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. */ - true_reg->max_value = true_reg->min_value = val; - true_reg->var_off = tnum_const(val); + __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ - false_reg->max_value = false_reg->min_value = val; - false_reg->var_off = tnum_const(val); + __mark_reg_known(false_reg, val); break; case BPF_JGT: - value_from_signed = false; - /* fallthrough */ + true_reg->umax_value = min(true_reg->umax_value, val - 1); + false_reg->umin_value = max(false_reg->umin_value, val); + break; case BPF_JSGT: - if (true_reg->value_from_signed != value_from_signed) - reset_reg_range_values(true_reg, 0); - if (false_reg->value_from_signed != value_from_signed) - reset_reg_range_values(false_reg, 0); - if (opcode == BPF_JGT) { - /* Unsigned comparison, the minimum value is 0. */ - true_reg->min_value = 0; - } - /* - * If this is false, then the val is <= the register, if it is - * true the register <= to the val. 
- */ - false_reg->min_value = val; - false_reg->value_from_signed = value_from_signed; - true_reg->max_value = val - 1; - true_reg->value_from_signed = value_from_signed; + true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); + false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JGE: - value_from_signed = false; - /* fallthrough */ + true_reg->umax_value = min(true_reg->umax_value, val); + false_reg->umin_value = max(false_reg->umin_value, val + 1); + break; case BPF_JSGE: - if (true_reg->value_from_signed != value_from_signed) - reset_reg_range_values(true_reg, 0); - if (false_reg->value_from_signed != value_from_signed) - reset_reg_range_values(false_reg, 0); - if (opcode == BPF_JGE) { - /* Unsigned comparison, the minimum value is 0. */ - true_reg->min_value = 0; - } - /* If this is false then constant < register, if it is true then - * the register < constant. - */ - false_reg->min_value = val + 1; - false_reg->value_from_signed = value_from_signed; - true_reg->max_value = val; - true_reg->value_from_signed = value_from_signed; + true_reg->smax_value = min_t(s64, true_reg->smax_value, val); + false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; default: break; } - check_reg_overflow(false_reg); - check_reg_overflow(true_reg); + __reg_deduce_bounds(false_reg); + __reg_deduce_bounds(true_reg); + /* We might have learned some bits from the bounds. */ + __reg_bound_offset(false_reg); + __reg_bound_offset(true_reg); + /* Intersecting with the old var_off might have improved our bounds + * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), + * then new var_off is (0; 0x7f...fc) which improves our umax. + */ + __update_reg_bounds(false_reg); + __update_reg_bounds(true_reg); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { - src_reg->min_value = dst_reg->min_value = max(src_reg->min_value, - dst_reg->min_value); - src_reg->max_value = dst_reg->max_value = min(src_reg->max_value, - dst_reg->max_value); + src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, + dst_reg->umin_value); + src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, + dst_reg->umax_value); + src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, + dst_reg->smin_value); + src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, + dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); - check_reg_overflow(src_reg); - check_reg_overflow(dst_reg); + /* We might have learned new bounds from the var_off. */ + __update_reg_bounds(src_reg); + __update_reg_bounds(dst_reg); + /* We might have learned something about the sign bit. */ + __reg_deduce_bounds(src_reg); + __reg_deduce_bounds(dst_reg); + /* We might have learned some bits from the bounds. */ + __reg_bound_offset(src_reg); + __reg_bound_offset(dst_reg); + /* Intersecting with the old var_off might have improved our bounds + * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), + * then new var_off is (0; 0x7f...fc) which improves our umax. 
+ */ + __update_reg_bounds(src_reg); + __update_reg_bounds(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, @@ -2467,6 +2587,7 @@ static void reg_combine_min_max(struct bpf_reg_state *true_src, break; case BPF_JNE: __reg_combine_min_max(false_src, false_dst); + break; } } @@ -2480,11 +2601,11 @@ static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. */ - if (WARN_ON_ONCE(reg->min_value || reg->max_value || - reg->var_off.value || reg->var_off.mask || + if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || + !tnum_equals_const(reg->var_off, 0) || reg->off)) { - reg->min_value = reg->max_value = reg->off = 0; - reg->var_off = tnum_const(0); + __mark_reg_known_zero(reg); + reg->off = 0; } if (is_null) { reg->type = SCALAR_VALUE; @@ -2676,11 +2797,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; regs[insn->dst_reg].type = SCALAR_VALUE; - regs[insn->dst_reg].min_value = imm; - regs[insn->dst_reg].max_value = imm; - check_reg_overflow(®s[insn->dst_reg]); - regs[insn->dst_reg].var_off = tnum_const(imm); - regs[insn->dst_reg].id = 0; + __mark_reg_known(®s[insn->dst_reg], imm); return 0; } @@ -2968,8 +3085,10 @@ err_free: static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { - return old->min_value <= cur->min_value && - old->max_value >= cur->max_value; + return old->umin_value <= cur->umin_value && + old->umax_value >= cur->umax_value && + old->smin_value <= cur->smin_value && + old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ @@ -3032,8 +3151,10 @@ static bool regsafe(struct bpf_reg_state *rold, * equal, because we can't know anything about the * scalar value of the pointer in the new value. */ - return rold->min_value == BPF_REGISTER_MIN_RANGE && - rold->max_value == BPF_REGISTER_MAX_RANGE && + return rold->umin_value == 0 && + rold->umax_value == U64_MAX && + rold->smin_value == S64_MIN && + rold->smax_value == S64_MAX && tnum_is_unknown(rold->var_off); } case PTR_TO_MAP_VALUE: -- cgit v1.2.3-55-g7522 From 7d1238f21026e277936fff408b73bc19e89239a8 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:26:56 +0100 Subject: bpf/verifier: more concise register state logs for constant var_off Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- kernel/bpf/verifier.c | 46 +++++++++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7557800bf7a7..08a6fa0369c2 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -234,25 +234,33 @@ static void print_verifier_state(struct bpf_verifier_state *state) verbose(",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); - if (reg->smin_value != reg->umin_value && - reg->smin_value != S64_MIN) - verbose(",smin_value=%lld", - (long long)reg->smin_value); - if (reg->smax_value != reg->umax_value && - reg->smax_value != S64_MAX) - verbose(",smax_value=%lld", - (long long)reg->smax_value); - if (reg->umin_value != 0) - verbose(",umin_value=%llu", - (unsigned long long)reg->umin_value); - if (reg->umax_value != U64_MAX) - verbose(",umax_value=%llu", - (unsigned long long)reg->umax_value); - if (!tnum_is_unknown(reg->var_off)) { - char tn_buf[48]; - - tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); - verbose(",var_off=%s", tn_buf); + if (tnum_is_const(reg->var_off)) { + /* Typically an immediate SCALAR_VALUE, but + * could be a pointer whose offset is too big + * for reg->off + */ + verbose(",imm=%llx", reg->var_off.value); + } else { + if (reg->smin_value != reg->umin_value && + reg->smin_value != S64_MIN) + verbose(",smin_value=%lld", + (long long)reg->smin_value); + if (reg->smax_value != reg->umax_value && + reg->smax_value != S64_MAX) + verbose(",smax_value=%lld", + (long long)reg->smax_value); + if (reg->umin_value != 0) + verbose(",umin_value=%llu", + (unsigned long long)reg->umin_value); + if (reg->umax_value != U64_MAX) + verbose(",umax_value=%llu", + (unsigned long long)reg->umax_value); + if (!tnum_is_unknown(reg->var_off)) { + char tn_buf[48]; + + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); + verbose(",var_off=%s", tn_buf); + } } verbose(")"); } -- cgit v1.2.3-55-g7522 From f65b18493f4f13e8ff38425f22f9b2c7bc435197 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:27:12 +0100 Subject: selftests/bpf: change test_verifier expectations Some of the verifier's error messages have changed, and some constructs that previously couldn't be verified are now accepted. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- tools/testing/selftests/bpf/test_verifier.c | 332 +++++++++++++--------------- 1 file changed, 152 insertions(+), 180 deletions(-) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index addea82f76c9..06914941f376 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -421,7 +421,7 @@ static struct bpf_test tests[] = { BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R1 pointer arithmetic", + .errstr_unpriv = "R1 subtraction from stack pointer", .result_unpriv = REJECT, .errstr = "R1 invalid mem access", .result = REJECT, @@ -603,8 +603,9 @@ static struct bpf_test tests[] = { BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4), BPF_EXIT_INSN(), }, - .errstr = "misaligned access", + .errstr = "misaligned stack access", .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "invalid map_fd for function call", @@ -650,8 +651,9 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr = "misaligned access", + .errstr = "misaligned value access", .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "sometimes access memory with incorrect alignment", @@ -672,6 +674,7 @@ static struct bpf_test tests[] = { .errstr = "R0 invalid mem access", .errstr_unpriv = "R0 leaks addr", .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "jump test 1", @@ -1215,8 +1218,9 @@ static struct bpf_test tests[] = { offsetof(struct __sk_buff, cb[0]) + 1), BPF_EXIT_INSN(), }, - .errstr = "misaligned access", + .errstr = "misaligned context access", .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "check __sk_buff->hash, offset 0, half store not permitted", @@ -1319,8 +1323,9 @@ static struct bpf_test tests[] = { offsetof(struct __sk_buff, cb[0]) + 2), BPF_EXIT_INSN(), }, - .errstr = "misaligned access", + .errstr = "misaligned context access", .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "check cb access: word, unaligned 2", @@ -1330,8 +1335,9 @@ static struct bpf_test tests[] = { offsetof(struct __sk_buff, cb[4]) + 1), BPF_EXIT_INSN(), }, - .errstr = "misaligned access", + .errstr = "misaligned context access", .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "check cb access: word, unaligned 3", @@ -1341,8 +1347,9 @@ static struct bpf_test tests[] = { offsetof(struct __sk_buff, cb[4]) + 2), BPF_EXIT_INSN(), }, - .errstr = "misaligned access", + .errstr = "misaligned context access", .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "check cb access: word, unaligned 4", @@ -1352,8 +1359,9 @@ static struct bpf_test tests[] = { offsetof(struct __sk_buff, cb[4]) + 3), BPF_EXIT_INSN(), }, - .errstr = "misaligned access", + .errstr = "misaligned context access", .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "check cb access: double", @@ -1379,8 +1387,9 @@ static struct bpf_test tests[] = { offsetof(struct __sk_buff, cb[1])), BPF_EXIT_INSN(), }, - .errstr = "misaligned access", + .errstr = "misaligned context access", .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "check cb access: double, unaligned 2", @@ -1390,8 +1399,9 @@ static struct bpf_test tests[] = { offsetof(struct __sk_buff, cb[3])), BPF_EXIT_INSN(), }, - .errstr = "misaligned access", + .errstr = "misaligned context access", .result = REJECT, + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "check cb access: double, oob 1", @@ -1523,7 +1533,8 @@ static struct 
bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "misaligned access off -6 size 8", + .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "PTR_TO_STACK store/load - bad alignment on reg", @@ -1535,7 +1546,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "misaligned access off -2 size 8", + .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", + .flags = F_LOAD_WITH_STRICT_ALIGNMENT, }, { "PTR_TO_STACK store/load - out of bounds low", @@ -1579,8 +1591,6 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = ACCEPT, - .result_unpriv = REJECT, - .errstr_unpriv = "R1 pointer arithmetic", }, { "unpriv: add pointer to pointer", @@ -1591,7 +1601,7 @@ static struct bpf_test tests[] = { }, .result = ACCEPT, .result_unpriv = REJECT, - .errstr_unpriv = "R1 pointer arithmetic", + .errstr_unpriv = "R1 pointer += pointer", }, { "unpriv: neg pointer", @@ -1932,10 +1942,7 @@ static struct bpf_test tests[] = { BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), BPF_EXIT_INSN(), }, - .errstr_unpriv = "pointer arithmetic prohibited", - .result_unpriv = REJECT, - .errstr = "R1 invalid mem access", - .result = REJECT, + .result = ACCEPT, }, { "unpriv: cmp of stack pointer", @@ -1999,7 +2006,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "invalid stack type R3", + .errstr = "R4 min value is negative", .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { @@ -2016,7 +2023,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "invalid stack type R3", + .errstr = "R4 min value is negative", .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { @@ -2218,7 +2225,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "invalid stack type R3 off=-1 access_size=-1", + .errstr = "R4 min value is negative", .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { @@ -2235,7 +2242,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "invalid stack type R3 off=-1 access_size=2147483647", + .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'", .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { @@ -2252,7 +2259,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "invalid stack type R3 off=-512 access_size=2147483647", + .errstr = "R4 unbounded memory access, use 'var &= const' or 'if (var < const)'", .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { @@ -2652,7 +2659,7 @@ static struct bpf_test tests[] = { BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1), BPF_JMP_A(-6), }, - .errstr = "misaligned packet access off 2+15+-4 size 4", + .errstr = "misaligned packet access off 2+(0x0; 0x0)+15+-4 size 4", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .flags = F_LOAD_WITH_STRICT_ALIGNMENT, @@ -2795,7 +2802,7 @@ static struct bpf_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, - .errstr = "cannot add integer value with 47 upper zero bits to ptr_to_packet", + .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)", }, { "direct packet access: test24 (x += pkt_ptr, 5)", @@ -3112,7 +3119,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to packet: test14, cls helper fail sub", + "helper access to packet: test14, cls helper ok sub", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -3132,12 +3139,36 @@ static 
struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "helper access to packet: test15, cls helper fail sub", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_7, 6), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 12), + BPF_MOV64_IMM(BPF_REG_2, 4), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_csum_diff), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, .result = REJECT, - .errstr = "type=inv expected=fp", + .errstr = "invalid access to packet", .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to packet: test15, cls helper fail range 1", + "helper access to packet: test16, cls helper fail range 1", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -3162,7 +3193,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to packet: test16, cls helper fail range 2", + "helper access to packet: test17, cls helper fail range 2", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -3183,11 +3214,11 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "invalid access to packet", + .errstr = "R2 min value is negative", .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to packet: test17, cls helper fail range 3", + "helper access to packet: test18, cls helper fail range 3", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -3208,11 +3239,11 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .result = REJECT, - .errstr = "invalid access to packet", + .errstr = "R2 min value is negative", .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to packet: test18, cls helper fail range zero", + "helper access to packet: test19, cls helper fail range zero", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -3237,7 +3268,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to packet: test19, pkt end as input", + "helper access to packet: test20, pkt end as input", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -3262,7 +3293,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to packet: test20, wrong reg", + "helper access to packet: test21, wrong reg", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -3322,7 +3353,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 leaks addr", .result_unpriv = REJECT, .result = ACCEPT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -3346,7 +3377,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 leaks addr", .result_unpriv = REJECT, .result = ACCEPT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -3374,7 +3405,7 @@ static struct bpf_test tests[] = { 
BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 leaks addr", .result_unpriv = REJECT, .result = ACCEPT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -3415,9 +3446,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R0 min value is outside of the array range", - .result_unpriv = REJECT, .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, @@ -3439,9 +3468,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", - .result_unpriv = REJECT, + .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map", .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, @@ -3455,7 +3482,7 @@ static struct bpf_test tests[] = { BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), BPF_MOV32_IMM(BPF_REG_1, 0), @@ -3466,8 +3493,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", + .errstr_unpriv = "R0 leaks addr", + .errstr = "R0 unbounded memory access", .result_unpriv = REJECT, .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -3493,7 +3520,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 leaks addr", .errstr = "invalid access to map value, value_size=48 off=44 size=8", .result_unpriv = REJECT, .result = REJECT, @@ -3523,8 +3550,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3, 11 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", + .errstr_unpriv = "R0 pointer += pointer", + .errstr = "R0 invalid mem access 'inv'", .result_unpriv = REJECT, .result = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -3665,34 +3692,6 @@ static struct bpf_test tests[] = { .result = ACCEPT, .prog_type = BPF_PROG_TYPE_SCHED_CLS }, - { - "multiple registers share map_lookup_elem bad reg type", - .insns = { - BPF_MOV64_IMM(BPF_REG_1, 10), - BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_1, -8), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_3, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), - BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_MOV64_IMM(BPF_REG_1, 1), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), - BPF_MOV64_IMM(BPF_REG_1, 2), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1), - BPF_ST_MEM(BPF_DW, BPF_REG_3, 0, 0), - BPF_MOV64_IMM(BPF_REG_1, 3), - BPF_EXIT_INSN(), - }, - .fixup_map1 = { 4 }, - .result = REJECT, - .errstr = "R3 invalid mem access 'inv'", - .prog_type = BPF_PROG_TYPE_SCHED_CLS - }, { "invalid map access from else condition", 
.insns = { @@ -3711,9 +3710,9 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map", + .errstr = "R0 unbounded memory access", .result = REJECT, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 leaks addr", .result_unpriv = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, @@ -4091,7 +4090,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=0 size=-8", + .errstr = "R2 min value is negative", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -4157,7 +4156,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr = "R1 min value is outside of the array range", + .errstr = "invalid access to map value, value_size=48 off=4 size=0", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -4203,7 +4202,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=4 size=-8", + .errstr = "R2 min value is negative", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -4225,7 +4224,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr = "R1 min value is outside of the array range", + .errstr = "R2 min value is negative", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -4341,7 +4340,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr = "invalid access to map value, value_size=48 off=4 size=-8", + .errstr = "R2 min value is negative", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -4364,7 +4363,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr = "R1 min value is outside of the array range", + .errstr = "R2 min value is negative", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -4452,13 +4451,13 @@ static struct bpf_test tests[] = { BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_2, 1), BPF_MOV64_IMM(BPF_REG_3, 0), BPF_EMIT_CALL(BPF_FUNC_probe_read), BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr = "R1 min value is negative, either use unsigned index or do a if (index >=0) check", + .errstr = "R1 unbounded memory access", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -4578,7 +4577,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 leaks addr", .result = ACCEPT, .result_unpriv = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -4606,7 +4605,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 leaks addr", .result = ACCEPT, .result_unpriv = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -4625,7 +4624,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 bitwise operator &= on pointer", .errstr = "invalid mem access 'inv'", .result = REJECT, .result_unpriv = REJECT, @@ -4644,7 +4643,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = 
"R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited", .errstr = "invalid mem access 'inv'", .result = REJECT, .result_unpriv = REJECT, @@ -4663,7 +4662,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 pointer arithmetic with /= operator", .errstr = "invalid mem access 'inv'", .result = REJECT, .result_unpriv = REJECT, @@ -4706,10 +4705,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 invalid mem access 'inv'", .errstr = "R0 invalid mem access 'inv'", .result = REJECT, - .result_unpriv = REJECT, }, { "map element value is preserved across register spilling", @@ -4731,7 +4728,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 leaks addr", .result = ACCEPT, .result_unpriv = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, @@ -4913,7 +4910,8 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr = "R2 unbounded memory access", + /* because max wasn't checked, signed min is negative */ + .errstr = "R2 min value is negative, either use unsigned or 'var &= const'", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -5061,6 +5059,20 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, + { + "helper access to variable memory: size = 0 allowed on NULL", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_4, 0), + BPF_MOV64_IMM(BPF_REG_5, 0), + BPF_EMIT_CALL(BPF_FUNC_csum_diff), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, { "helper access to variable memory: size > 0 not allowed on NULL", .insns = { @@ -5075,7 +5087,7 @@ static struct bpf_test tests[] = { BPF_EMIT_CALL(BPF_FUNC_csum_diff), BPF_EXIT_INSN(), }, - .errstr = "R1 type=imm expected=fp", + .errstr = "R1 type=inv expected=fp", .result = REJECT, .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, @@ -5160,7 +5172,7 @@ static struct bpf_test tests[] = { BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 6), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4), BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2), BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), @@ -5169,10 +5181,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", + .errstr = "R0 max value is outside of the array range", .result = REJECT, - .result_unpriv = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { @@ -5201,10 +5211,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", + .errstr = "R0 max value is outside of the array range", .result = REJECT, - .result_unpriv = REJECT, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { @@ -5251,7 +5259,7 @@ static struct bpf_test tests[] = { }, .fixup_map_in_map = { 3 }, .errstr = "R1 type=inv expected=map_ptr", - .errstr_unpriv = "R1 pointer arithmetic 
prohibited", + .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited", .result = REJECT, }, { @@ -5531,10 +5539,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R0 min value is negative", .result = REJECT, - .result_unpriv = REJECT, }, { "bounds checks mixing signed and unsigned", @@ -5557,10 +5563,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R0 min value is negative", .result = REJECT, - .result_unpriv = REJECT, }, { "bounds checks mixing signed and unsigned, variant 2", @@ -5585,10 +5589,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R8 invalid mem access 'inv'", .result = REJECT, - .result_unpriv = REJECT, }, { "bounds checks mixing signed and unsigned, variant 3", @@ -5612,10 +5614,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R8 invalid mem access 'inv'", .result = REJECT, - .result_unpriv = REJECT, }, { "bounds checks mixing signed and unsigned, variant 4", @@ -5638,10 +5638,7 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "R0 min value is negative", - .result = REJECT, - .result_unpriv = REJECT, + .result = ACCEPT, }, { "bounds checks mixing signed and unsigned, variant 5", @@ -5665,10 +5662,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "R0 invalid mem access", + .errstr = "R0 min value is negative", .result = REJECT, - .result_unpriv = REJECT, }, { "bounds checks mixing signed and unsigned, variant 6", @@ -5689,10 +5684,8 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, - .errstr_unpriv = "R4 min value is negative, either use unsigned", .errstr = "R4 min value is negative, either use unsigned", .result = REJECT, - .result_unpriv = REJECT, }, { "bounds checks mixing signed and unsigned, variant 7", @@ -5715,39 +5708,10 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "R0 min value is negative", - .result = REJECT, - .result_unpriv = REJECT, + .result = ACCEPT, }, { "bounds checks mixing signed and unsigned, variant 8", - .insns = { - BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), - BPF_LD_MAP_FD(BPF_REG_1, 0), - BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), - BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8), - BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16), - BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024 + 1), - BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3), - BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2), - BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), - BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0), - BPF_MOV64_IMM(BPF_REG_0, 0), - BPF_EXIT_INSN(), - }, - .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "R0 min value is negative", - .result = REJECT, - .result_unpriv = REJECT, - }, - { - "bounds checks mixing signed and unsigned, variant 9", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -5769,13 +5733,11 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R0 min value is negative", .result = REJECT, - .result_unpriv = REJECT, }, { - "bounds checks mixing signed and unsigned, variant 10", + "bounds checks mixing signed and unsigned, variant 9", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -5797,13 +5759,10 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", - .errstr = "R0 min value is negative", - .result = REJECT, - .result_unpriv = REJECT, + .result = ACCEPT, }, { - "bounds checks mixing signed and unsigned, variant 11", + "bounds checks mixing signed and unsigned, variant 10", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -5825,13 +5784,11 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R0 min value is negative", .result = REJECT, - .result_unpriv = REJECT, }, { - "bounds checks mixing signed and unsigned, variant 12", + "bounds checks mixing signed and unsigned, variant 11", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -5854,13 +5811,11 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R0 min value is negative", .result = REJECT, - .result_unpriv = REJECT, }, { - "bounds checks mixing signed and unsigned, variant 13", + "bounds checks mixing signed and unsigned, variant 12", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -5882,13 +5837,11 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R0 min value is negative", .result = REJECT, - .result_unpriv = REJECT, }, { - "bounds checks mixing signed and unsigned, variant 14", + "bounds checks mixing signed and unsigned, variant 13", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -5913,13 +5866,11 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R0 min value is negative", .result = REJECT, - .result_unpriv = REJECT, }, { - "bounds checks mixing signed and unsigned, variant 15", + "bounds checks mixing signed and unsigned, variant 14", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1, offsetof(struct __sk_buff, mark)), @@ -5945,13 +5896,11 @@ static struct bpf_test tests[] = { BPF_JMP_IMM(BPF_JA, 0, 0, -7), }, .fixup_map1 = { 4 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", .errstr = "R0 min value is negative", .result = REJECT, - .result_unpriv = REJECT, }, { - "bounds checks mixing signed and unsigned, variant 16", + "bounds checks mixing signed and unsigned, variant 15", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -5975,13 +5924,13 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr_unpriv = "R0 pointer comparison prohibited", .errstr = "R0 min value is negative", .result = REJECT, .result_unpriv = REJECT, }, { - "subtraction bounds (map value)", + "subtraction 
bounds (map value) variant 1", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -6003,10 +5952,33 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), }, .fixup_map1 = { 3 }, - .errstr_unpriv = "R0 pointer arithmetic prohibited", + .errstr = "R0 max value is outside of the array range", + .result = REJECT, + }, + { + "subtraction bounds (map value) variant 2", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0xff, 6), + BPF_LDX_MEM(BPF_B, BPF_REG_3, BPF_REG_0, 1), + BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 0xff, 4), + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result = REJECT, - .result_unpriv = REJECT, }, }; -- cgit v1.2.3-55-g7522 From 9fafa80513d922ed80c2067a3ea636696fc14c61 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:27:34 +0100 Subject: selftests/bpf: rewrite test_align Expectations have changed, as has the format of the logged state. To make the tests easier to read, add a line-matching framework so that each match need only quote the register it cares about. (Multiple matches may refer to the same line, but matches must be listed in order of increasing line.) Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_align.c | 225 ++++++++++++++++++------------- 1 file changed, 132 insertions(+), 93 deletions(-) diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c index 29793694cbc7..62232e4d0332 100644 --- a/tools/testing/selftests/bpf/test_align.c +++ b/tools/testing/selftests/bpf/test_align.c @@ -27,6 +27,11 @@ #define MAX_INSNS 512 #define MAX_MATCHES 16 +struct bpf_reg_match { + unsigned int line; + const char *match; +}; + struct bpf_align_test { const char *descr; struct bpf_insn insns[MAX_INSNS]; @@ -36,10 +41,14 @@ struct bpf_align_test { REJECT } result; enum bpf_prog_type prog_type; - const char *matches[MAX_MATCHES]; + /* Matches must be in order of increasing line */ + struct bpf_reg_match matches[MAX_MATCHES]; }; static struct bpf_align_test tests[] = { + /* Four tests of known constants. These aren't staggeringly + * interesting since we track exact values now. 
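(For orientation on the new match format: each {line, match} pair below is applied to the single verifier log line that starts with that instruction number, so several matches may probe different registers on the same line. With the concise constant format from the previous patch, the line that the first test's {1, ...} entries are checked against would look roughly like the following; this is a reconstruction for illustration, not captured output.)

1: R1=ctx(id=0,off=0,imm=0) R3=inv2 R10=fp0
2: R1=ctx(id=0,off=0,imm=0) R3=inv4 R10=fp0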
+ */ { .descr = "mov", .insns = { @@ -53,11 +62,13 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - "1: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp", - "2: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", - "3: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp", - "4: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp", - "5: R1=ctx R3=imm32,min_value=32,max_value=32,min_align=32 R10=fp", + {1, "R1=ctx(id=0,off=0,imm=0)"}, + {1, "R10=fp0"}, + {1, "R3=inv2"}, + {2, "R3=inv4"}, + {3, "R3=inv8"}, + {4, "R3=inv16"}, + {5, "R3=inv32"}, }, }, { @@ -79,17 +90,19 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - "1: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp", - "2: R1=ctx R3=imm2,min_value=2,max_value=2,min_align=2 R10=fp", - "3: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", - "4: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=8 R10=fp", - "5: R1=ctx R3=imm16,min_value=16,max_value=16,min_align=16 R10=fp", - "6: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R10=fp", - "7: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm32,min_value=32,max_value=32,min_align=32 R10=fp", - "8: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm16,min_value=16,max_value=16,min_align=16 R10=fp", - "9: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp", - "10: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm4,min_value=4,max_value=4,min_align=4 R10=fp", - "11: R1=ctx R3=imm1,min_value=1,max_value=1,min_align=1 R4=imm2,min_value=2,max_value=2,min_align=2 R10=fp", + {1, "R1=ctx(id=0,off=0,imm=0)"}, + {1, "R10=fp0"}, + {1, "R3=inv1"}, + {2, "R3=inv2"}, + {3, "R3=inv4"}, + {4, "R3=inv8"}, + {5, "R3=inv16"}, + {6, "R3=inv1"}, + {7, "R4=inv32"}, + {8, "R4=inv16"}, + {9, "R4=inv8"}, + {10, "R4=inv4"}, + {11, "R4=inv2"}, }, }, { @@ -106,12 +119,14 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - "1: R1=ctx R3=imm4,min_value=4,max_value=4,min_align=4 R10=fp", - "2: R1=ctx R3=imm8,min_value=8,max_value=8,min_align=4 R10=fp", - "3: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R10=fp", - "4: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm8,min_value=8,max_value=8,min_align=8 R10=fp", - "5: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm12,min_value=12,max_value=12,min_align=4 R10=fp", - "6: R1=ctx R3=imm10,min_value=10,max_value=10,min_align=2 R4=imm14,min_value=14,max_value=14,min_align=2 R10=fp", + {1, "R1=ctx(id=0,off=0,imm=0)"}, + {1, "R10=fp0"}, + {1, "R3=inv4"}, + {2, "R3=inv8"}, + {3, "R3=inv10"}, + {4, "R4=inv8"}, + {5, "R4=inv12"}, + {6, "R4=inv14"}, }, }, { @@ -126,13 +141,16 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - "1: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp", - "2: R1=ctx R3=imm7,min_value=7,max_value=7,min_align=1 R10=fp", - "3: R1=ctx R3=imm14,min_value=14,max_value=14,min_align=2 R10=fp", - "4: R1=ctx R3=imm56,min_value=56,max_value=56,min_align=4 R10=fp", + {1, "R1=ctx(id=0,off=0,imm=0)"}, + {1, "R10=fp0"}, + {1, "R3=inv7"}, + {2, "R3=inv7"}, + {3, "R3=inv14"}, + {4, "R3=inv56"}, }, }, + /* Tests using unknown values */ #define PREP_PKT_POINTERS \ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \ offsetof(struct __sk_buff, data)), \ @@ -166,17 +184,19 @@ static struct bpf_align_test tests[] = { }, .prog_type 
= BPF_PROG_TYPE_SCHED_CLS, .matches = { - "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp", - "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv55,min_align=2 R10=fp", - "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv54,min_align=4 R10=fp", - "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv53,min_align=8 R10=fp", - "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv52,min_align=16 R10=fp", - "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv56 R10=fp", - "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv51,min_align=32 R10=fp", - "20: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv52,min_align=16 R10=fp", - "21: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv53,min_align=8 R10=fp", - "22: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv54,min_align=4 R10=fp", - "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv55,min_align=2 R10=fp", + {7, "R0=pkt(id=0,off=8,r=8,imm=0)"}, + {7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, + {8, "R3=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, + {9, "R3=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {10, "R3=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, + {11, "R3=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, + {18, "R3=pkt_end(id=0,off=0,imm=0)"}, + {18, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, + {19, "R4=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"}, + {20, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, + {21, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, + {22, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {23, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, }, }, { @@ -197,16 +217,16 @@ static struct bpf_align_test tests[] = { }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .matches = { - "7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R10=fp", - "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", - "9: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv55,min_align=1 R10=fp", - "10: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", - "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv54,min_align=2 R10=fp", - "12: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", - "13: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv53,min_align=4 R10=fp", - "14: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv56 R10=fp", - "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv52,min_align=8 R10=fp", - "16: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=inv56 R4=inv50,min_align=8 R10=fp" + {7, "R3=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, + {8, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, + {9, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, + {10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, + {11, "R4=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"}, + {12, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, + {13, "R4=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {14, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, + {15, "R4=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"}, + {16, "R4=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"}, }, }, { @@ -237,12 +257,14 @@ static struct bpf_align_test tests[] = { }, .prog_type = 
BPF_PROG_TYPE_SCHED_CLS, .matches = { - "4: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=0,r=0) R10=fp", - "5: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R5=pkt(id=0,off=14,r=0) R10=fp", - "6: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R4=pkt(id=0,off=14,r=0) R5=pkt(id=0,off=14,r=0) R10=fp", - "10: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv56 R5=pkt(id=0,off=14,r=18) R10=fp", - "14: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp", - "15: R0=imm0,min_value=0,max_value=0,min_align=2147483648 R1=ctx R2=pkt(id=0,off=0,r=18) R3=pkt_end R4=inv48 R5=pkt(id=0,off=14,r=18) R10=fp", + {4, "R5=pkt(id=0,off=0,r=0,imm=0)"}, + {5, "R5=pkt(id=0,off=14,r=0,imm=0)"}, + {6, "R4=pkt(id=0,off=14,r=0,imm=0)"}, + {10, "R2=pkt(id=0,off=0,r=18,imm=0)"}, + {10, "R5=pkt(id=0,off=14,r=18,imm=0)"}, + {10, "R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"}, + {14, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, + {15, "R4=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"}, }, }, { @@ -297,62 +319,59 @@ static struct bpf_align_test tests[] = { /* Calculated offset in R6 has unknown value, but known * alignment of 4. */ - "8: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R6=inv54,min_align=4 R10=fp", - - /* Offset is added to packet pointer R5, resulting in known - * auxiliary alignment and offset. + {8, "R2=pkt(id=0,off=0,r=8,imm=0)"}, + {8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + /* Offset is added to packet pointer R5, resulting in + * known fixed offset, and variable offset from R6. */ - "11: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R5=pkt(id=1,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", - + {11, "R5=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, * it's total offset is NET_IP_ALIGN + reg->off (0) + * reg->aux_off (14) which is 16. Then the variable * offset is considered using reg->aux_off_align which * is 4 and meets the load's requirements. */ - "15: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=1,off=4,r=4),aux_off=14,aux_off_align=4 R5=pkt(id=1,off=0,r=4),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", - - + {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Variable offset is added to R5 packet pointer, * resulting in auxiliary alignment of 4. */ - "18: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=0,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp", - + {18, "R5=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5, resulting in * reg->off of 14. */ - "19: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off=14,aux_off_align=4 R5=pkt(id=2,off=14,r=0),aux_off_align=4 R6=inv54,min_align=4 R10=fp", - + {19, "R5=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, - * it's total offset is NET_IP_ALIGN + reg->off (14) which - * is 16. 
Then the variable offset is considered using - * reg->aux_off_align which is 4 and meets the load's - * requirements. + * its total fixed offset is NET_IP_ALIGN + reg->off + * (14) which is 16. Then the variable offset is 4-byte + * aligned, so the total offset is 4-byte aligned and + * meets the load's requirements. */ - "23: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=2,off=18,r=18),aux_off_align=4 R5=pkt(id=2,off=14,r=18),aux_off_align=4 R6=inv54,min_align=4 R10=fp", - + {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5 packet pointer, * resulting in reg->off value of 14. */ - "26: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=0,off=14,r=8) R6=inv54,min_align=4 R10=fp", - /* Variable offset is added to R5, resulting in an - * auxiliary offset of 14, and an auxiliary alignment of 4. + {26, "R5=pkt(id=0,off=14,r=8"}, + /* Variable offset is added to R5, resulting in a + * variable offset of (4n). */ - "27: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=0,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", - /* Constant is added to R5 again, setting reg->off to 4. */ - "28: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=3,off=4,r=0),aux_off=14,aux_off_align=4 R6=inv54,min_align=4 R10=fp", - /* And once more we add a variable, which causes an accumulation - * of reg->off into reg->aux_off_align, with resulting value of - * 18. The auxiliary alignment stays at 4. + {27, "R5=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + /* Constant is added to R5 again, setting reg->off to 18. */ + {28, "R5=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + /* And once more we add a variable; resulting var_off + * is still (4n), fixed offset is not changed. + * Also, we create a new reg->id. */ - "29: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=inv,aux_off_align=4 R5=pkt(id=4,off=0,r=0),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp", + {29, "R5=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"}, /* At the time the word size load is performed from R5, - * it's total offset is NET_IP_ALIGN + reg->off (0) + - * reg->aux_off (18) which is 20. Then the variable offset - * is considered using reg->aux_off_align which is 4 and meets - * the load's requirements. + * its total fixed offset is NET_IP_ALIGN + reg->off (18) + * which is 20. Then the variable offset is (4n), so + * the total offset is 4-byte aligned and meets the + * load's requirements. 
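(A rough user-space sketch of the alignment reasoning above, assuming tnum_add() and tnum_is_aligned() stand-ins modelled on kernel/bpf/tnum.c; the verifier's own path is check_pkt_ptr_alignment(), so treat this as an illustration only.)

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct tnum { uint64_t value; uint64_t mask; };

/* any bit that an unknown input bit or an uncertain carry can reach
 * becomes unknown in the sum
 */
static struct tnum tnum_add(struct tnum a, struct tnum b)
{
	uint64_t sm = a.mask + b.mask;
	uint64_t sv = a.value + b.value;
	uint64_t sigma = sm + sv;
	uint64_t chi = sigma ^ sv;
	uint64_t mu = chi | a.mask | b.mask;

	return (struct tnum){ .value = sv & ~mu, .mask = mu };
}

static bool tnum_is_aligned(struct tnum a, uint64_t size)
{
	return !((a.value | a.mask) & (size - 1));	/* low bits provably zero */
}

int main(void)
{
	/* R5 at insn 33: variable offset (4n) up to 2040, fixed part
	 * NET_IP_ALIGN (2) + reg->off (18) = 20
	 */
	struct tnum total = tnum_add((struct tnum){ 0x0, 0x7fc },
				     (struct tnum){ 20, 0 });
	assert(tnum_is_aligned(total, 4));	/* (0x0; 0xffc): word load is fine */

	/* same check for a (4n+2) variable offset with only NET_IP_ALIGN
	 * as the fixed part, as in the following tests
	 */
	total = tnum_add((struct tnum){ 0x2, 0x7fc }, (struct tnum){ 2, 0 });
	assert(tnum_is_aligned(total, 4));	/* (0x0; 0xffc) again */
	return 0;
}

Both asserts hold because the unknown part of the offset only ever occupies bits 2 and up, so adding the even fixed parts never disturbs the two known-zero low bits.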
*/ - "33: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R4=pkt(id=4,off=4,r=4),aux_off=18,aux_off_align=4 R5=pkt(id=4,off=0,r=4),aux_off=18,aux_off_align=4 R6=inv54,min_align=4 R10=fp", + {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"}, + {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"}, }, }, }; @@ -373,6 +392,9 @@ static int do_test_single(struct bpf_align_test *test) { struct bpf_insn *prog = test->insns; int prog_type = test->prog_type; + char bpf_vlog_copy[32768]; + const char *line_ptr; + int cur_line = -1; int prog_len, i; int fd_prog; int ret; @@ -387,14 +409,31 @@ static int do_test_single(struct bpf_align_test *test) ret = 1; } else { ret = 0; + /* We make a local copy so that we can strtok() it */ + strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy)); + line_ptr = strtok(bpf_vlog_copy, "\n"); for (i = 0; i < MAX_MATCHES; i++) { - const char *t, *m = test->matches[i]; + struct bpf_reg_match m = test->matches[i]; - if (!m) + if (!m.match) break; - t = strstr(bpf_vlog, m); - if (!t) { - printf("Failed to find match: %s\n", m); + while (line_ptr) { + cur_line = -1; + sscanf(line_ptr, "%u: ", &cur_line); + if (cur_line == m.line) + break; + line_ptr = strtok(NULL, "\n"); + } + if (!line_ptr) { + printf("Failed to find line %u for match: %s\n", + m.line, m.match); + ret = 1; + printf("%s", bpf_vlog); + break; + } + if (!strstr(line_ptr, m.match)) { + printf("Failed to find match %u: %s\n", + m.line, m.match); ret = 1; printf("%s", bpf_vlog); break; -- cgit v1.2.3-55-g7522 From 715dddb5e640bd24ce46dc7885c9657038ad1e23 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:28:00 +0100 Subject: selftests/bpf: add a test to test_align New test adds 14 to the unknown value before adding to the packet pointer, meaning there's no 'fixed offset' field and instead we add into the var_off, yielding a '4n+2' value. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- tools/testing/selftests/bpf/test_align.c | 67 ++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c index 62232e4d0332..74cc4a6b8ed0 100644 --- a/tools/testing/selftests/bpf/test_align.c +++ b/tools/testing/selftests/bpf/test_align.c @@ -374,6 +374,73 @@ static struct bpf_align_test tests[] = { {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"}, }, }, + { + .descr = "packet variable offset 2", + .insns = { + /* Create an unknown offset, (4n+2)-aligned */ + LOAD_UNKNOWN(BPF_REG_6), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14), + /* Add it to the packet pointer */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + /* Check bounds and perform a read */ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), + /* Make a (4n) offset from the value we just read */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), + /* Add it to the packet pointer */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + /* Check bounds and perform a read */ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + /* Calculated offset in R6 has unknown value, but known + * alignment of 4. + */ + {8, "R2=pkt(id=0,off=0,r=8,imm=0)"}, + {8, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + /* Adding 14 makes R6 be (4n+2) */ + {9, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, + /* Packet pointer has (4n+2) offset */ + {11, "R5=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, + {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, + /* At the time the word size load is performed from R5, + * its total fixed offset is NET_IP_ALIGN + reg->off (0) + * which is 2. Then the variable offset is (4n+2), so + * the total offset is 4-byte aligned and meets the + * load's requirements. + */ + {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, + /* Newly read value in R6 was shifted left by 2, so has + * known alignment of 4. + */ + {18, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + /* Added (4n) to packet pointer's (4n+2) var_off, giving + * another (4n+2). + */ + {19, "R5=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"}, + {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"}, + /* At the time the word size load is performed from R5, + * its total fixed offset is NET_IP_ALIGN + reg->off (0) + * which is 2. Then the variable offset is (4n+2), so + * the total offset is 4-byte aligned and meets the + * load's requirements. 
+ */ + {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"}, + }, + }, }; static int probe_filter_length(const struct bpf_insn *fp) -- cgit v1.2.3-55-g7522 From c2c3e11712e23d430a49e1247a8ec211740c2254 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:28:45 +0100 Subject: selftests/bpf: add test for bogus operations on pointers Tests non-add/sub operations (AND, LSH) on pointers decaying them to unknown scalars. Also tests that a pkt_ptr add which could potentially overflow is rejected (find_good_pkt_pointers ignores it and doesn't give us any reg->range). Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_align.c | 66 +++++++++++++++++++++++++++++++- 1 file changed, 64 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c index 74cc4a6b8ed0..b0816830a937 100644 --- a/tools/testing/selftests/bpf/test_align.c +++ b/tools/testing/selftests/bpf/test_align.c @@ -441,6 +441,62 @@ static struct bpf_align_test tests[] = { {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"}, }, }, + { + .descr = "dubious pointer arithmetic", + .insns = { + PREP_PKT_POINTERS, + BPF_MOV64_IMM(BPF_REG_0, 0), + /* ptr & const => unknown & const */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 0x40), + /* ptr << const => unknown << const */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2), + /* We have a (4n) value. Let's make a packet offset + * out of it. First add 14, to make it a (4n+2) + */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), + /* Then make sure it's nonnegative */ + BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1), + BPF_EXIT_INSN(), + /* Add it to packet pointer */ + BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5), + /* Check bounds and perform a read */ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_6), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .result = REJECT, + .matches = { + {4, "R5=pkt(id=0,off=0,r=0,imm=0)"}, + /* ptr & 0x40 == either 0 or 0x40 */ + {5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"}, + /* ptr << 2 == unknown, (4n) */ + {7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"}, + /* (4n) + 14 == (4n+2). We blow our bounds, because + * the add could overflow. + */ + {8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"}, + /* Checked s>=0 */ + {10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, + /* packet pointer + nonnegative (4n+2) */ + {12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, + {14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, + /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine. + * We checked the bounds, but it might have been able + * to overflow if the packet pointer started in the + * upper half of the address space. + * So we did not get a 'range' on R6, and the access + * attempt will fail. 
+ */ + {16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, + } + }, }; static int probe_filter_length(const struct bpf_insn *fp) @@ -470,10 +526,15 @@ static int do_test_single(struct bpf_align_test *test) fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, prog, prog_len, 1, "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2); - if (fd_prog < 0) { + if (fd_prog < 0 && test->result != REJECT) { printf("Failed to load program.\n"); printf("%s", bpf_vlog); ret = 1; + } else if (fd_prog >= 0 && test->result == REJECT) { + printf("Unexpected success to load!\n"); + printf("%s", bpf_vlog); + ret = 1; + close(fd_prog); } else { ret = 0; /* We make a local copy so that we can strtok() it */ @@ -506,7 +567,8 @@ static int do_test_single(struct bpf_align_test *test) break; } } - close(fd_prog); + if (fd_prog >= 0) + close(fd_prog); } return ret; } -- cgit v1.2.3-55-g7522 From 1f9ab38f8a155913c9a587a673e61eedb75c9bc8 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:29:11 +0100 Subject: selftests/bpf: don't try to access past MAX_PACKET_OFF in test_verifier A number of selftests fell foul of the changed MAX_PACKET_OFF handling. For instance, "direct packet access: test2" was potentially reading four bytes from pkt + 0xffff, which could take it past the verifier's limit, causing the program to be rejected (checks against pkt_end didn't give us any reg->range). Increase the shifts by one so that R2 is now mask 0x7fff instead of mask 0xffff. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_verifier.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 06914941f376..876b8785fd83 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -2330,8 +2330,8 @@ static struct bpf_test tests[] = { offsetof(struct __sk_buff, data)), BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 48), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 48), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49), BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), BPF_MOV64_REG(BPF_REG_2, BPF_REG_3), BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 8), @@ -2710,11 +2710,11 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_0, 0xffffffff), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xffff), + BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0x7fff), BPF_MOV64_REG(BPF_REG_4, BPF_REG_0), BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), BPF_MOV64_IMM(BPF_REG_0, 0), @@ -2736,10 +2736,10 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -8), BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0xffff), + BPF_ALU64_IMM(BPF_AND, BPF_REG_4, 0x7fff), BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), BPF_MOV64_REG(BPF_REG_5, BPF_REG_4), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0xffff - 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 0x7fff - 1), BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_3, 1), 
BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_4, 0), BPF_MOV64_IMM(BPF_REG_0, 0), @@ -2765,7 +2765,7 @@ static struct bpf_test tests[] = { BPF_MOV64_IMM(BPF_REG_4, 0xffffffff), BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_4, -8), BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), - BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 48), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 49), BPF_ALU64_REG(BPF_ADD, BPF_REG_4, BPF_REG_2), BPF_MOV64_REG(BPF_REG_0, BPF_REG_4), BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), @@ -2820,7 +2820,7 @@ static struct bpf_test tests[] = { BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), BPF_MOV64_REG(BPF_REG_5, BPF_REG_0), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0xffff - 1), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7fff - 1), BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), BPF_STX_MEM(BPF_DW, BPF_REG_5, BPF_REG_0, 0), BPF_MOV64_IMM(BPF_REG_0, 0), -- cgit v1.2.3-55-g7522 From f999d64c346c0154e7ed4beb0eba7d2eed422a34 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:29:34 +0100 Subject: selftests/bpf: add tests for subtraction & negative numbers Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_align.c | 104 +++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c index b0816830a937..8591c89c0828 100644 --- a/tools/testing/selftests/bpf/test_align.c +++ b/tools/testing/selftests/bpf/test_align.c @@ -497,6 +497,110 @@ static struct bpf_align_test tests[] = { {16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"}, } }, + { + .descr = "variable subtraction", + .insns = { + /* Create an unknown offset, (4n+2)-aligned */ + LOAD_UNKNOWN(BPF_REG_6), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14), + /* Create another unknown, (4n)-aligned, and subtract + * it from the first one + */ + BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2), + BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7), + /* Bounds-check the result */ + BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1), + BPF_EXIT_INSN(), + /* Add it to the packet pointer */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + /* Check bounds and perform a read */ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + /* Calculated offset in R6 has unknown value, but known + * alignment of 4. + */ + {7, "R2=pkt(id=0,off=0,r=8,imm=0)"}, + {9, "R6=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + /* Adding 14 makes R6 be (4n+2) */ + {10, "R6=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"}, + /* New unknown value in R7 is (4n) */ + {11, "R7=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + /* Subtracting it from R6 blows our unsigned bounds */ + {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"}, + /* Checked s>= 0 */ + {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"}, + /* At the time the word size load is performed from R5, + * its total fixed offset is NET_IP_ALIGN + reg->off (0) + * which is 2. Then the variable offset is (4n+2), so + * the total offset is 4-byte aligned and meets the + * load's requirements. 
+ */ + {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"}, + }, + }, + { + .descr = "pointer variable subtraction", + .insns = { + /* Create an unknown offset, (4n+2)-aligned and bounded + * to [14,74] + */ + LOAD_UNKNOWN(BPF_REG_6), + BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), + BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf), + BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14), + /* Subtract it from the packet pointer */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), + BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6), + /* Create another unknown, (4n)-aligned and >= 74. + * That in fact means >= 76, since 74 % 4 == 2 + */ + BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76), + /* Add it to the packet pointer */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7), + /* Check bounds and perform a read */ + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + .matches = { + /* Calculated offset in R6 has unknown value, but known + * alignment of 4. + */ + {7, "R2=pkt(id=0,off=0,r=8,imm=0)"}, + {10, "R6=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"}, + /* Adding 14 makes R6 be (4n+2) */ + {11, "R6=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"}, + /* Subtracting from packet pointer overflows ubounds */ + {13, "R5=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"}, + /* New unknown value in R7 is (4n), >= 76 */ + {15, "R7=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"}, + /* Adding it to packet pointer gives nice bounds again */ + {16, "R5=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"}, + /* At the time the word size load is performed from R5, + * its total fixed offset is NET_IP_ALIGN + reg->off (0) + * which is 2. Then the variable offset is (4n+2), so + * the total offset is 4-byte aligned and meets the + * load's requirements. + */ + {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"}, + }, + }, }; static int probe_filter_length(const struct bpf_insn *fp) -- cgit v1.2.3-55-g7522 From 69c4e8ada616ce5ad4e37d6acca851d648dbdfa9 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:29:51 +0100 Subject: selftests/bpf: variable offset negative tests Variable ctx accesses and stack accesses aren't allowed, because we can't determine what type of value will be read. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_verifier.c | 41 +++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 876b8785fd83..65aa562cff87 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -5980,6 +5980,47 @@ static struct bpf_test tests[] = { .errstr = "R0 min value is negative, either use unsigned index or do a if (index >=0) check.", .result = REJECT, }, + { + "variable-offset ctx access", + .insns = { + /* Get an unknown value */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* Make it small and 4-byte aligned */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), + /* add it to skb. 
We now have either &skb->len or + * &skb->pkt_type, but we don't know which + */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2), + /* dereference it */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), + BPF_EXIT_INSN(), + }, + .errstr = "variable ctx access var_off=(0x0; 0x4)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, + }, + { + "variable-offset stack access", + .insns = { + /* Fill the top 8 bytes of the stack */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + /* Get an unknown value */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* Make it small and 4-byte aligned */ + BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4), + BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8), + /* add it to fp. We now have either fp-4 or fp-8, but + * we don't know which + */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10), + /* dereference it */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), + BPF_EXIT_INSN(), + }, + .errstr = "variable stack access var_off=(0xfffffffffffffff8; 0x4)", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, + }, }; static int probe_filter_length(const struct bpf_insn *fp) -- cgit v1.2.3-55-g7522 From 0cbf4741652b29cf98f6a77a156d32b2aca59bc3 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:30:09 +0100 Subject: Documentation: describe the new eBPF verifier value tracking behaviour Also bring the eBPF documentation up to date in other ways. Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- Documentation/networking/filter.txt | 122 ++++++++++++++++++++++++++++++------ 1 file changed, 104 insertions(+), 18 deletions(-) diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index b69b205501de..d0fdba7d66e2 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -793,7 +793,7 @@ Some core changes of the new internal format: bpf_exit After the call the registers R1-R5 contain junk values and cannot be read. - In the future an eBPF verifier can be used to validate internal BPF programs. + An in-kernel eBPF verifier is used to validate internal BPF programs. Also in the new design, eBPF is limited to 4096 insns, which means that any program will terminate quickly and will only call a fixed number of kernel @@ -1017,7 +1017,7 @@ At the start of the program the register R1 contains a pointer to context and has type PTR_TO_CTX. If verifier sees an insn that does R2=R1, then R2 has now type PTR_TO_CTX as well and can be used on the right hand side of expression. -If R1=PTR_TO_CTX and insn is R2=R1+R1, then R2=UNKNOWN_VALUE, +If R1=PTR_TO_CTX and insn is R2=R1+R1, then R2=SCALAR_VALUE, since addition of two valid pointers makes invalid pointer. (In 'secure' mode verifier will reject any type of pointer arithmetic to make sure that kernel addresses don't leak to unprivileged users) @@ -1039,7 +1039,7 @@ is a correct program. If there was R1 instead of R6, it would have been rejected. load/store instructions are allowed only with registers of valid types, which -are PTR_TO_CTX, PTR_TO_MAP, FRAME_PTR. They are bounds and alignment checked. +are PTR_TO_CTX, PTR_TO_MAP, PTR_TO_STACK. They are bounds and alignment checked. For example: bpf_mov R1 = 1 bpf_mov R2 = 2 @@ -1058,7 +1058,7 @@ intends to load a word from address R6 + 8 and store it into R0 If R6=PTR_TO_CTX, via is_valid_access() callback the verifier will know that offset 8 of size 4 bytes can be accessed for reading, otherwise the verifier will reject the program. 
-If R6=FRAME_PTR, then access should be aligned and be within +If R6=PTR_TO_STACK, then access should be aligned and be within stack bounds, which are [-MAX_BPF_STACK, 0). In this example offset is 8, so it will fail verification, since it's out of bounds. @@ -1069,7 +1069,7 @@ For example: bpf_ld R0 = *(u32 *)(R10 - 4) bpf_exit is invalid program. -Though R10 is correct read-only register and has type FRAME_PTR +Though R10 is correct read-only register and has type PTR_TO_STACK and R10 - 4 is within stack bounds, there were no stores into that location. Pointer register spill/fill is tracked as well, since four (R6-R9) @@ -1094,6 +1094,71 @@ all use cases. See details of eBPF verifier in kernel/bpf/verifier.c +Register value tracking +----------------------- +In order to determine the safety of an eBPF program, the verifier must track +the range of possible values in each register and also in each stack slot. +This is done with 'struct bpf_reg_state', defined in include/linux/ +bpf_verifier.h, which unifies tracking of scalar and pointer values. Each +register state has a type, which is either NOT_INIT (the register has not been +written to), SCALAR_VALUE (some value which is not usable as a pointer), or a +pointer type. The types of pointers describe their base, as follows: + PTR_TO_CTX Pointer to bpf_context. + CONST_PTR_TO_MAP Pointer to struct bpf_map. "Const" because arithmetic + on these pointers is forbidden. + PTR_TO_MAP_VALUE Pointer to the value stored in a map element. + PTR_TO_MAP_VALUE_OR_NULL + Either a pointer to a map value, or NULL; map accesses + (see section 'eBPF maps', below) return this type, + which becomes a PTR_TO_MAP_VALUE when checked != NULL. + Arithmetic on these pointers is forbidden. + PTR_TO_STACK Frame pointer. + PTR_TO_PACKET skb->data. + PTR_TO_PACKET_END skb->data + headlen; arithmetic forbidden. +However, a pointer may be offset from this base (as a result of pointer +arithmetic), and this is tracked in two parts: the 'fixed offset' and 'variable +offset'. The former is used when an exactly-known value (e.g. an immediate +operand) is added to a pointer, while the latter is used for values which are +not exactly known. The variable offset is also used in SCALAR_VALUEs, to track +the range of possible values in the register. +The verifier's knowledge about the variable offset consists of: +* minimum and maximum values as unsigned +* minimum and maximum values as signed +* knowledge of the values of individual bits, in the form of a 'tnum': a u64 +'mask' and a u64 'value'. 1s in the mask represent bits whose value is unknown; +1s in the value represent bits known to be 1. Bits known to be 0 have 0 in both +mask and value; no bit should ever be 1 in both. For example, if a byte is read +into a register from memory, the register's top 56 bits are known zero, while +the low 8 are unknown - which is represented as the tnum (0x0; 0xff). If we +then OR this with 0x40, we get (0x40; 0xcf), then if we add 1 we get (0x0; +0x1ff), because of potential carries. +Besides arithmetic, the register state can also be updated by conditional +branches. For instance, if a SCALAR_VALUE is compared > 8, in the 'true' branch +it will have a umin_value (unsigned minimum value) of 9, whereas in the 'false' +branch it will have a umax_value of 8. A signed compare (with BPF_JSGT or +BPF_JSGE) would instead update the signed minimum/maximum values. 
Information +from the signed and unsigned bounds can be combined; for instance if a value is +first tested < 8 and then tested s> 4, the verifier will conclude that the value +is also > 4 and s< 8, since the bounds prevent crossing the sign boundary. +PTR_TO_PACKETs with a variable offset part have an 'id', which is common to all +pointers sharing that same variable offset. This is important for packet range +checks: after adding some variable to a packet pointer, if you then copy it to +another register and (say) add a constant 4, both registers will share the same +'id' but one will have a fixed offset of +4. Then if it is bounds-checked and +found to be less than a PTR_TO_PACKET_END, the other register is now known to +have a safe range of at least 4 bytes. See 'Direct packet access', below, for +more on PTR_TO_PACKET ranges. +The 'id' field is also used on PTR_TO_MAP_VALUE_OR_NULL, common to all copies of +the pointer returned from a map lookup. This means that when one copy is +checked and found to be non-NULL, all copies can become PTR_TO_MAP_VALUEs. +As well as range-checking, the tracked information is also used for enforcing +alignment of pointer accesses. For instance, on most systems the packet pointer +is 2 bytes after a 4-byte alignment. If a program adds 14 bytes to that to jump +over the Ethernet header, then reads IHL and addes (IHL * 4), the resulting +pointer will have a variable offset known to be 4n+2 for some n, so adding the 2 +bytes (NET_IP_ALIGN) gives a 4-byte alignment and so word-sized accesses through +that pointer are safe. + Direct packet access -------------------- In cls_bpf and act_bpf programs the verifier allows direct access to the packet @@ -1121,7 +1186,7 @@ it now points to 'skb->data + 14' and accessible range is [R5, R5 + 14 - 14) which is zero bytes. More complex packet access may look like: - R0=imm1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp + R0=inv1 R1=ctx R3=pkt(id=0,off=0,r=14) R4=pkt_end R5=pkt(id=0,off=14,r=14) R10=fp 6: r0 = *(u8 *)(r3 +7) /* load 7th byte from the packet */ 7: r4 = *(u8 *)(r3 +12) 8: r4 *= 14 @@ -1135,26 +1200,31 @@ More complex packet access may look like: 16: r2 += 8 17: r1 = *(u32 *)(r1 +80) /* load skb->data_end */ 18: if r2 > r1 goto pc+2 - R0=inv56 R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv52 R5=pkt(id=0,off=14,r=14) R10=fp + R0=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) R1=pkt_end R2=pkt(id=2,off=8,r=8) R3=pkt(id=2,off=0,r=8) R4=inv(id=0,umax_value=3570,var_off=(0x0; 0xfffe)) R5=pkt(id=0,off=14,r=14) R10=fp 19: r1 = *(u8 *)(r3 +4) The state of the register R3 is R3=pkt(id=2,off=0,r=8) id=2 means that two 'r3 += rX' instructions were seen, so r3 points to some offset within a packet and since the program author did 'if (r3 + 8 > r1) goto err' at insn #18, the safe range is [R3, R3 + 8). -The verifier only allows 'add' operation on packet registers. Any other -operation will set the register state to 'unknown_value' and it won't be +The verifier only allows 'add'/'sub' operations on packet registers. Any other +operation will set the register state to 'SCALAR_VALUE' and it won't be available for direct packet access. Operation 'r3 += rX' may overflow and become less than original skb->data, -therefore the verifier has to prevent that. 
So it tracks the number of -upper zero bits in all 'uknown_value' registers, so when it sees -'r3 += rX' instruction and rX is more than 16-bit value, it will error as: -"cannot add integer value with N upper zero bits to ptr_to_packet" +therefore the verifier has to prevent that. So when it sees 'r3 += rX' +instruction and rX is more than 16-bit value, any subsequent bounds-check of r3 +against skb->data_end will not give us 'range' information, so attempts to read +through the pointer will give "invalid access to packet" error. Ex. after insn 'r4 = *(u8 *)(r3 +12)' (insn #7 above) the state of r4 is -R4=inv56 which means that upper 56 bits on the register are guaranteed -to be zero. After insn 'r4 *= 14' the state becomes R4=inv52, since -multiplying 8-bit value by constant 14 will keep upper 52 bits as zero. -Similarly 'r2 >>= 48' will make R2=inv48, since the shift is not sign -extending. This logic is implemented in evaluate_reg_alu() function. +R4=inv(id=0,umax_value=255,var_off=(0x0; 0xff)) which means that upper 56 bits +of the register are guaranteed to be zero, and nothing is known about the lower +8 bits. After insn 'r4 *= 14' the state becomes +R4=inv(id=0,umax_value=3570,var_off=(0x0; 0xfffe)), since multiplying an 8-bit +value by constant 14 will keep upper 52 bits as zero, also the least significant +bit will be zero as 14 is even. Similarly 'r2 >>= 48' will make +R2=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff)), since the shift is not sign +extending. This logic is implemented in adjust_reg_min_max_vals() function, +which calls adjust_ptr_min_max_vals() for adding pointer to scalar (or vice +versa) and adjust_scalar_min_max_vals() for operations on two scalars. The end result is that bpf program author can access packet directly using normal C code as: @@ -1214,6 +1284,22 @@ The map is defined by: . key size in bytes . value size in bytes +Pruning +------- +The verifier does not actually walk all possible paths through the program. For +each new branch to analyse, the verifier looks at all the states it's previously +been in when at this instruction. If any of them contain the current state as a +subset, the branch is 'pruned' - that is, the fact that the previous state was +accepted implies the current state would be as well. For instance, if in the +previous state, r1 held a packet-pointer, and in the current state, r1 holds a +packet-pointer with a range as long or longer and at least as strict an +alignment, then r1 is safe. Similarly, if r2 was NOT_INIT before then it can't +have been used by any path from that point, so any value in r2 (including +another NOT_INIT) is safe. The implementation is in the function regsafe(). +Pruning considers not only the registers but also the stack (and any spilled +registers it may hold). They must all be safe for the branch to be pruned. +This is implemented in states_equal(). + Understanding eBPF verifier messages ------------------------------------ -- cgit v1.2.3-55-g7522 From 8e17c1b16277cba0e9426de6fe78817df378f45c Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Mon, 7 Aug 2017 15:30:30 +0100 Subject: bpf/verifier: increase complexity limit to 128k The more detailed value tracking can reduce the effectiveness of pruning for some programs. So, to avoid rejecting previously valid programs, up the limit to 128kinsns. Hopefully we will be able to bring this back down later by improving pruning performance. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- kernel/bpf/verifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 08a6fa0369c2..8160a81a40bf 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -140,7 +140,7 @@ struct bpf_verifier_stack_elem { struct bpf_verifier_stack_elem *next; }; -#define BPF_COMPLEXITY_LIMIT_INSNS 98304 +#define BPF_COMPLEXITY_LIMIT_INSNS 131072 #define BPF_COMPLEXITY_LIMIT_STACK 1024 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) -- cgit v1.2.3-55-g7522 From 46b3bb9b47ae63632892ad19976948b290aec2c6 Mon Sep 17 00:00:00 2001 From: Greg Edwards Date: Wed, 28 Jun 2017 09:22:26 -0600 Subject: igb: do not drop PF mailbox lock after read of VF message When the PF receives a mailbox message from the VF, it grabs the mailbox lock, reads the VF message from the mailbox, ACKs the message and drops the lock. While the PF is performing the action for the VF message, nothing prevents another VF message from being posted to the mailbox. The current code handles this condition by just dropping any new VF messages without processing them. This results in a mailbox timeout in the VM for posted messages waiting for an ACK, and the VF is reset by the igbvf_watchdog_task in the VM. Given the right sequence of VF messages and mailbox timeouts, this condition can go on ad infinitum. Modify the PF mailbox read method to take an 'unlock' argument that optionally leaves the mailbox locked by the PF after reading the VF message. This ensures another VF message is not posted to the mailbox until after the PF has completed processing the VF message and written its reply. Signed-off-by: Greg Edwards Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_hw.h | 3 ++- drivers/net/ethernet/intel/igb/e1000_mbx.c | 18 ++++++++++++------ drivers/net/ethernet/intel/igb/e1000_mbx.h | 3 ++- drivers/net/ethernet/intel/igb/igb_main.c | 14 ++++++++++---- 4 files changed, 26 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 6076f258a0a5..6ea9f702ba0f 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -491,7 +491,8 @@ struct e1000_fc_info { struct e1000_mbx_operations { s32 (*init_params)(struct e1000_hw *hw); - s32 (*read)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); + s32 (*read)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); s32 (*write)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); s32 (*read_posted)(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); s32 (*write_posted)(struct e1000_hw *hw, u32 *msg, u16 size, diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 6aa44723507b..bffd58f7b2a1 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -32,7 +32,8 @@ * * returns SUCCESS if it successfully read message from buffer **/ -s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; @@ -42,7 +43,7 @@ s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) size = mbx->size; if (mbx->ops.read) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); + ret_val = mbx->ops.read(hw, msg, size, mbx_id, unlock); return ret_val; } @@ -222,7 +223,7 @@ 
static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, ret_val = igb_poll_for_msg(hw, mbx_id); if (!ret_val) - ret_val = mbx->ops.read(hw, msg, size, mbx_id); + ret_val = mbx->ops.read(hw, msg, size, mbx_id, true); out: return ret_val; } @@ -423,13 +424,14 @@ out_no_write: * @msg: The message buffer * @size: Length of buffer * @vf_number: the VF index + * @unlock: unlock the mailbox when done? * * This function copies a message from the mailbox buffer to the caller's * memory buffer. The presumption is that the caller knows that there was * a message due to a VF request so no polling for message is needed. **/ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, - u16 vf_number) + u16 vf_number, bool unlock) { s32 ret_val; u16 i; @@ -443,8 +445,12 @@ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, for (i = 0; i < size; i++) msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); - /* Acknowledge the message and release buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + /* Acknowledge the message and release mailbox lock (or not) */ + if (unlock) + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + else + wr32(E1000_P2VMAILBOX(vf_number), + E1000_P2VMAILBOX_ACK | E1000_P2VMAILBOX_PFU); /* update stats */ hw->mbx.stats.msgs_rx++; diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h index a98c5dc60afd..a62b08e1572e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.h +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h @@ -67,7 +67,8 @@ #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ -s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); +s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id, + bool unlock); s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id); s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id); s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1a99164d5d11..fd4a46b03cc8 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -6675,32 +6675,33 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) struct vf_data_storage *vf_data = &adapter->vf_data[vf]; s32 retval; - retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf); + retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false); if (retval) { /* if receive failed revoke VF CTS stats and restart init */ dev_err(&pdev->dev, "Error receiving message from VF\n"); vf_data->flags &= ~IGB_VF_FLAG_CTS; if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) - return; + goto unlock; goto out; } /* this is a message we already processed, do nothing */ if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) - return; + goto unlock; /* until the vf completes a reset it should not be * allowed to start any configuration. 
*/ if (msgbuf[0] == E1000_VF_RESET) { + /* unlocks mailbox */ igb_vf_reset_msg(adapter, vf); return; } if (!(vf_data->flags & IGB_VF_FLAG_CTS)) { if (!time_after(jiffies, vf_data->last_nack + (2 * HZ))) - return; + goto unlock; retval = -1; goto out; } @@ -6741,7 +6742,12 @@ out: else msgbuf[0] |= E1000_VT_MSGTYPE_ACK; + /* unlocks mailbox */ igb_write_mbx(hw, msgbuf, 1, vf); + return; + +unlock: + igb_unlock_mbx(hw, vf); } static void igb_msg_task(struct igb_adapter *adapter) -- cgit v1.2.3-55-g7522 From 48f76b68f9fca4e1d5bbb1755d14e8e8e09bdd5b Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 17 Jul 2017 15:13:39 -0700 Subject: e1000e: Initial Support for IceLake i219 (8) and i219 (9) are the next LOM generations that will be available on the next Intel Client platform (IceLake). This patch provides the initial support for these devices Signed-off-by: Sasha Neftin Reviewed-by: Raanan Avargil Reviewed-by: Dima Ruinskiy Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/hw.h | 4 ++++ drivers/net/ethernet/intel/e1000e/netdev.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 66bd5060a65b..d803b1a12349 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -100,6 +100,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE #define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB #define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC +#define E1000_DEV_ID_PCH_ICP_I219_LM8 0x15DF +#define E1000_DEV_ID_PCH_ICP_I219_V8 0x15E0 +#define E1000_DEV_ID_PCH_ICP_I219_LM9 0x15E1 +#define E1000_DEV_ID_PCH_ICP_I219_V9 0x15E2 #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 2dcb5463d9b8..327dfe5bedc0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7544,6 +7544,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V6), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_LM7), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CNP_I219_V7), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM8), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V8), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; -- cgit v1.2.3-55-g7522 From 32652c2ac2f330d2c159d8259f359da0ee2b0aa3 Mon Sep 17 00:00:00 2001 From: Greg Edwards Date: Thu, 20 Jul 2017 10:00:57 -0600 Subject: igbvf: add lock around mailbox ops The PF driver assumes the VF will not send another mailbox message until the PF has written its reply to the previous message. If the VF does, that message will be silently dropped by the PF before it writes its reply to the mailbox. This results in a VF mailbox timeout for posted messages waiting for an ACK, and the VF is reset by the igbvf_watchdog_task in the VM. Add a lock around the VF mailbox ops to prevent the VF from sending another message while the PF is still processing the previous one. 
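For reference, the serialization pattern applied by the hunks below reduces to the following minimal sketch (illustrative only, assuming the new hw->mbx_lock field and the existing mailbox ops; it is not an extra hunk of this patch):

/* Illustrative sketch: every VF mailbox transaction is bracketed by
 * hw->mbx_lock so a second request cannot be posted while the PF is
 * still composing its reply to the previous one.
 */
static bool example_vf_link_check(struct igbvf_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val;

	spin_lock_bh(&hw->mbx_lock);	/* serialize VF<->PF mailbox traffic */
	ret_val = hw->mac.ops.check_for_link(hw);
	spin_unlock_bh(&hw->mbx_lock);	/* reply consumed, mailbox free again */

	return !ret_val && !hw->mac.get_link_status;
}
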
Signed-off-by: Greg Edwards Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igbvf/ethtool.c | 4 +++ drivers/net/ethernet/intel/igbvf/mbx.c | 4 +++ drivers/net/ethernet/intel/igbvf/netdev.c | 47 ++++++++++++++++++++++++++++++ drivers/net/ethernet/intel/igbvf/vf.h | 1 + 4 files changed, 56 insertions(+) diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 34faa113a8a0..a127688e83e6 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -296,8 +296,12 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) struct e1000_hw *hw = &adapter->hw; *data = 0; + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.check_for_link(hw); + spin_unlock_bh(&hw->mbx_lock); + if (!(er32(STATUS) & E1000_STATUS_LU)) *data = 1; diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 01752f44ace2..c9a441632e9f 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -264,6 +264,8 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) s32 err; u16 i; + WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock)); + /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); if (err) @@ -300,6 +302,8 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) s32 err; u16 i; + WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock)); + /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); if (err) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 1b9cbbe88f6f..1ed556911b14 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1235,7 +1235,12 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter) struct e1000_hw *hw = &adapter->hw; max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; + + spin_lock_bh(&hw->mbx_lock); + e1000_rlpml_set_vf(hw, max_frame_size); + + spin_unlock_bh(&hw->mbx_lock); } static int igbvf_vlan_rx_add_vid(struct net_device *netdev, @@ -1244,10 +1249,16 @@ static int igbvf_vlan_rx_add_vid(struct net_device *netdev, struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + spin_lock_bh(&hw->mbx_lock); + if (hw->mac.ops.set_vfta(hw, vid, true)) { dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); + spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } + + spin_unlock_bh(&hw->mbx_lock); + set_bit(vid, adapter->active_vlans); return 0; } @@ -1258,11 +1269,17 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; + spin_lock_bh(&hw->mbx_lock); + if (hw->mac.ops.set_vfta(hw, vid, false)) { dev_err(&adapter->pdev->dev, "Failed to remove vlan id %d\n", vid); + spin_unlock_bh(&hw->mbx_lock); return -EINVAL; } + + spin_unlock_bh(&hw->mbx_lock); + clear_bit(vid, adapter->active_vlans); return 0; } @@ -1428,7 +1445,11 @@ static void igbvf_set_multi(struct net_device *netdev) netdev_for_each_mc_addr(ha, netdev) memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); + + spin_unlock_bh(&hw->mbx_lock); kfree(mta_list); } @@ -1449,16 +1470,24 @@ static int igbvf_set_uni(struct net_device *netdev) return -ENOSPC; } + spin_lock_bh(&hw->mbx_lock); + /* Clear all unicast 
MAC filters */ hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_CLR, NULL); + spin_unlock_bh(&hw->mbx_lock); + if (!netdev_uc_empty(netdev)) { struct netdev_hw_addr *ha; /* Add MAC filters one by one */ netdev_for_each_uc_addr(ha, netdev) { + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.set_uc_addr(hw, E1000_VF_MAC_FILTER_ADD, ha->addr); + + spin_unlock_bh(&hw->mbx_lock); udelay(200); } } @@ -1503,12 +1532,16 @@ static void igbvf_reset(struct igbvf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; + spin_lock_bh(&hw->mbx_lock); + /* Allow time for pending master requests to run */ if (mac->ops.reset_hw(hw)) dev_err(&adapter->pdev->dev, "PF still resetting\n"); mac->ops.init_hw(hw); + spin_unlock_bh(&hw->mbx_lock); + if (is_valid_ether_addr(adapter->hw.mac.addr)) { memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); @@ -1643,6 +1676,7 @@ static int igbvf_sw_init(struct igbvf_adapter *adapter) igbvf_irq_disable(adapter); spin_lock_init(&adapter->stats_lock); + spin_lock_init(&adapter->hw.mbx_lock); set_bit(__IGBVF_DOWN, &adapter->state); return 0; @@ -1786,8 +1820,12 @@ static int igbvf_set_mac(struct net_device *netdev, void *p) memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + spin_lock_bh(&hw->mbx_lock); + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + spin_unlock_bh(&hw->mbx_lock); + if (!ether_addr_equal(addr->sa_data, hw->mac.addr)) return -EADDRNOTAVAIL; @@ -1858,7 +1896,12 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter) if (test_bit(__IGBVF_DOWN, &adapter->state)) return false; + spin_lock_bh(&hw->mbx_lock); + ret_val = hw->mac.ops.check_for_link(hw); + + spin_unlock_bh(&hw->mbx_lock); + link_active = !hw->mac.get_link_status; /* if check for link returns error we will need to reset */ @@ -2808,6 +2851,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; + spin_lock_bh(&hw->mbx_lock); + /*reset the controller to put the device in a known good state */ err = hw->mac.ops.reset_hw(hw); if (err) { @@ -2824,6 +2869,8 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->addr_len); } + spin_unlock_bh(&hw->mbx_lock); + if (!is_valid_ether_addr(netdev->dev_addr)) { dev_info(&pdev->dev, "Assigning random MAC address.\n"); eth_hw_addr_random(netdev); diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 4cf78b0dec50..d213eefb6169 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -245,6 +245,7 @@ struct e1000_hw { struct e1000_mac_info mac; struct e1000_mbx_info mbx; + spinlock_t mbx_lock; /* serializes mailbox ops */ union { struct e1000_dev_spec_vf vf; -- cgit v1.2.3-55-g7522 From 0d3ee0d9251ceb2c937e7c0c1adb366fdb0812c8 Mon Sep 17 00:00:00 2001 From: Greg Edwards Date: Thu, 20 Jul 2017 10:00:58 -0600 Subject: igbvf: after mailbox write, wait for reply Two of the VF mailbox commands were not waiting for a reply from the PF, which can result in a VF mailbox timeout in the VM for the next command. 
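The change boils down to the pattern sketched here (illustrative only, condensing the logic of the hunks below using the existing write_posted/read_posted mailbox ops rather than adding new code):

/* Illustrative sketch: a posted mailbox command is only complete once the
 * PF's reply has been read back; otherwise the ACK is left sitting in the
 * mailbox and the next command times out.
 */
static s32 example_post_mbx_cmd(struct e1000_hw *hw, u32 *msgbuf, u16 size)
{
	struct e1000_mbx_info *mbx = &hw->mbx;
	s32 ret_val;

	ret_val = mbx->ops.write_posted(hw, msgbuf, size);
	if (!ret_val)
		/* drain the PF's ACK so the next transaction starts clean */
		ret_val = mbx->ops.read_posted(hw, msgbuf, 1);

	return ret_val;
}
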
Signed-off-by: Greg Edwards Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igbvf/vf.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 528be116184e..1d3aa9adcaa8 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -230,6 +230,7 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u16 *hash_list = (u16 *)&msgbuf[1]; u32 hash_value; u32 cnt, i; + s32 ret_val; /* Each entry in the list uses 1 16 bit word. We have 30 * 16 bit words available in our HW msg buffer (minus 1 for the @@ -250,7 +251,9 @@ static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, mc_addr_list += ETH_ALEN; } - mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); + ret_val = mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); + if (!ret_val) + mbx->ops.read_posted(hw, msgbuf, 1); } /** @@ -293,11 +296,14 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) { struct e1000_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; + s32 ret_val; msgbuf[0] = E1000_VF_SET_LPE; msgbuf[1] = max_size; - mbx->ops.write_posted(hw, msgbuf, 2); + ret_val = mbx->ops.write_posted(hw, msgbuf, 2); + if (!ret_val) + mbx->ops.read_posted(hw, msgbuf, 1); } /** -- cgit v1.2.3-55-g7522 From d466124860cf96a6d4db5af5b3500f53f0d1bc7a Mon Sep 17 00:00:00 2001 From: Greg Edwards Date: Thu, 20 Jul 2017 10:15:14 -0600 Subject: igbvf: convert msleep to mdelay in atomic context This fixes a "scheduling while atomic" splat seen with CONFIG_DEBUG_ATOMIC_SLEEP enabled. Signed-off-by: Greg Edwards Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igbvf/vf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index 1d3aa9adcaa8..9577ccf4b26a 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -149,7 +149,7 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) msgbuf[0] = E1000_VF_RESET; mbx->ops.write_posted(hw, msgbuf, 1); - msleep(10); + mdelay(10); /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); -- cgit v1.2.3-55-g7522 From c99c287254e1d2816e2f4b01df9049b88b205bc5 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Mon, 7 Aug 2017 10:39:00 -0700 Subject: liquidio: fix wrong info about vf rx/tx ring parameters reported to ethtool Information reported to ethtool about vf rx/tx ring parameters is wrong. Fix it by adding the missing initializations. Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 30 +++++++--------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 09e287597c74..b78e296c4cba 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -649,33 +649,21 @@ lio_ethtool_get_ringparam(struct net_device *netdev, rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); - } else if (OCTEON_CN23XX_PF(oct)) { - struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); - + } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS; rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS; - rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf23, lio->ifidx); - tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf23, lio->ifidx); - } - - if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) { - ering->rx_pending = 0; - ering->rx_max_pending = 0; - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = rx_pending; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = rx_max_pending; - } else { - ering->rx_pending = rx_pending; - ering->rx_max_pending = rx_max_pending; - ering->rx_mini_pending = 0; - ering->rx_jumbo_pending = 0; - ering->rx_mini_max_pending = 0; - ering->rx_jumbo_max_pending = 0; + rx_pending = oct->droq[0]->max_count; + tx_pending = oct->instr_queue[0]->max_count; } ering->tx_pending = tx_pending; ering->tx_max_pending = tx_max_pending; + ering->rx_pending = rx_pending; + ering->rx_max_pending = rx_max_pending; + ering->rx_mini_pending = 0; + ering->rx_jumbo_pending = 0; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; } static u32 lio_get_msglevel(struct net_device *netdev) -- cgit v1.2.3-55-g7522 From eeb0149660a21c61122d4937bd406aa4f334c1e4 Mon Sep 17 00:00:00 2001 From: John W Linville Date: Fri, 21 Jul 2017 14:12:24 -0400 Subject: igb: support BCM54616 PHY The management port on an Edgecore AS7712-32 switch uses an igb MAC, but it uses a BCM54616 PHY. Without a patch like this, loading the igb module produces dmesg output like this: [ 3.439125] igb: Copyright (c) 2007-2014 Intel Corporation. 
[ 3.439866] igb: probe of 0000:00:14.0 failed with error -2 Signed-off-by: John W Linville Cc: Jeff Kirsher Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_82575.c | 6 ++++++ drivers/net/ethernet/intel/igb/e1000_defines.h | 1 + drivers/net/ethernet/intel/igb/e1000_hw.h | 1 + 3 files changed, 8 insertions(+) diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 4a50870e0fa7..c37cc8bccf47 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -340,6 +340,9 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; break; + case BCM54616_E_PHY_ID: + phy->type = e1000_phy_bcm54616; + break; default: ret_val = -E1000_ERR_PHY; goto out; @@ -1659,6 +1662,9 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) case e1000_phy_82580: ret_val = igb_copper_link_setup_82580(hw); break; + case e1000_phy_bcm54616: + ret_val = 0; + break; default: ret_val = -E1000_ERR_PHY; break; diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index d8517779439b..1de82f247312 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -889,6 +889,7 @@ #define I210_I_PHY_ID 0x01410C00 #define M88E1543_E_PHY_ID 0x01410EA0 #define M88E1512_E_PHY_ID 0x01410DD0 +#define BCM54616_E_PHY_ID 0x03625D10 /* M88E1000 Specific Registers */ #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 6ea9f702ba0f..6c9485ab4b57 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -128,6 +128,7 @@ enum e1000_phy_type { e1000_phy_ife, e1000_phy_82580, e1000_phy_i210, + e1000_phy_bcm54616, }; enum e1000_bus_type { -- cgit v1.2.3-55-g7522 From 7b83f52047e8a3d551a9495b0267df5d0754c5bf Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Mon, 7 Aug 2017 11:30:00 -0700 Subject: netvsc: make sure and unregister datapath Go back to switching datapath directly in the notifier callback. Otherwise datapath might not get switched on unregister. No need for calling the NOTIFY_PEERS notifier since that is only for a gratitious ARP/ND packet; but that is not required with Hyper-V because both VF and synthetic NIC have the same MAC address. Reported-by: Vitaly Kuznetsov Fixes: 0c195567a8f6 ("netvsc: transparent VF management") Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 3 -- drivers/net/hyperv/netvsc.c | 2 -- drivers/net/hyperv/netvsc_drv.c | 71 ++++++++++++++++------------------------- 3 files changed, 28 insertions(+), 48 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index c701b059c5ac..d1ea99a12cf2 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -724,14 +724,11 @@ struct net_device_context { struct net_device __rcu *vf_netdev; struct netvsc_vf_pcpu_stats __percpu *vf_stats; struct work_struct vf_takeover; - struct work_struct vf_notify; /* 1: allocated, serial number is valid. 
0: not allocated */ u32 vf_alloc; /* Serial number of the VF to team with */ u32 vf_serial; - - bool datapath; /* 0 - synthetic, 1 - VF nic */ }; /* Per channel data */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 9598220b3bcc..208f03aa83de 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -60,8 +60,6 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf) sizeof(struct nvsp_message), (unsigned long)init_pkt, VM_PKT_DATA_INBAND, 0); - - net_device_ctx->datapath = vf; } static struct netvsc_device *alloc_net_device(void) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index e75c0f852a63..eb0023f55fe1 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1649,55 +1649,35 @@ static int netvsc_register_vf(struct net_device *vf_netdev) return NOTIFY_OK; } -/* Change datapath */ -static void netvsc_vf_update(struct work_struct *w) +static int netvsc_vf_up(struct net_device *vf_netdev) { - struct net_device_context *ndev_ctx - = container_of(w, struct net_device_context, vf_notify); - struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); + struct net_device_context *net_device_ctx; struct netvsc_device *netvsc_dev; - struct net_device *vf_netdev; - bool vf_is_up; - - if (!rtnl_trylock()) { - schedule_work(w); - return; - } + struct net_device *ndev; - vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); - if (!vf_netdev) - goto unlock; + ndev = get_netvsc_byref(vf_netdev); + if (!ndev) + return NOTIFY_DONE; - netvsc_dev = rtnl_dereference(ndev_ctx->nvdev); + net_device_ctx = netdev_priv(ndev); + netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); if (!netvsc_dev) - goto unlock; - - vf_is_up = netif_running(vf_netdev); - if (vf_is_up != ndev_ctx->datapath) { - if (vf_is_up) { - netdev_info(ndev, "VF up: %s\n", vf_netdev->name); - rndis_filter_open(netvsc_dev); - netvsc_switch_datapath(ndev, true); - netdev_info(ndev, "Data path switched to VF: %s\n", - vf_netdev->name); - } else { - netdev_info(ndev, "VF down: %s\n", vf_netdev->name); - netvsc_switch_datapath(ndev, false); - rndis_filter_close(netvsc_dev); - netdev_info(ndev, "Data path switched from VF: %s\n", - vf_netdev->name); - } + return NOTIFY_DONE; - /* Now notify peers through VF device. */ - call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev); - } -unlock: - rtnl_unlock(); + /* Bump refcount when datapath is acvive - Why? */ + rndis_filter_open(netvsc_dev); + + /* notify the host to switch the data path. 
*/ + netvsc_switch_datapath(ndev, true); + netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name); + + return NOTIFY_OK; } -static int netvsc_vf_notify(struct net_device *vf_netdev) +static int netvsc_vf_down(struct net_device *vf_netdev) { struct net_device_context *net_device_ctx; + struct netvsc_device *netvsc_dev; struct net_device *ndev; ndev = get_netvsc_byref(vf_netdev); @@ -1705,7 +1685,13 @@ static int netvsc_vf_notify(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); - schedule_work(&net_device_ctx->vf_notify); + netvsc_dev = rtnl_dereference(net_device_ctx->nvdev); + if (!netvsc_dev) + return NOTIFY_DONE; + + netvsc_switch_datapath(ndev, false); + netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name); + rndis_filter_close(netvsc_dev); return NOTIFY_OK; } @@ -1721,7 +1707,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) net_device_ctx = netdev_priv(ndev); cancel_work_sync(&net_device_ctx->vf_takeover); - cancel_work_sync(&net_device_ctx->vf_notify); netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); @@ -1764,7 +1749,6 @@ static int netvsc_probe(struct hv_device *dev, spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); INIT_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); - INIT_WORK(&net_device_ctx->vf_notify, netvsc_vf_update); net_device_ctx->vf_stats = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); @@ -1915,8 +1899,9 @@ static int netvsc_netdev_event(struct notifier_block *this, case NETDEV_UNREGISTER: return netvsc_unregister_vf(event_dev); case NETDEV_UP: + return netvsc_vf_up(event_dev); case NETDEV_DOWN: - return netvsc_vf_notify(event_dev); + return netvsc_vf_down(event_dev); default: return NOTIFY_DONE; } -- cgit v1.2.3-55-g7522 From 58291a7465f6b88248c9f34807c16705bd5698f8 Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Mon, 7 Aug 2017 20:45:19 +0200 Subject: bpf: Move check_uarg_tail_zero() upward The function check_uarg_tail_zero() may be useful for other part of the code in the syscall.c file. Move this function at the beginning of the file. Signed-off-by: Mickaël Salaün Acked-by: Daniel Borkmann Cc: Alexei Starovoitov Cc: David S. Miller Cc: Kees Cook Cc: Martin KaFai Lau Signed-off-by: David S. 
Miller --- kernel/bpf/syscall.c | 52 ++++++++++++++++++++++++++-------------------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 6c772adabad2..c653ee0bd162 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -48,6 +48,32 @@ static const struct bpf_map_ops * const bpf_map_types[] = { #undef BPF_MAP_TYPE }; +static int check_uarg_tail_zero(void __user *uaddr, + size_t expected_size, + size_t actual_size) +{ + unsigned char __user *addr; + unsigned char __user *end; + unsigned char val; + int err; + + if (actual_size <= expected_size) + return 0; + + addr = uaddr + expected_size; + end = uaddr + actual_size; + + for (; addr < end; addr++) { + err = get_user(val, addr); + if (err) + return err; + if (val) + return -E2BIG; + } + + return 0; +} + static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) { struct bpf_map *map; @@ -1246,32 +1272,6 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr) return fd; } -static int check_uarg_tail_zero(void __user *uaddr, - size_t expected_size, - size_t actual_size) -{ - unsigned char __user *addr; - unsigned char __user *end; - unsigned char val; - int err; - - if (actual_size <= expected_size) - return 0; - - addr = uaddr + expected_size; - end = uaddr + actual_size; - - for (; addr < end; addr++) { - err = get_user(val, addr); - if (err) - return err; - if (val) - return -E2BIG; - } - - return 0; -} - static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, const union bpf_attr *attr, union bpf_attr __user *uattr) -- cgit v1.2.3-55-g7522 From 752ba56fb130b27c32c2ae6c82c8ef246b22106c Mon Sep 17 00:00:00 2001 From: Mickaël Salaün Date: Mon, 7 Aug 2017 20:45:20 +0200 Subject: bpf: Extend check_uarg_tail_zero() checks The function check_uarg_tail_zero() was created from bpf(2) for BPF_OBJ_GET_INFO_BY_FD without taking the access_ok() nor the PAGE_SIZE checks. Make this checks more generally available while unlikely to be triggered, extend the memory range check and add an explanation including why the ToCToU should not be a security concern. Signed-off-by: Mickaël Salaün Acked-by: Daniel Borkmann Cc: Alexei Starovoitov Cc: David S. Miller Cc: Kees Cook Cc: Martin KaFai Lau Link: https://lkml.kernel.org/r/CAGXu5j+vRGFvJZmjtAcT8Hi8B+Wz0e1b6VKYZHfQP_=DXzC4CQ@mail.gmail.com Signed-off-by: David S. Miller --- kernel/bpf/syscall.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index c653ee0bd162..fbe09a0cccf4 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -48,6 +48,15 @@ static const struct bpf_map_ops * const bpf_map_types[] = { #undef BPF_MAP_TYPE }; +/* + * If we're handed a bigger struct than we know of, ensure all the unknown bits + * are 0 - i.e. new user-space does not rely on any kernel feature extensions + * we don't know about yet. + * + * There is a ToCToU between this function call and the following + * copy_from_user() call. However, this is not a concern since this function is + * meant to be a future-proofing of bits. 
+ */ static int check_uarg_tail_zero(void __user *uaddr, size_t expected_size, size_t actual_size) @@ -57,6 +66,12 @@ static int check_uarg_tail_zero(void __user *uaddr, unsigned char val; int err; + if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ + return -E2BIG; + + if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size))) + return -EFAULT; + if (actual_size <= expected_size) return 0; @@ -1393,17 +1408,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled) return -EPERM; - if (!access_ok(VERIFY_READ, uattr, 1)) - return -EFAULT; - - if (size > PAGE_SIZE) /* silly large */ - return -E2BIG; - - /* If we're handed a bigger struct than we know of, - * ensure all the unknown bits are 0 - i.e. new - * user-space does not rely on any kernel feature - * extensions we dont know about yet. - */ err = check_uarg_tail_zero(uattr, sizeof(attr), size); if (err) return err; -- cgit v1.2.3-55-g7522 From ea6404c841011a1ed7b6eac64621d5f96e8b7183 Mon Sep 17 00:00:00 2001 From: Derek Chickles Date: Mon, 7 Aug 2017 12:22:15 -0700 Subject: liquidio: fix misspelled firmware image filenames Fix misspelled firmware image filenames advertised via MODULE_FIRMWARE(). Signed-off-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8c2cd8011bae..3ec0dd9b7201 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -39,10 +39,14 @@ MODULE_AUTHOR("Cavium Networks, "); MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(LIQUIDIO_VERSION); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX); -MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); static int ddr_timeout = 10000; module_param(ddr_timeout, int, 0644); -- cgit v1.2.3-55-g7522 From 996139e801fd145bc44b70b4f4bfa621d626f948 Mon Sep 17 00:00:00 2001 From: William Tu Date: Mon, 7 Aug 2017 13:14:42 -0700 Subject: selftests: bpf: add a test for XDP redirect Add test for xdp_redirect by creating two namespaces with two veth peers, then forward packets in-between. Signed-off-by: William Tu Cc: Daniel Borkmann Cc: John Fastabend Acked-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: David S. 
Miller --- tools/include/uapi/linux/bpf.h | 3 +- tools/testing/selftests/bpf/Makefile | 4 +- tools/testing/selftests/bpf/test_xdp_redirect.c | 28 ++++++++++++ tools/testing/selftests/bpf/test_xdp_redirect.sh | 54 ++++++++++++++++++++++++ 4 files changed, 86 insertions(+), 3 deletions(-) create mode 100644 tools/testing/selftests/bpf/test_xdp_redirect.c create mode 100755 tools/testing/selftests/bpf/test_xdp_redirect.sh diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 1579cab49717..8d9bfcca3fe4 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -592,7 +592,8 @@ union bpf_attr { FN(get_socket_uid), \ FN(set_hash), \ FN(setsockopt), \ - FN(skb_adjust_room), + FN(skb_adjust_room), \ + FN(redirect_map), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 153c3a181a4c..3c2e67da4b41 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -15,9 +15,9 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test test_align TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ - test_pkt_md_access.o + test_pkt_md_access.o test_xdp_redirect.o -TEST_PROGS := test_kmod.sh +TEST_PROGS := test_kmod.sh test_xdp_redirect.sh include ../lib.mk diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.c b/tools/testing/selftests/bpf/test_xdp_redirect.c new file mode 100644 index 000000000000..ef9e704be140 --- /dev/null +++ b/tools/testing/selftests/bpf/test_xdp_redirect.c @@ -0,0 +1,28 @@ +/* Copyright (c) 2017 VMware + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include +#include "bpf_helpers.h" + +int _version SEC("version") = 1; + +SEC("redirect_to_111") +int xdp_redirect_to_111(struct xdp_md *xdp) +{ + return bpf_redirect(111, 0); +} +SEC("redirect_to_222") +int xdp_redirect_to_222(struct xdp_md *xdp) +{ + return bpf_redirect(222, 0); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh new file mode 100755 index 000000000000..d8c73ed6e040 --- /dev/null +++ b/tools/testing/selftests/bpf/test_xdp_redirect.sh @@ -0,0 +1,54 @@ +#!/bin/sh +# Create 2 namespaces with two veth peers, and +# forward packets in-between using generic XDP +# +# NS1(veth11) NS2(veth22) +# | | +# | | +# (veth1, ------ (veth2, +# id:111) id:222) +# | xdp forwarding | +# ------------------ + +cleanup() +{ + if [ "$?" 
= "0" ]; then + echo "selftests: test_xdp_redirect [PASS]"; + else + echo "selftests: test_xdp_redirect [FAILED]"; + fi + + set +e + ip netns del ns1 2> /dev/null + ip netns del ns2 2> /dev/null +} + +set -e + +ip netns add ns1 +ip netns add ns2 + +trap cleanup 0 2 3 6 9 + +ip link add veth1 index 111 type veth peer name veth11 +ip link add veth2 index 222 type veth peer name veth22 + +ip link set veth11 netns ns1 +ip link set veth22 netns ns2 + +ip link set veth1 up +ip link set veth2 up + +ip netns exec ns1 ip addr add 10.1.1.11/24 dev veth11 +ip netns exec ns2 ip addr add 10.1.1.22/24 dev veth22 + +ip netns exec ns1 ip link set dev veth11 up +ip netns exec ns2 ip link set dev veth22 up + +ip link set dev veth1 xdpgeneric obj test_xdp_redirect.o sec redirect_to_222 +ip link set dev veth2 xdpgeneric obj test_xdp_redirect.o sec redirect_to_111 + +ip netns exec ns1 ping -c 1 10.1.1.22 +ip netns exec ns2 ping -c 1 10.1.1.11 + +exit 0 -- cgit v1.2.3-55-g7522 From 274cdb46e98b965449c68d912bcc45899ba34593 Mon Sep 17 00:00:00 2001 From: Egil Hjelmeland Date: Tue, 8 Aug 2017 00:22:21 +0200 Subject: net: dsa: lan9303: Only allocate 3 ports Save 2628 bytes on arm eabi by allocate only the required 3 ports. Now that ds->num_ports is correct: In net/dsa/tag_lan9303.c eliminate duplicate LAN9303_MAX_PORTS, use ds->num_ports. (Matching the pattern of other net/dsa/tag_xxx.c files.) Signed-off-by: Egil Hjelmeland Reviewed-by: Florian Fainelli Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/lan9303-core.c | 2 +- net/dsa/tag_lan9303.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 15befd155251..46fc1d5d3c9e 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -811,7 +811,7 @@ static struct dsa_switch_ops lan9303_switch_ops = { static int lan9303_register_switch(struct lan9303 *chip) { - chip->ds = dsa_switch_alloc(chip->dev, DSA_MAX_PORTS); + chip->ds = dsa_switch_alloc(chip->dev, LAN9303_NUM_PORTS); if (!chip->ds) return -ENOMEM; diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c index 247774d149f9..e23e7635fa00 100644 --- a/net/dsa/tag_lan9303.c +++ b/net/dsa/tag_lan9303.c @@ -39,7 +39,6 @@ */ #define LAN9303_TAG_LEN 4 -#define LAN9303_MAX_PORTS 3 static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -104,7 +103,7 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev, source_port = ntohs(lan9303_tag[1]) & 0x3; - if (source_port >= LAN9303_MAX_PORTS) { + if (source_port >= ds->num_ports) { dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n"); return NULL; } -- cgit v1.2.3-55-g7522 From 7120371c8ef1bca709308f6e41130b6fd87b7d15 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Mon, 7 Aug 2017 15:26:50 -0700 Subject: net_sched: get rid of some forward declarations If we move up tcf_fill_node() we can get rid of these forward declarations. Also, move down tfilter_notify_chain() to group them together. Reported-by: Jamal Hadi Salim Cc: Jamal Hadi Salim Signed-off-by: Cong Wang Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/sched/cls_api.c | 214 +++++++++++++++++++++++++--------------------------- 1 file changed, 103 insertions(+), 111 deletions(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 668afb6e9885..8d1157aebaf7 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -100,25 +100,6 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) } EXPORT_SYMBOL(unregister_tcf_proto_ops); -static int tfilter_notify(struct net *net, struct sk_buff *oskb, - struct nlmsghdr *n, struct tcf_proto *tp, - void *fh, int event, bool unicast); - -static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, - struct nlmsghdr *n, struct tcf_proto *tp, - void *fh, bool unicast, bool *last); - -static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, - struct nlmsghdr *n, - struct tcf_chain *chain, int event) -{ - struct tcf_proto *tp; - - for (tp = rtnl_dereference(chain->filter_chain); - tp; tp = rtnl_dereference(tp->next)) - tfilter_notify(net, oskb, n, tp, 0, event, false); -} - /* Select new prio value from the range, managed by kernel. */ static inline u32 tcf_auto_prio(struct tcf_proto *tp) @@ -411,6 +392,109 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain, return tp; } +static int tcf_fill_node(struct net *net, struct sk_buff *skb, + struct tcf_proto *tp, void *fh, u32 portid, + u32 seq, u16 flags, int event) +{ + struct tcmsg *tcm; + struct nlmsghdr *nlh; + unsigned char *b = skb_tail_pointer(skb); + + nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); + if (!nlh) + goto out_nlmsg_trim; + tcm = nlmsg_data(nlh); + tcm->tcm_family = AF_UNSPEC; + tcm->tcm__pad1 = 0; + tcm->tcm__pad2 = 0; + tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex; + tcm->tcm_parent = tp->classid; + tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); + if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) + goto nla_put_failure; + if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) + goto nla_put_failure; + if (!fh) { + tcm->tcm_handle = 0; + } else { + if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0) + goto nla_put_failure; + } + nlh->nlmsg_len = skb_tail_pointer(skb) - b; + return skb->len; + +out_nlmsg_trim: +nla_put_failure: + nlmsg_trim(skb, b); + return -1; +} + +static int tfilter_notify(struct net *net, struct sk_buff *oskb, + struct nlmsghdr *n, struct tcf_proto *tp, + void *fh, int event, bool unicast) +{ + struct sk_buff *skb; + u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; + + skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return -ENOBUFS; + + if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, + n->nlmsg_flags, event) <= 0) { + kfree_skb(skb); + return -EINVAL; + } + + if (unicast) + return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); + + return rtnetlink_send(skb, net, portid, RTNLGRP_TC, + n->nlmsg_flags & NLM_F_ECHO); +} + +static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, + struct nlmsghdr *n, struct tcf_proto *tp, + void *fh, bool unicast, bool *last) +{ + struct sk_buff *skb; + u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; + int err; + + skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return -ENOBUFS; + + if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, + n->nlmsg_flags, RTM_DELTFILTER) <= 0) { + kfree_skb(skb); + return -EINVAL; + } + + err = tp->ops->delete(tp, fh, last); + if (err) { + kfree_skb(skb); + return err; + } + + if (unicast) + return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); + + return rtnetlink_send(skb, net, portid, RTNLGRP_TC, + n->nlmsg_flags & NLM_F_ECHO); +} + +static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb, + struct nlmsghdr *n, + struct tcf_chain *chain, int event) +{ + struct tcf_proto *tp; + + for (tp = rtnl_dereference(chain->filter_chain); + tp; tp = rtnl_dereference(tp->next)) + tfilter_notify(net, oskb, n, tp, 0, event, false); +} + /* Add/change/delete/get a filter node */ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, @@ -640,98 +724,6 @@ errout: return err; } -static int tcf_fill_node(struct net *net, struct sk_buff *skb, - struct tcf_proto *tp, void *fh, u32 portid, - u32 seq, u16 flags, int event) -{ - struct tcmsg *tcm; - struct nlmsghdr *nlh; - unsigned char *b = skb_tail_pointer(skb); - - nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); - if (!nlh) - goto out_nlmsg_trim; - tcm = nlmsg_data(nlh); - tcm->tcm_family = AF_UNSPEC; - tcm->tcm__pad1 = 0; - tcm->tcm__pad2 = 0; - tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex; - tcm->tcm_parent = tp->classid; - tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); - if (nla_put_string(skb, TCA_KIND, tp->ops->kind)) - goto nla_put_failure; - if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index)) - goto nla_put_failure; - if (!fh) { - tcm->tcm_handle = 0; - } else { - if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0) - goto nla_put_failure; - } - nlh->nlmsg_len = skb_tail_pointer(skb) - b; - return skb->len; - -out_nlmsg_trim: -nla_put_failure: - nlmsg_trim(skb, b); - return -1; -} - -static int tfilter_notify(struct net *net, struct sk_buff *oskb, - struct nlmsghdr *n, struct tcf_proto *tp, - void *fh, int event, bool unicast) -{ - struct sk_buff *skb; - u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; - - skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) - return -ENOBUFS; - - if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, - n->nlmsg_flags, event) <= 0) { - kfree_skb(skb); - return -EINVAL; - } - - if (unicast) - return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); - - return rtnetlink_send(skb, net, portid, RTNLGRP_TC, - n->nlmsg_flags & NLM_F_ECHO); -} - -static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, - struct nlmsghdr *n, struct tcf_proto *tp, - void *fh, bool unicast, bool *last) -{ - struct sk_buff *skb; - u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; - int err; - - skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) - return -ENOBUFS; - - if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, - n->nlmsg_flags, RTM_DELTFILTER) <= 0) { - kfree_skb(skb); - return -EINVAL; - } - - err = tp->ops->delete(tp, fh, last); - if (err) { - kfree_skb(skb); - return err; - } - - if (unicast) - return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT); - - return rtnetlink_send(skb, net, portid, RTNLGRP_TC, - n->nlmsg_flags & NLM_F_ECHO); -} - struct tcf_dump_args { struct tcf_walker w; struct sk_buff *skb; -- cgit v1.2.3-55-g7522 From 4da1874190946b16d21b3c02e283b08619255c24 Mon Sep 17 00:00:00 2001 From: Arjun Vynipadath Date: Tue, 8 Aug 2017 11:20:52 +0530 Subject: cxgb4: Clear On FLASH config file after a FW upgrade Because Firmware and the Firmware Configuration File need to be in sync; clear out any On-FLASH Firmware Configuration File when new Firmware is loaded. This will avoid difficult to diagnose and fix problems with a mis-matched Firmware Configuration File which prevents the adapter from being initialized. Original work by: Casey Leedom Signed-off-by: Arjun Vynipadath Signed-off-by: Ganesh Goudar Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1 + drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 70 ++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 1978abbc6ceb..daa37750d152 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1405,6 +1405,7 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, int t4_fl_pkt_align(struct adapter *adap); unsigned int t4_flash_cfg_addr(struct adapter *adapter); int t4_check_fw_version(struct adapter *adap); +int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size); int t4_get_fw_version(struct adapter *adapter, u32 *vers); int t4_get_bs_version(struct adapter *adapter, u32 *vers); int t4_get_tp_version(struct adapter *adapter, u32 *vers); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 24087c886974..fff8fba86f97 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -6663,6 +6663,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, if (ret < 0) goto out; + /* + * If there was a Firmware Configuration File stored in FLASH, + * there's a good chance that it won't be compatible with the new + * Firmware. In order to prevent difficult to diagnose adapter + * initialization issues, we clear out the Firmware Configuration File + * portion of the FLASH . The user will need to re-FLASH a new + * Firmware Configuration File which is compatible with the new + * Firmware if that's desired. + */ + (void)t4_load_cfg(adap, NULL, 0); + /* * Older versions of the firmware don't understand the new * PCIE_FW.HALT flag and so won't know to perform a RESET when they @@ -8895,6 +8906,65 @@ void t4_idma_monitor(struct adapter *adapter, } } +/** + * t4_load_cfg - download config file + * @adap: the adapter + * @cfg_data: the cfg text file to write + * @size: text file size + * + * Write the supplied config text file to the card's serial flash. 
+ */ +int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size) +{ + int ret, i, n, cfg_addr; + unsigned int addr; + unsigned int flash_cfg_start_sec; + unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec; + + cfg_addr = t4_flash_cfg_addr(adap); + if (cfg_addr < 0) + return cfg_addr; + + addr = cfg_addr; + flash_cfg_start_sec = addr / SF_SEC_SIZE; + + if (size > FLASH_CFG_MAX_SIZE) { + dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n", + FLASH_CFG_MAX_SIZE); + return -EFBIG; + } + + i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */ + sf_sec_size); + ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec, + flash_cfg_start_sec + i - 1); + /* If size == 0 then we're simply erasing the FLASH sectors associated + * with the on-adapter Firmware Configuration File. + */ + if (ret || size == 0) + goto out; + + /* this will write to the flash up to SF_PAGE_SIZE at a time */ + for (i = 0; i < size; i += SF_PAGE_SIZE) { + if ((size - i) < SF_PAGE_SIZE) + n = size - i; + else + n = SF_PAGE_SIZE; + ret = t4_write_flash(adap, addr, n, cfg_data); + if (ret) + goto out; + + addr += SF_PAGE_SIZE; + cfg_data += SF_PAGE_SIZE; + } + +out: + if (ret) + dev_err(adap->pdev_dev, "config file %s failed %d\n", + (size == 0 ? "clear" : "download"), ret); + return ret; +} + /** * t4_set_vf_mac - Set MAC address for the specified VF * @adapter: The adapter -- cgit v1.2.3-55-g7522 From 51ce3e2145cc3927c9551b8c3623610114b41651 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 8 Aug 2017 10:52:32 +0100 Subject: net: phy: mdio-bcm-unimac: fix unsigned wrap-around when decrementing timeout Change post-decrement compare to pre-decrement to avoid an unsigned integer wrap-around on timeout. This leads to the following !timeout check to never to be true so -ETIMEDOUT is never returned. Detected by CoverityScan, CID#1452623 ("Logically dead code") Fixes: 69a60b0579a4 ("net: phy: mdio-bcm-unimac: factor busy polling loop") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/phy/mdio-bcm-unimac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index 89425ca48412..73c5267a11fd 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -71,7 +71,7 @@ static int unimac_mdio_poll(void *wait_func_data) return 0; usleep_range(1000, 2000); - } while (timeout--); + } while (--timeout); if (!timeout) return -ETIMEDOUT; -- cgit v1.2.3-55-g7522 From 3df3ba2d4dc12be2788f9d6ec6943ba3eccb7add Mon Sep 17 00:00:00 2001 From: Bjørn Mork Date: Tue, 8 Aug 2017 18:02:11 +0200 Subject: qmi_wwan: fix NULL deref on disconnect qmi_wwan_disconnect is called twice when disconnecting devices with separate control and data interfaces. The first invocation will set the interface data to NULL for both interfaces to flag that the disconnect has been handled. 
But the matching NULL check was left out when qmi_wwan_disconnect was added, resulting in this oops: usb 2-1.4: USB disconnect, device number 4 qmi_wwan 2-1.4:1.6 wwp0s29u1u4i6: unregister 'qmi_wwan' usb-0000:00:1d.0-1.4, WWAN/QMI device BUG: unable to handle kernel NULL pointer dereference at 00000000000000e0 IP: qmi_wwan_disconnect+0x25/0xc0 [qmi_wwan] PGD 0 P4D 0 Oops: 0000 [#1] SMP Modules linked in: CPU: 2 PID: 33 Comm: kworker/2:1 Tainted: G E 4.12.3-nr44-normandy-r1500619820+ #1 Hardware name: LENOVO 4291LR7/4291LR7, BIOS CBET4000 4.6-810-g50522254fb 07/21/2017 Workqueue: usb_hub_wq hub_event [usbcore] task: ffff8c882b716040 task.stack: ffffb8e800d84000 RIP: 0010:qmi_wwan_disconnect+0x25/0xc0 [qmi_wwan] RSP: 0018:ffffb8e800d87b38 EFLAGS: 00010246 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 RDX: 0000000000000001 RSI: ffff8c8824f3f1d0 RDI: ffff8c8824ef6400 RBP: ffff8c8824ef6400 R08: 0000000000000000 R09: 0000000000000000 R10: ffffb8e800d87780 R11: 0000000000000011 R12: ffffffffc07ea0e8 R13: ffff8c8824e2e000 R14: ffff8c8824e2e098 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff8c8835300000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000000e0 CR3: 0000000229ca5000 CR4: 00000000000406e0 Call Trace: ? usb_unbind_interface+0x71/0x270 [usbcore] ? device_release_driver_internal+0x154/0x210 ? qmi_wwan_unbind+0x6d/0xc0 [qmi_wwan] ? usbnet_disconnect+0x6c/0xf0 [usbnet] ? qmi_wwan_disconnect+0x87/0xc0 [qmi_wwan] ? usb_unbind_interface+0x71/0x270 [usbcore] ? device_release_driver_internal+0x154/0x210 Reported-and-tested-by: Nathaniel Roach Fixes: c6adf77953bc ("net: usb: qmi_wwan: add qmap mux protocol support") Cc: Daniele Palmas Signed-off-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/qmi_wwan.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 5894e3c9468f..68c23b0ee40d 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1340,10 +1340,14 @@ static int qmi_wwan_probe(struct usb_interface *intf, static void qmi_wwan_disconnect(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); - struct qmi_wwan_state *info = (void *)&dev->data; + struct qmi_wwan_state *info; struct list_head *iter; struct net_device *ldev; + /* called twice if separate control and data intf */ + if (!dev) + return; + info = (void *)&dev->data; if (info->flags & QMI_WWAN_FLAG_MUX) { if (!rtnl_trylock()) { restart_syscall(); -- cgit v1.2.3-55-g7522 From 585f46a827340f1a501980f8626a4fa4b53fa3d2 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 22:19:27 +0530 Subject: isdn: hisax: hfc_usb: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. 
Miller --- drivers/isdn/hisax/hfc_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c index ef4748083efd..e8212185d386 100644 --- a/drivers/isdn/hisax/hfc_usb.c +++ b/drivers/isdn/hisax/hfc_usb.c @@ -65,7 +65,7 @@ typedef struct { } hfcsusb_vdata; /* VID/PID device list */ -static struct usb_device_id hfcusb_idtab[] = { +static const struct usb_device_id hfcusb_idtab[] = { { USB_DEVICE(0x0959, 0x2bd0), .driver_info = (unsigned long) &((hfcsusb_vdata) -- cgit v1.2.3-55-g7522 From f374771d0f4c311a63a84a68684bbae7b48d7f0d Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 8 Aug 2017 22:19:28 +0530 Subject: isdn: hfcsusb: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/isdn/hardware/mISDN/hfcsusb.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.h b/drivers/isdn/hardware/mISDN/hfcsusb.h index 4157311d569d..5f8f1d9cac11 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.h +++ b/drivers/isdn/hardware/mISDN/hfcsusb.h @@ -337,7 +337,7 @@ static const char *HFC_NT_LAYER1_STATES[HFC_MAX_NT_LAYER1_STATE + 1] = { }; /* supported devices */ -static struct usb_device_id hfcsusb_idtab[] = { +static const struct usb_device_id hfcsusb_idtab[] = { { USB_DEVICE(0x0959, 0x2bd0), .driver_info = (unsigned long) &((struct hfcsusb_vdata) -- cgit v1.2.3-55-g7522 From feca7d8c135bc1527b244fe817b8b6498066ccec Mon Sep 17 00:00:00 2001 From: Vincent Bernat Date: Tue, 8 Aug 2017 20:23:49 +0200 Subject: net: ipv6: avoid overhead when no custom FIB rules are installed If the user hasn't installed any custom rules, don't go through the whole FIB rules layer. This is pretty similar to f4530fa574df (ipv4: Avoid overhead when no custom FIB rules are installed). 
Using a micro-benchmark module [1], timing ip6_route_output() with get_cycles(), with 40,000 routes in the main routing table, before this patch: min=606 max=12911 count=627 average=1959 95th=4903 90th=3747 50th=1602 mad=821 table=254 avgdepth=21.8 maxdepth=39 value │ ┊ count 600 │▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ 199 880 │▒▒▒░░░░░░░░░░░░░░░░ 43 1160 │▒▒▒░░░░░░░░░░░░░░░░░░░░ 48 1440 │▒▒▒░░░░░░░░░░░░░░░░░░░░░░░ 43 1720 │▒▒▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░ 59 2000 │▒▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 50 2280 │▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 26 2560 │▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 31 2840 │▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 28 3120 │▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 17 3400 │▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 17 3680 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 8 3960 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 11 4240 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 6 4520 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 6 4800 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 9 After: min=544 max=11687 count=627 average=1776 95th=4546 90th=3585 50th=1227 mad=565 table=254 avgdepth=21.8 maxdepth=39 value │ ┊ count 540 │▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ 201 800 │▒▒▒▒▒░░░░░░░░░░░░░░░░ 63 1060 │▒▒▒▒▒░░░░░░░░░░░░░░░░░░░░░ 68 1320 │▒▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░ 39 1580 │▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 32 1840 │▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 32 2100 │▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 34 2360 │▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 33 2620 │▒▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 26 2880 │▒░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 22 3140 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 9 3400 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 8 3660 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 9 3920 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 8 4180 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 8 4440 │░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 8 At the frequency of the host during the bench (~ 3.7 GHz), this is about a 100 ns difference on the median value. A next step would be to collapse local and main tables, as in 0ddcf43d5d4a (ipv4: FIB Local/MAIN table collapse). [1]: https://github.com/vincentbernat/network-lab/blob/master/lab-routes-ipv6/kbench_mod.c Signed-off-by: Vincent Bernat Reviewed-by: Jiri Pirko Acked-by: David Ahern Signed-off-by: David S. 
Miller --- include/net/netns/ipv6.h | 1 + net/ipv6/fib6_rules.c | 40 +++++++++++++++++++++++++++------------- net/ipv6/route.c | 1 + 3 files changed, 29 insertions(+), 13 deletions(-) diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index abdf3b40303b..0e50bf3ed097 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -65,6 +65,7 @@ struct netns_ipv6 { unsigned int ip6_rt_gc_expire; unsigned long ip6_rt_last_gc; #ifdef CONFIG_IPV6_MULTIPLE_TABLES + bool fib6_has_custom_rules; struct rt6_info *ip6_prohibit_entry; struct rt6_info *ip6_blk_hole_entry; struct fib6_table *fib6_local_tbl; diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 2f29e4e33bd3..b240f24a6e52 100644 --- a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -63,19 +63,32 @@ unsigned int fib6_rules_seq_read(struct net *net) struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6, int flags, pol_lookup_t lookup) { - struct fib_lookup_arg arg = { - .lookup_ptr = lookup, - .flags = FIB_LOOKUP_NOREF, - }; - - /* update flow if oif or iif point to device enslaved to l3mdev */ - l3mdev_update_flow(net, flowi6_to_flowi(fl6)); - - fib_rules_lookup(net->ipv6.fib6_rules_ops, - flowi6_to_flowi(fl6), flags, &arg); - - if (arg.result) - return arg.result; + if (net->ipv6.fib6_has_custom_rules) { + struct fib_lookup_arg arg = { + .lookup_ptr = lookup, + .flags = FIB_LOOKUP_NOREF, + }; + + /* update flow if oif or iif point to device enslaved to l3mdev */ + l3mdev_update_flow(net, flowi6_to_flowi(fl6)); + + fib_rules_lookup(net->ipv6.fib6_rules_ops, + flowi6_to_flowi(fl6), flags, &arg); + + if (arg.result) + return arg.result; + } else { + struct rt6_info *rt; + + rt = lookup(net, net->ipv6.fib6_local_tbl, fl6, flags); + if (rt != net->ipv6.ip6_null_entry && rt->dst.error != -EAGAIN) + return &rt->dst; + ip6_rt_put(rt); + rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags); + if (rt->dst.error != -EAGAIN) + return &rt->dst; + ip6_rt_put(rt); + } dst_hold(&net->ipv6.ip6_null_entry->dst); return &net->ipv6.ip6_null_entry->dst; @@ -245,6 +258,7 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, rule6->dst.plen = frh->dst_len; rule6->tclass = frh->tos; + net->ipv6.fib6_has_custom_rules = true; err = 0; errout: return err; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index aba07fce67fb..7ecbe5eb19f8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3934,6 +3934,7 @@ static int __net_init ip6_route_net_init(struct net *net) ip6_template_metrics, true); #ifdef CONFIG_IPV6_MULTIPLE_TABLES + net->ipv6.fib6_has_custom_rules = false; net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, sizeof(*net->ipv6.ip6_prohibit_entry), GFP_KERNEL); -- cgit v1.2.3-55-g7522 From 4aa234eeec0aa9414637bd193f0745cf91bb60ef Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 29 Jun 2017 09:14:45 +0200 Subject: iwlwifi: mvm: remove useless condition in LED code If the module parameter is set to disable the LED, we leave the initialization routine before setting the LEDS_INIT_COMPLETE status bit. Therefore, there's no need to check the parameter again on exit, just the status check is sufficient. 
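For readers following the init/exit pairing this cleanup relies on, here is a minimal, self-contained C sketch of the pattern (illustrative only; the flag and function names are stand-ins for the driver's IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE bit and iwl_mvm_leds_init()/iwl_mvm_leds_exit(), not the mvm code itself): the exit path keys off a status flag that is only ever set when init actually registered something, so the "LED disabled" case is covered without re-reading the module parameter.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the driver's init-status bit: set only when init really
 * registered the LED, so exit can rely on it alone. */
static bool leds_init_complete;

static int leds_init(bool led_disabled)
{
	if (led_disabled)
		return 0;	/* bail out early: the flag is never set */

	/* ... LED class device registration would happen here ... */
	leds_init_complete = true;
	return 0;
}

static void leds_exit(void)
{
	/* No need to re-check the "disabled" knob: if LEDs were disabled,
	 * the flag was never set and we return here anyway. */
	if (!leds_init_complete)
		return;

	/* ... LED class device unregistration would happen here ... */
	leds_init_complete = false;
}

int main(void)
{
	leds_init(true);	/* disabled: flag stays false */
	leds_exit();		/* returns early, nothing to undo */
	printf("init-complete flag after disabled run: %d\n", leds_init_complete);
	return 0;
}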
Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/led.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index 3cac4278a5fd..95ef37df292f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -129,8 +129,7 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm) void iwl_mvm_leds_exit(struct iwl_mvm *mvm) { - if (iwlwifi_mod_params.led_mode == IWL_LED_DISABLE || - !(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) + if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) return; led_classdev_unregister(&mvm->led); -- cgit v1.2.3-55-g7522 From 7089ae634c50544b29b31faf1a751e8765c8de3b Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 28 Jun 2017 16:19:49 +0200 Subject: iwlwifi: mvm: use firmware LED command where applicable On devices starting from 8000 series, the host can no longer toggle the LED through the CSR_LED_REG register, but must do it via the firmware instead. Add support for this. Note that this means that the LED cannot be turned on while the firmware is off, so using an arbitrary LED trigger may not work as expected. Fixes: 503ab8c56ca0 ("iwlwifi: Add 8000 HW family support") Signed-off-by: Johannes Berg Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/fw/api/commands.h | 5 ++ drivers/net/wireless/intel/iwlwifi/fw/api/led.h | 71 ++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 2 + drivers/net/wireless/intel/iwlwifi/mvm/led.c | 56 ++++++++++++++--- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 4 ++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 + 7 files changed, 130 insertions(+), 10 deletions(-) create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/api/led.h diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index c7b8cffdf281..0eb35b119ae9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -287,6 +287,11 @@ enum iwl_legacy_cmds { */ NON_QOS_TX_COUNTER_CMD = 0x2d, + /** + * @LEDS_CMD: command is &struct iwl_led_cmd + */ + LEDS_CMD = 0x48, + /** * @LQ_CMD: using &struct iwl_lq_cmd */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/led.h b/drivers/net/wireless/intel/iwlwifi/fw/api/led.h new file mode 100644 index 000000000000..b30c9d229d6e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/led.h @@ -0,0 +1,71 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
+ * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#ifndef __iwl_fw_api_led_h__ +#define __iwl_fw_api_led_h__ + +/** + * struct iwl_led_cmd - LED switching command + * + * @status: LED status (on/off) + */ +struct iwl_led_cmd { + __le32 status; +} __packed; /* LEDS_CMD_API_S_VER_2 */ + +#endif /* __iwl_fw_api_led_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 69336f38ac58..e8e74dd558f7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -83,6 +83,7 @@ #include "fw/api/commands.h" #include "fw/api/d3.h" #include "fw/api/filter.h" +#include "fw/api/led.h" #include "fw/api/mac.h" #include "fw/api/nvm-reg.h" #include "fw/api/phy-ctxt.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 875cf3a60adb..ec018d94a9dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1217,6 +1217,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; + iwl_mvm_leds_sync(mvm); + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c index 95ef37df292f..005e2e7278a5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
+ * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -66,26 +68,45 @@ #include "iwl-csr.h" #include "mvm.h" -/* Set led register on */ -static void iwl_mvm_led_enable(struct iwl_mvm *mvm) +static void iwl_mvm_send_led_fw_cmd(struct iwl_mvm *mvm, bool on) { - iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON); + struct iwl_led_cmd led_cmd = { + .status = cpu_to_le32(on), + }; + struct iwl_host_cmd cmd = { + .id = WIDE_ID(LONG_GROUP, LEDS_CMD), + .len = { sizeof(led_cmd), }, + .data = { &led_cmd, }, + .flags = CMD_ASYNC, + }; + int err; + + if (!iwl_mvm_firmware_running(mvm)) + return; + + err = iwl_mvm_send_cmd(mvm, &cmd); + + if (err) + IWL_WARN(mvm, "LED command failed: %d\n", err); } -/* Set led register off */ -static void iwl_mvm_led_disable(struct iwl_mvm *mvm) +static void iwl_mvm_led_set(struct iwl_mvm *mvm, bool on) { - iwl_write32(mvm->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF); + if (mvm->cfg->device_family >= IWL_DEVICE_FAMILY_8000) { + iwl_mvm_send_led_fw_cmd(mvm, on); + return; + } + + iwl_write32(mvm->trans, CSR_LED_REG, + on ? CSR_LED_REG_TURN_ON : CSR_LED_REG_TURN_OFF); } static void iwl_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led); - if (brightness > 0) - iwl_mvm_led_enable(mvm); - else - iwl_mvm_led_disable(mvm); + + iwl_mvm_led_set(mvm, brightness > 0); } int iwl_mvm_leds_init(struct iwl_mvm *mvm) @@ -127,6 +148,21 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm) return 0; } +void iwl_mvm_leds_sync(struct iwl_mvm *mvm) +{ + if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) + return; + + /* + * if we control through the register, we're doing it + * even when the firmware isn't up, so no need to sync + */ + if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) + return; + + iwl_mvm_led_set(mvm, mvm->led.brightness > 0); +} + void iwl_mvm_leds_exit(struct iwl_mvm *mvm) { if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE)) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index c274fe177dfa..8b6238e1c7ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1566,6 +1566,7 @@ void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, #ifdef CONFIG_IWLWIFI_LEDS int iwl_mvm_leds_init(struct iwl_mvm *mvm); void iwl_mvm_leds_exit(struct iwl_mvm *mvm); +void iwl_mvm_leds_sync(struct iwl_mvm *mvm); #else static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm) { @@ -1574,6 +1575,9 @@ static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm) static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm) { } +static inline void iwl_mvm_leds_sync(struct iwl_mvm *mvm) +{ +} #endif /* D3 (WoWLAN, NetDetect) */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 9c9c1b4b6d48..29a21a11c7f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -350,6 +350,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(BINDING_CONTEXT_CMD), 
HCMD_NAME(TIME_QUOTA_CMD), HCMD_NAME(NON_QOS_TX_COUNTER_CMD), + HCMD_NAME(LEDS_CMD), HCMD_NAME(LQ_CMD), HCMD_NAME(FW_PAGING_BLOCK_CMD), HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD), -- cgit v1.2.3-55-g7522 From b1a1efc57665f7c70e2051e0531837cb1985f11b Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Wed, 21 Jun 2017 14:10:42 +0530 Subject: iwlwifi: mvm: add const to thermal_cooling_device_ops structure Declare thermal_cooling_device_ops structure as const as it is only passed as an argument to the function thermal_cooling_device_register and this argument is of type const. So, declare the structure as const. Signed-off-by: Bhumika Goyal Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index a638bd69a1f9..3f143402cf7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -813,7 +813,7 @@ unlock: return ret; } -static struct thermal_cooling_device_ops tcooling_ops = { +static const struct thermal_cooling_device_ops tcooling_ops = { .get_max_state = iwl_mvm_tcool_get_max_state, .get_cur_state = iwl_mvm_tcool_get_cur_state, .set_cur_state = iwl_mvm_tcool_set_cur_state, -- cgit v1.2.3-55-g7522 From 61d8c626468a8193a5e890e9f633edc10aa05c1e Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Tue, 27 Jun 2017 14:13:02 +0300 Subject: iwlwifi: mvm: add debugfs to force CT-kill CT-kill is a thermal-based "RF-kill", which disables the NIC completely if the temperature gets too high, in order to avoid damage. Add a debugfs entry to simulate high temperatures, in order to test CT-kill flows in the driver without having to physically heat the device up. 
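As background for how such a debugfs trigger is wired up in general, a hedged, standalone sketch of a write-only debugfs hook follows (the mvm driver actually generates its file_operations through the MVM_DEBUGFS_WRITE_FILE_OPS macros shown in the diff; the module, directory and function names below, e.g. force_action, are invented for illustration): writing any bytes to the file fires the action and the callback returns the byte count, the same shape used by force_ctkill.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static struct dentry *demo_dir;

/* The buffer content is ignored; the write itself is the trigger, just as
 * force_ctkill only cares that a write happened. */
static ssize_t force_action_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	pr_info("force_action: simulated trigger\n");
	return count;
}

static const struct file_operations force_action_fops = {
	.owner = THIS_MODULE,
	.write = force_action_write,
	.llseek = default_llseek,
};

static int __init force_action_init(void)
{
	demo_dir = debugfs_create_dir("force_action_demo", NULL);
	debugfs_create_file("force_action", 0200, demo_dir, NULL,
			    &force_action_fops);
	return 0;
}

static void __exit force_action_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(force_action_init);
module_exit(force_action_exit);
MODULE_LICENSE("GPL");

Echoing anything into the resulting file under /sys/kernel/debug/force_action_demo/ fires the hook, which mirrors how the new force_ctkill entry lets CT-kill flows be exercised without heating the hardware.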
Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 14 ++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 2 +- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 29f1d1807415..0b5cae54b86b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -114,6 +114,18 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } +static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + if (!iwl_mvm_firmware_running(mvm) || + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; + + iwl_mvm_enter_ctkill(mvm); + + return count; +} + static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { @@ -1641,6 +1653,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget); MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); @@ -1828,6 +1841,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 8b6238e1c7ea..a6983042d3b8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1755,6 +1755,7 @@ void iwl_mvm_thermal_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm); int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 3f143402cf7f..8876c2abc440 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -71,7 +71,7 @@ #define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ -static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) +void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) { struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; u32 duration = tt->params.ct_kill_duration; -- cgit v1.2.3-55-g7522 From a4380b4eb3da772473efb38bfc1aaad8ddad2b84 Mon Sep 17 00:00:00 2001 From: Sharon Dvir Date: Mon, 12 Jun 2017 11:28:21 +0300 Subject: iwlwifi: change functions that can only return 0 to void Change iwl_set_ucode_api_flags(), iwl_set_ucode_capabilities() to be void. No need to check returned values. Found by Klocwork. 
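This is the classic "can only return 0, so make it void" cleanup: once a helper cannot fail, the callers' error branches are provably dead and disappear with it. A small, self-contained C illustration of the before/after shape (a generic sketch, not the iwl-drv.c code; set_capability_bit is an invented name):

#include <stdio.h>

/* Before: the helper returned int but every path returned 0, so callers
 * carried an error branch that could never be taken:
 *
 *	if (set_capability_bit(&capa, 5))
 *		goto tlv_error;		// dead code
 *
 * After: the helper is void and the caller simply calls it. */
static void set_capability_bit(unsigned int *capa, unsigned int bit)
{
	if (bit >= 32)
		return;		/* out-of-range bits are ignored (the driver
				 * logs them), not treated as an error */
	*capa |= 1u << bit;
}

int main(void)
{
	unsigned int capa = 0;

	set_capability_bit(&capa, 5);	/* no return value to check */
	set_capability_bit(&capa, 40);	/* silently ignored here */
	printf("capa = %#x\n", capa);
	return 0;
}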
Signed-off-by: Sharon Dvir Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 6fdb5921e17f..cdb765656115 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -478,8 +478,8 @@ static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data) return 0; } -static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, - struct iwl_ucode_capabilities *capa) +static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, + struct iwl_ucode_capabilities *capa) { const struct iwl_ucode_api *ucode_api = (void *)data; u32 api_index = le32_to_cpu(ucode_api->api_index); @@ -490,20 +490,17 @@ static int iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, IWL_ERR(drv, "api flags index %d larger than supported by driver\n", api_index); - /* don't return an error so we can load FW that has more bits */ - return 0; + return; } for (i = 0; i < 32; i++) { if (api_flags & BIT(i)) __set_bit(i + 32 * api_index, capa->_api); } - - return 0; } -static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, - struct iwl_ucode_capabilities *capa) +static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, + struct iwl_ucode_capabilities *capa) { const struct iwl_ucode_capa *ucode_capa = (void *)data; u32 api_index = le32_to_cpu(ucode_capa->api_index); @@ -514,16 +511,13 @@ static int iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, IWL_ERR(drv, "capa flags index %d larger than supported by driver\n", api_index); - /* don't return an error so we can load FW that has more bits */ - return 0; + return; } for (i = 0; i < 32; i++) { if (api_flags & BIT(i)) __set_bit(i + 32 * api_index, capa->_capa); } - - return 0; } static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv, @@ -765,14 +759,12 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, case IWL_UCODE_TLV_API_CHANGES_SET: if (tlv_len != sizeof(struct iwl_ucode_api)) goto invalid_tlv_len; - if (iwl_set_ucode_api_flags(drv, tlv_data, capa)) - goto tlv_error; + iwl_set_ucode_api_flags(drv, tlv_data, capa); break; case IWL_UCODE_TLV_ENABLED_CAPABILITIES: if (tlv_len != sizeof(struct iwl_ucode_capa)) goto invalid_tlv_len; - if (iwl_set_ucode_capabilities(drv, tlv_data, capa)) - goto tlv_error; + iwl_set_ucode_capabilities(drv, tlv_data, capa); break; case IWL_UCODE_TLV_INIT_EVTLOG_PTR: if (tlv_len != sizeof(u32)) -- cgit v1.2.3-55-g7522 From cbeb58ef36a01f190a1b61e52a60d4264d420c6b Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 28 Jun 2017 14:08:53 +0300 Subject: iwlwifi: fix a few instances of misaligned kerneldoc parameters There are a few places where we don't have a space between the * and the @ in the parameter description. Also, in one case, the @ had trailing space before the parameter name. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 95dbed609f3e..2909d1ed89cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -421,7 +421,7 @@ enum iwl_tx_status { * occur if tx failed for this frame when it was a member of a previous * aggregation block). 
If rate scaling is used, retry count indicates the * rate table entry used for all frames in the new agg. - *@ AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for + * @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for * this frame * * TODO: complete documentation -- cgit v1.2.3-55-g7522 From f6586b69b29ef2e4f3c606c3d2da731a5ff4f16c Mon Sep 17 00:00:00 2001 From: Tzipi Peres Date: Wed, 14 Jun 2017 10:02:47 +0300 Subject: iwlwifi: add support of FPGA fw Load FW according to NIC type, taking into account simulation, if exists. This is determined by a prph register. Signed-off-by: Tzipi Peres Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/cfg/a000.c | 14 ++++++++++++++ drivers/net/wireless/intel/iwlwifi/iwl-config.h | 1 + drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 6 ++++++ drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 11 +++++++++++ 4 files changed, 32 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c index 98f24cd1b44f..40d67a5a2635 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c @@ -75,11 +75,14 @@ #define IWL_A000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" #define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" #define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" +#define IWL_A000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" #define IWL_A000_HR_MODULE_FIRMWARE(api) \ IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode" #define IWL_A000_JF_MODULE_FIRMWARE(api) \ IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_A000_HR_QNJ_MODULE_FIRMWARE(api) \ + IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_A000 10 @@ -168,5 +171,16 @@ const struct iwl_cfg iwla000_2ax_cfg_hr = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; +const struct iwl_cfg iwla000_2ax_cfg_qnj_hr = { + .name = "Intel(R) Dual Band Wireless AX a000", + .fw_name_pre = IWL_A000_HR_F0_FW_PRE, + IWL_DEVICE_A000, + .ht_params = &iwl_a000_ht_params, + .nvm_ver = IWL_A000_NVM_VERSION, + .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_A000_HR_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index c52623cb7c2a..573dbeed3fbf 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -463,6 +463,7 @@ extern const struct iwl_cfg iwla000_2ac_cfg_hr; extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwla000_2ac_cfg_jf; extern const struct iwl_cfg iwla000_2ax_cfg_hr; +extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 6772c59b7764..fbce97ed4ecd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -404,6 +404,12 @@ enum aux_misc_master1_en { #define SB_CPU_2_STATUS 0xA01E34 #define UMAG_SB_CPU_1_STATUS 0xA038C0 #define UMAG_SB_CPU_2_STATUS 0xA038C4 +#define UMAG_GEN_HW_STATUS 0xA038C8 + +/* For UMAG_GEN_HW_STATUS reg check */ +enum { + UMAG_GEN_HW_IS_FPGA = BIT(1), +}; 
/* FW chicken bits */ #define LMPM_CHICK 0xA01FF8 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 32f06f14328c..439cf424e058 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3137,7 +3137,18 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, iwl_set_bit(trans, CSR_HOST_CHICKEN, CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME); +#if IS_ENABLED(CONFIG_IWLMVM) trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); + if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) { + u32 hw_status; + + hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); + if (hw_status & UMAG_GEN_HW_IS_FPGA) + trans->cfg = &iwla000_2ax_cfg_qnj_hr; + else + trans->cfg = &iwla000_2ac_cfg_hr; + } +#endif iwl_pcie_set_interrupt_capa(pdev, trans); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; -- cgit v1.2.3-55-g7522 From 36ae4f3aafe7768d00510a18e5ad17a55a4849a5 Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Sun, 2 Jul 2017 10:32:34 +0300 Subject: iwlwifi: fix a000 RF_ID define One of the defines has a wrong value. Fixes: 1afb0ae42174 ("iwlwifi: allow combining different phy images with mac images") Signed-off-by: Liad Kaufman Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index c6c1876c1ad4..560de9de5232 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -356,7 +356,7 @@ enum { #define CSR_HW_REV_TYPE_NONE (0x00001F0) /* RF_ID value */ -#define CSR_HW_RF_ID_TYPE_JF (0x00105000) +#define CSR_HW_RF_ID_TYPE_JF (0x00105100) #define CSR_HW_RF_ID_TYPE_HR (0x0010A000) #define CSR_HW_RF_ID_TYPE_HRCDB (0x00109000) -- cgit v1.2.3-55-g7522 From ce27f005c9e39f2e4081b0a8f6a5f395fc8eaa61 Mon Sep 17 00:00:00 2001 From: Golan Ben-Ami Date: Sun, 25 Jun 2017 17:23:23 +0300 Subject: iwlwifi: dump smem configuration when firmware crashes Add the smem configuration to the fw data dump, once the firmware crashes. This is useful mainly for later parsing of the smem. Signed-off-by: Golan Ben-Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 38 +++++++++++++++++++--- drivers/net/wireless/intel/iwlwifi/fw/error-dump.h | 30 +++++++++++++++++ drivers/net/wireless/intel/iwlwifi/fw/smem.c | 3 ++ 3 files changed, 66 insertions(+), 5 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 77245fcba996..6afc7a799892 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -545,11 +545,13 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) struct iwl_fw_error_dump_data *dump_data; struct iwl_fw_error_dump_info *dump_info; struct iwl_fw_error_dump_mem *dump_mem; + struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg; struct iwl_fw_error_dump_trigger_desc *dump_trig; struct iwl_fw_dump_ptrs *fw_error_dump; struct scatterlist *sg_dump_data; u32 sram_len, sram_ofs; const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv; + struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0; u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ? 
@@ -585,8 +587,6 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { - struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg; - fifo_data_len = 0; /* Count RXF2 size */ @@ -675,7 +675,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } file_len = sizeof(*dump_file) + - sizeof(*dump_data) * 2 + + sizeof(*dump_data) * 3 + + sizeof(*dump_smem_cfg) + fifo_data_len + prph_len + radio_len + @@ -706,8 +707,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) /* If we only want a monitor dump, reset the file length */ if (monitor_dump_only) { - file_len = sizeof(*dump_file) + sizeof(*dump_data) + - sizeof(*dump_info); + file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + + sizeof(*dump_info) + sizeof(*dump_smem_cfg); } if (fwrt->dump.desc) @@ -744,6 +745,33 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) sizeof(dump_info->bus_human_readable)); dump_data = iwl_fw_error_next_data(dump_data); + + /* Dump shared memory configuration */ + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); + dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); + dump_smem_cfg = (void *)dump_data->data; + dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); + dump_smem_cfg->num_txfifo_entries = + cpu_to_le32(mem_cfg->num_txfifo_entries); + for (i = 0; i < MAX_NUM_LMAC; i++) { + int j; + + for (j = 0; j < TX_FIFO_MAX_NUM; j++) + dump_smem_cfg->lmac[i].txfifo_size[j] = + cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]); + dump_smem_cfg->lmac[i].rxfifo1_size = + cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); + } + dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size); + dump_smem_cfg->internal_txfifo_addr = + cpu_to_le32(mem_cfg->internal_txfifo_addr); + for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { + dump_smem_cfg->internal_txfifo_size[i] = + cpu_to_le32(mem_cfg->internal_txfifo_size[i]); + } + + dump_data = iwl_fw_error_next_data(dump_data); + /* We only dump the FIFOs if the FW is in error state */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { iwl_fw_dump_fifos(fwrt, &dump_data); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index cfebde68a391..ed7beca8817e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -7,6 +7,7 @@ * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2014 Intel Corporation. All rights reserved. * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -92,6 +94,9 @@ * @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and * for that reason is not in use in any other place in the Linux Wi-Fi * stack. + * @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem, + * which we get from the fw after ALIVE. The content is structured as + * &struct iwl_fw_error_dump_smem_cfg. 
*/ enum iwl_fw_error_dump_type { /* 0 is deprecated */ @@ -110,6 +115,7 @@ enum iwl_fw_error_dump_type { IWL_FW_ERROR_DUMP_RADIO_REG = 13, IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14, IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */ + IWL_FW_ERROR_DUMP_MEM_CFG = 16, IWL_FW_ERROR_DUMP_MAX, }; @@ -208,6 +214,30 @@ struct iwl_fw_error_dump_fw_mon { u8 data[]; } __packed; +#define MAX_NUM_LMAC 2 +#define TX_FIFO_INTERNAL_MAX_NUM 6 +#define TX_FIFO_MAX_NUM 15 +/** + * struct iwl_fw_error_dump_smem_cfg - Dump SMEM configuration + * This must follow &struct iwl_fwrt_shared_mem_cfg. + * @num_lmacs: number of lmacs + * @num_txfifo_entries: number of tx fifos + * @lmac: sizes of lmacs txfifos and rxfifo1 + * @rxfifo2_size: size of rxfifo2 + * @internal_txfifo_addr: address of internal tx fifo + * @internal_txfifo_size: size of internal tx fifo + */ +struct iwl_fw_error_dump_smem_cfg { + __le32 num_lmacs; + __le32 num_txfifo_entries; + struct { + __le32 txfifo_size[TX_FIFO_MAX_NUM]; + __le32 rxfifo1_size; + } lmac[MAX_NUM_LMAC]; + __le32 rxfifo2_size; + __le32 internal_txfifo_addr; + __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM]; +} __packed; /** * struct iwl_fw_error_dump_prph - periphery registers data * @prph_start: address of the first register in this chunk diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c index 065a951cefba..76675736ba4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -113,6 +113,9 @@ static void iwl_parse_shared_mem(struct iwl_fw_runtime *fwrt, BUILD_BUG_ON(sizeof(fwrt->smem_cfg.internal_txfifo_size) != sizeof(mem_cfg->internal_txfifo_size)); + fwrt->smem_cfg.internal_txfifo_addr = + le32_to_cpu(mem_cfg->internal_txfifo_addr); + for (i = 0; i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size); i++) -- cgit v1.2.3-55-g7522 From c135cb564c6d4d7758fa68e5d0b37021398f4057 Mon Sep 17 00:00:00 2001 From: Shaul Triebitz Date: Thu, 22 Jun 2017 17:09:08 +0300 Subject: iwlwifi: mvm: move a000 device NVM retrieval to a common place Getting the NVM data in a000 devices should be shared across operation modes.
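For illustration only, here is a minimal userspace sketch of the error-pointer convention the new common helper relies on: iwl_fw_get_nvm() returns either the parsed NVM data or ERR_PTR(-errno), and the mvm caller recovers the errno with PTR_ERR(). The ERR_PTR/IS_ERR/PTR_ERR helpers below are simplified stand-ins for the kernel's linux/err.h versions, and get_nvm() is a hypothetical placeholder for the real helper.

/* Standalone sketch (not part of the patch) of the ERR_PTR return style. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      /* encode -errno as a pointer */
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)  /* decode it back to -errno */
{
        return (long)ptr;
}

static inline int IS_ERR(const void *ptr)    /* top MAX_ERRNO addresses mean error */
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct nvm_data { int version; };

/* Hypothetical helper: data on success, ERR_PTR(-errno) on failure. */
static struct nvm_data *get_nvm(int fail)
{
        struct nvm_data *nvm;

        if (fail)
                return ERR_PTR(-EINVAL);
        nvm = calloc(1, sizeof(*nvm));
        if (!nvm)
                return ERR_PTR(-ENOMEM);
        nvm->version = 1;
        return nvm;
}

int main(void)
{
        struct nvm_data *nvm = get_nvm(1);

        if (IS_ERR(nvm)) {                   /* mirrors the check added in mvm/fw.c */
                printf("Failed to read NVM: %ld\n", PTR_ERR(nvm));
                return 1;
        }
        free(nvm);
        return 0;
}

Returning ERR_PTR instead of NULL lets the shared helper report the exact failure reason without an extra output parameter, which keeps the opmode-facing API to a single call.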
Signed-off-by: Shaul Triebitz Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/Makefile | 2 +- drivers/net/wireless/intel/iwlwifi/fw/nvm.c | 162 ++++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 2 + drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 6 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 - drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 91 ------------- 6 files changed, 169 insertions(+), 95 deletions(-) create mode 100644 drivers/net/wireless/intel/iwlwifi/fw/nvm.c diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 5dcb4a848dba..35a32a3ec882 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -12,7 +12,7 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/a000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o -iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o +iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o iwlwifi-objs += $(iwlwifi-m) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c new file mode 100644 index 000000000000..ae03d0f5564f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c @@ -0,0 +1,162 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, + * USA + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#include "iwl-drv.h" +#include "runtime.h" +#include "fw/api/nvm-reg.h" +#include "fw/api/commands.h" +#include "iwl-nvm-parse.h" + +struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) +{ + struct iwl_nvm_get_info cmd = {}; + struct iwl_nvm_get_info_rsp *rsp; + struct iwl_trans *trans = fwrt->trans; + struct iwl_nvm_data *nvm; + struct iwl_host_cmd hcmd = { + .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, + .data = { &cmd, }, + .len = { sizeof(cmd) }, + .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) + }; + int ret; + bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && + fw_has_capa(&fwrt->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); + + ret = iwl_trans_send_cmd(trans, &hcmd); + if (ret) + return ERR_PTR(ret); + + if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp), + "Invalid payload len in NVM response from FW %d", + iwl_rx_packet_payload_len(hcmd.resp_pkt))) { + ret = -EINVAL; + goto out; + } + + rsp = (void *)hcmd.resp_pkt->data; + if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) + IWL_INFO(fwrt, "OTP is empty\n"); + + nvm = kzalloc(sizeof(*nvm) + + sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS, + GFP_KERNEL); + if (!nvm) { + ret = -ENOMEM; + goto out; + } + + iwl_set_hw_address_from_csr(trans, nvm); + /* TODO: if platform NVM has MAC address - override it here */ + + if (!is_valid_ether_addr(nvm->hw_addr)) { + IWL_ERR(fwrt, "no valid mac address was found\n"); + ret = -EINVAL; + goto err_free; + } + + IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr); + + /* Initialize general data */ + nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version); + + /* Initialize MAC sku data */ + nvm->sku_cap_11ac_enable = + le32_to_cpu(rsp->mac_sku.enable_11ac); + nvm->sku_cap_11n_enable = + le32_to_cpu(rsp->mac_sku.enable_11n); + nvm->sku_cap_band_24GHz_enable = + le32_to_cpu(rsp->mac_sku.enable_24g); + nvm->sku_cap_band_52GHz_enable = + le32_to_cpu(rsp->mac_sku.enable_5g); + nvm->sku_cap_mimo_disabled = + le32_to_cpu(rsp->mac_sku.mimo_disable); + + /* Initialize PHY sku data */ + nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); + nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); + + /* Initialize regulatory data */ + nvm->lar_enabled = + le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported; + + iwl_init_sbands(trans->dev, trans->cfg, nvm, + rsp->regulatory.channel_profile, + nvm->valid_tx_ant & fwrt->fw->valid_tx_ant, + 
nvm->valid_rx_ant & fwrt->fw->valid_rx_ant, + rsp->regulatory.lar_enabled && lar_fw_supported); + + iwl_free_resp(&hcmd); + return nvm; + +err_free: + kfree(nvm); +out: + iwl_free_resp(&hcmd); + return ERR_PTR(ret); +} +IWL_EXPORT_SYMBOL(iwl_fw_get_nvm); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 66bea6545690..50cfb6d795a5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -63,6 +63,7 @@ #include "img.h" #include "fw/api/debug.h" #include "fw/api/paging.h" +#include "iwl-eeprom-parse.h" struct iwl_fw_runtime_ops { int (*dump_start)(void *ctx); @@ -152,5 +153,6 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, struct iwl_rx_cmd_buffer *rxb); +struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt); #endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index ec018d94a9dd..ac782383443e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -412,8 +412,10 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) /* Read the NVM only at driver load time, no need to do this twice */ if (!IWL_MVM_PARSE_NVM && read_nvm) { - ret = iwl_mvm_nvm_get_from_fw(mvm); - if (ret) { + mvm->nvm_data = iwl_fw_get_nvm(&mvm->fwrt); + if (IS_ERR(mvm->nvm_data)) { + ret = PTR_ERR(mvm->nvm_data); + mvm->nvm_data = NULL; IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index a6983042d3b8..8ff74fbb2562 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1377,7 +1377,6 @@ void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic); -int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm); int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 5cc749261ce3..08020386c3d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -546,97 +546,6 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm) return ret; } -int iwl_mvm_nvm_get_from_fw(struct iwl_mvm *mvm) -{ - struct iwl_nvm_get_info cmd = {}; - struct iwl_nvm_get_info_rsp *rsp; - struct iwl_trans *trans = mvm->trans; - struct iwl_host_cmd hcmd = { - .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL, - .data = { &cmd, }, - .len = { sizeof(cmd) }, - .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) - }; - int ret; - bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_mvm_send_cmd(mvm, &hcmd); - if (ret) - return ret; - - if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp), - "Invalid payload len in NVM response from FW %d", - iwl_rx_packet_payload_len(hcmd.resp_pkt))) { - ret = -EINVAL; - goto out; - } - - rsp = (void *)hcmd.resp_pkt->data; - if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP) - IWL_INFO(mvm, "OTP is empty\n"); - - mvm->nvm_data = kzalloc(sizeof(*mvm->nvm_data) + - sizeof(struct 
ieee80211_channel) * - IWL_NUM_CHANNELS, GFP_KERNEL); - if (!mvm->nvm_data) { - ret = -ENOMEM; - goto out; - } - - iwl_set_hw_address_from_csr(trans, mvm->nvm_data); - /* TODO: if platform NVM has MAC address - override it here */ - - if (!is_valid_ether_addr(mvm->nvm_data->hw_addr)) { - IWL_ERR(trans, "no valid mac address was found\n"); - ret = -EINVAL; - goto err_free; - } - - IWL_INFO(trans, "base HW address: %pM\n", mvm->nvm_data->hw_addr); - - /* Initialize general data */ - mvm->nvm_data->nvm_version = le16_to_cpu(rsp->general.nvm_version); - - /* Initialize MAC sku data */ - mvm->nvm_data->sku_cap_11ac_enable = - le32_to_cpu(rsp->mac_sku.enable_11ac); - mvm->nvm_data->sku_cap_11n_enable = - le32_to_cpu(rsp->mac_sku.enable_11n); - mvm->nvm_data->sku_cap_band_24GHz_enable = - le32_to_cpu(rsp->mac_sku.enable_24g); - mvm->nvm_data->sku_cap_band_52GHz_enable = - le32_to_cpu(rsp->mac_sku.enable_5g); - mvm->nvm_data->sku_cap_mimo_disabled = - le32_to_cpu(rsp->mac_sku.mimo_disable); - - /* Initialize PHY sku data */ - mvm->nvm_data->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); - mvm->nvm_data->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); - - /* Initialize regulatory data */ - mvm->nvm_data->lar_enabled = - le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported; - - iwl_init_sbands(trans->dev, trans->cfg, mvm->nvm_data, - rsp->regulatory.channel_profile, - mvm->nvm_data->valid_tx_ant & mvm->fw->valid_tx_ant, - mvm->nvm_data->valid_rx_ant & mvm->fw->valid_rx_ant, - rsp->regulatory.lar_enabled && lar_fw_supported); - - iwl_free_resp(&hcmd); - return 0; - -err_free: - kfree(mvm->nvm_data); -out: - iwl_free_resp(&hcmd); - return ret; -} - int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) { int ret, section; -- cgit v1.2.3-55-g7522 From 944eafc255500102b69940f425eb073bfab933db Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Sun, 9 Jul 2017 16:35:14 +0300 Subject: iwlwifi: mvm: set the default cTDP budget In case there is no value received from BIOS for cTDP budget, the default should be 2000 mWatt. Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index ac782383443e..0099050f6e2b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1173,7 +1173,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm) } /* TODO: read the budget from BIOS / Platform NVM */ - if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) { + + /* + * In case there is no budget from BIOS / Platform NVM the default + * budget should be 2000mW (cooling state 0). + */ + if (iwl_mvm_is_ctdp_supported(mvm)) { ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, mvm->cooling_dev.cur_state); if (ret) -- cgit v1.2.3-55-g7522 From 6ca33f8bae01bd3f1949287a0339c3e6f9e96aca Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Mon, 12 Jun 2017 14:59:02 +0300 Subject: iwlwifi: mvm: support new beacon template command Support a new version of the beacon template command. This replaces v8 of the command, which was missing the rate code. Also, export rate decision logic to a separate function. 
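For illustration, a standalone sketch of the flags packing the v9 command uses: the hardware rate code goes in the least significant byte and the bits from enum iwl_mac_beacon_flags (such as IWL_MAC_BEACON_CCK) occupy the most significant byte. The idx_to_hwrate() helper and the rate indices here are made-up stand-ins; only the bit layout follows the patch.

#include <stdio.h>
#include <stdint.h>

#define IWL_MAC_BEACON_CCK (1 << 8)     /* MSB flag bit, as in the patch */

/* Hypothetical stand-in for iwl_mvm_mac80211_idx_to_hwrate(). */
static uint8_t idx_to_hwrate(int rate_idx)
{
        return (uint8_t)(rate_idx & 0xff);
}

static uint16_t build_beacon_flags(int rate_idx, int is_cck)
{
        uint16_t flags = idx_to_hwrate(rate_idx);   /* low byte: rate code */

        if (is_cck)
                flags |= IWL_MAC_BEACON_CCK;        /* high byte: beacon flags */
        return flags;
}

int main(void)
{
        printf("cck beacon:  flags = 0x%04x\n", build_beacon_flags(0, 1));
        printf("ofdm beacon: flags = 0x%04x\n", build_beacon_flags(4, 0));
        return 0;
}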
Signed-off-by: Haim Dreyfuss Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 17 +++++--- drivers/net/wireless/intel/iwlwifi/fw/file.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 47 ++++++++++++++++------- 3 files changed, 47 insertions(+), 18 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 2909d1ed89cf..4928310ddd31 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -786,13 +786,20 @@ struct iwl_mac_beacon_cmd_v7 { struct ieee80211_hdr frame[0]; } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */ +enum iwl_mac_beacon_flags { + IWL_MAC_BEACON_CCK = BIT(8), + IWL_MAC_BEACON_ANT_A = BIT(9), + IWL_MAC_BEACON_ANT_B = BIT(10), + IWL_MAC_BEACON_ANT_C = BIT(11), +}; + /** * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA - * @byte_cnt: byte count of the beacon frame - * @flags: for future use + * @byte_cnt: byte count of the beacon frame. + * @flags: least significant byte for rate code. The most significant byte + * is &enum iwl_mac_beacon_flags. * @reserved: reserved - * @template_id: currently equal to the mac context id of the coresponding - * mac. + * @template_id: currently equal to the mac context id of the coresponding mac. * @tim_idx: the offset of the tim IE in the beacon * @tim_size: the length of the tim IE * @ecsa_offset: offset to the ECSA IE if present @@ -809,7 +816,7 @@ struct iwl_mac_beacon_cmd { __le32 ecsa_offset; __le32 csa_offset; struct ieee80211_hdr frame[0]; -} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_8 */ +} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_9 */ struct iwl_beacon_notif { struct iwl_mvm_tx_resp beacon_notify_hdr; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 0fa8c473f1e2..d933aa324ffe 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -260,6 +260,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31, /* API Set 1 */ + IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, NUM_IWL_UCODE_TLV_API diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 8fe955d58c6e..a2bf530eeae4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -923,6 +923,19 @@ static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size) return ie - beacon; } +static u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info, + struct ieee80211_vif *vif) +{ + u8 rate; + + if (info->band == NL80211_BAND_5GHZ || vif->p2p) + rate = IWL_FIRST_OFDM_RATE; + else + rate = IWL_FIRST_CCK_RATE; + + return rate; +} + static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon, @@ -930,7 +943,8 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct ieee80211_tx_info *info; - u32 rate, tx_flags; + u8 rate; + u32 tx_flags; info = IEEE80211_SKB_CB(beacon); @@ -955,14 +969,12 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS); - if (info->band == 
NL80211_BAND_5GHZ || vif->p2p) { - rate = IWL_FIRST_OFDM_RATE; - } else { - rate = IWL_FIRST_CCK_RATE; - tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); - } + rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate)); + if (rate == IWL_FIRST_CCK_RATE) + tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK); + } static int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm, @@ -1033,19 +1045,27 @@ static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm, sizeof(beacon_cmd)); } -static int iwl_mvm_mac_ctxt_send_beacon_v8(struct iwl_mvm *mvm, +static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct sk_buff *beacon) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon); struct iwl_mac_beacon_cmd beacon_cmd = {}; + u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif); + u16 flags; + + flags = iwl_mvm_mac80211_idx_to_hwrate(rate); + if (rate == IWL_FIRST_CCK_RATE) + flags |= IWL_MAC_BEACON_CCK; + + beacon_cmd.flags = cpu_to_le16(flags); beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len); beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id); if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_mac_ctxt_set_tim(mvm, - &beacon_cmd.tim_idx, + iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx, &beacon_cmd.tim_size, beacon->data, beacon->len); @@ -1073,10 +1093,11 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD)) return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon); - if (!iwl_mvm_has_new_tx_api(mvm)) - return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon); + if (fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE)) + return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon); - return iwl_mvm_mac_ctxt_send_beacon_v8(mvm, vif, beacon); + return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon); } /* The beacon template for the AP/GO/IBSS has changed and needs update */ -- cgit v1.2.3-55-g7522 From 0bef1b83d35566e9bc757469c823c126862a65d3 Mon Sep 17 00:00:00 2001 From: Matt Chen Date: Fri, 23 Jun 2017 17:50:18 +0800 Subject: iwlwifi: mvm: don't send CTDP commands via debugfs if not supported Fix this issue if it is not supported by the firmware. 
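For illustration, a minimal sketch of the ordering of checks the fix introduces: report the operation as unsupported before looking at the firmware state, so userspace gets -EOPNOTSUPP rather than a misleading -EIO. The structure and function names here are hypothetical; only the check ordering reflects the patch.

#include <stdio.h>
#include <errno.h>

struct dev_state {
        int ctdp_supported;   /* capability advertised by the firmware */
        int fw_running;       /* a regular firmware image is loaded */
};

/* Hypothetical debugfs-style handler. */
static int ctdp_budget_read(const struct dev_state *dev)
{
        if (!dev->ctdp_supported)
                return -EOPNOTSUPP;   /* feature is not there at all */
        if (!dev->fw_running)
                return -EIO;          /* feature exists but fw is down */
        return 0;                     /* the CTDP command would be sent here */
}

int main(void)
{
        struct dev_state dev = { .ctdp_supported = 0, .fw_running = 1 };

        printf("ret = %d\n", ctdp_budget_read(&dev));
        return 0;
}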
Fixes: 00f481bd895a ("iwlwifi: mvm: add ctdp operations to debugfs") Signed-off-by: Matt Chen Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 0b5cae54b86b..ceb486610a56 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -82,6 +82,9 @@ static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, char buf[16]; int pos, budget; + if (!iwl_mvm_is_ctdp_supported(mvm)) + return -EOPNOTSUPP; + if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; @@ -103,6 +106,9 @@ static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, { int ret; + if (!iwl_mvm_is_ctdp_supported(mvm)) + return -EOPNOTSUPP; + if (!iwl_mvm_firmware_running(mvm) || mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) return -EIO; -- cgit v1.2.3-55-g7522 From 732d06e9d9cf96c39cd1c0cd16472930a5583bb7 Mon Sep 17 00:00:00 2001 From: Shaul Triebitz Date: Mon, 10 Jul 2017 19:58:10 +0300 Subject: iwlwifi: mvm: add station before allocating a queue One of the queue config params is the associated station id. Hence the FW must know about the station prior to the queue allocation. In a000 devices, allocating a queue without a valid station results with assert 0x2B00. In FW restart flow the queues are allocated before adding the station so first add the station. Signed-off-by: Shaul Triebitz Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 108 ++++++++++++++++----------- 1 file changed, 63 insertions(+), 45 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 922cd5379841..f88202c38d4c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1277,6 +1277,50 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm, } } +static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, + struct iwl_mvm_int_sta *sta, + const u8 *addr, + u16 mac_id, u16 color) +{ + struct iwl_mvm_add_sta_cmd cmd; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + memset(&cmd, 0, sizeof(cmd)); + cmd.sta_id = sta->sta_id; + cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, + color)); + if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) + cmd.station_type = sta->type; + + if (!iwl_mvm_has_new_tx_api(mvm)) + cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); + cmd.tid_disable_tx = cpu_to_le16(0xffff); + + if (addr) + memcpy(cmd.addr, addr, ETH_ALEN); + + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), + &cmd, &status); + if (ret) + return ret; + + switch (status & IWL_ADD_STA_STATUS_MASK) { + case ADD_STA_SUCCESS: + IWL_DEBUG_INFO(mvm, "Internal station added.\n"); + return 0; + default: + ret = -EIO; + IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", + status); + break; + } + return ret; +} + int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -1285,6 +1329,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_rxq_dup_data *dup_data; int i, ret, sta_id; + bool sta_update = false; + unsigned int sta_flags = 0; lockdep_assert_held(&mvm->mutex); @@ -1301,7 +1347,23 @@ int iwl_mvm_add_sta(struct iwl_mvm 
*mvm, /* if this is a HW restart re-alloc existing queues */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + struct iwl_mvm_int_sta tmp_sta = { + .sta_id = sta_id, + .type = mvm_sta->sta_type, + }; + + /* + * First add an empty station since allocating + * a queue requires a valid station + */ + ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr, + mvmvif->id, mvmvif->color); + if (ret) + goto err; + iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta); + sta_update = true; + sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES; goto update_fw; } @@ -1368,7 +1430,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, } update_fw: - ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0); + ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); if (ret) goto err; @@ -1637,50 +1699,6 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) sta->sta_id = IWL_MVM_INVALID_STA; } -static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, - struct iwl_mvm_int_sta *sta, - const u8 *addr, - u16 mac_id, u16 color) -{ - struct iwl_mvm_add_sta_cmd cmd; - int ret; - u32 status; - - lockdep_assert_held(&mvm->mutex); - - memset(&cmd, 0, sizeof(cmd)); - cmd.sta_id = sta->sta_id; - cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, - color)); - if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) - cmd.station_type = sta->type; - - if (!iwl_mvm_has_new_tx_api(mvm)) - cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); - cmd.tid_disable_tx = cpu_to_le16(0xffff); - - if (addr) - memcpy(cmd.addr, addr, ETH_ALEN); - - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, - iwl_mvm_add_sta_cmd_size(mvm), - &cmd, &status); - if (ret) - return ret; - - switch (status & IWL_ADD_STA_STATUS_MASK) { - case ADD_STA_SUCCESS: - IWL_DEBUG_INFO(mvm, "Internal station added.\n"); - return 0; - default: - ret = -EIO; - IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", - status); - break; - } - return ret; -} - static void iwl_mvm_enable_aux_queue(struct iwl_mvm *mvm) { unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? -- cgit v1.2.3-55-g7522 From 39fff7599397e1c9fdf54093f1f4c3146066c24b Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 13 Jul 2017 09:49:32 +0300 Subject: iwlwifi: pcie: don't init a Tx queue with an SSN > size of the queue The TVQM tells us the initial write pointer for a queue, but that write pointer is in WiFi sequence number unit and not in TFD index unit. Which means that the write pointer in the TVQM's response can be bigger than the Tx queue ring size. Fix that by modulo'ing the write pointer from the TVQM with the Tx queue size. 
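For illustration, a standalone sketch of the reduction the fix performs: because the TFD ring size is a power of two, masking the sequence-number-based write pointer with (size - 1) is the same as taking it modulo the ring size. The value of TFD_QUEUE_SIZE_MAX below is an assumption for the example.

#include <stdio.h>
#include <stdint.h>

#define TFD_QUEUE_SIZE_MAX 256   /* assumed power-of-two ring size */

/* For a power-of-two ring, "& (size - 1)" equals "% size". */
static uint16_t seq_to_ring_index(uint16_t wr_ptr)
{
        return wr_ptr & (TFD_QUEUE_SIZE_MAX - 1);
}

int main(void)
{
        /* A TVQM write pointer in sequence-number units can exceed the ring. */
        const uint16_t examples[] = { 5, 255, 256, 1000 };

        for (unsigned int i = 0; i < sizeof(examples) / sizeof(examples[0]); i++)
                printf("wr_ptr %4u -> index %3u (modulo gives %3u)\n",
                       examples[i], seq_to_ring_index(examples[i]),
                       examples[i] % TFD_QUEUE_SIZE_MAX);
        return 0;
}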
Fixes: 66128fa08806 ("iwlwifi: move to TVQM mode") Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 5dc785d4c167..83a28892dc4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1033,6 +1033,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, .flags = CMD_WANT_SKB, }; int ret, qid; + u32 wr_ptr; txq = kzalloc(sizeof(*txq), GFP_KERNEL); if (!txq) @@ -1073,6 +1074,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, rsp = (void *)hcmd.resp_pkt->data; qid = le16_to_cpu(rsp->queue_number); + wr_ptr = le16_to_cpu(rsp->write_pointer); if (qid >= ARRAY_SIZE(trans_pcie->txq)) { WARN_ONCE(1, "queue index %d unsupported", qid); @@ -1088,10 +1090,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, txq->id = qid; trans_pcie->txq[qid] = txq; + wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1); /* Place first TFD at index corresponding to start sequence number */ - txq->read_ptr = le16_to_cpu(rsp->write_pointer); - txq->write_ptr = le16_to_cpu(rsp->write_pointer); + txq->read_ptr = wr_ptr; + txq->write_ptr = wr_ptr; iwl_write_direct32(trans, HBUS_TARG_WRPTR, (txq->write_ptr) | (qid << 16)); IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); -- cgit v1.2.3-55-g7522 From f4ca70ef931fc7191d6de2feedffb81eccec1982 Mon Sep 17 00:00:00 2001 From: Golan Ben-Ami Date: Wed, 28 Jun 2017 13:03:58 +0300 Subject: iwlwifi: fix nmi triggering from host Although nmi was triggered fine till now, it appears that the driver didn't write the exact correct values to the correct addresses for each HW. Fix the nmi triggering by setting the correct addresses and values. 
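For illustration, a standalone sketch of the per-family dispatch the fix establishes: device families below 9000 write DEVICE_SET_NMI_VAL_DRV to DEVICE_SET_NMI_REG, while 9000-family and later parts write the driver mask to UREG_NIC_SET_NMI_DRIVER. The register offsets and values are taken from the iwl-prph.h hunk below; the enum values and the write_prph() stub are stand-ins for the example.

#include <stdio.h>
#include <stdint.h>

/* Offsets/values as defined in the patch below (iwl-prph.h). */
#define DEVICE_SET_NMI_REG                          0x00a01c30
#define DEVICE_SET_NMI_VAL_DRV                      (1 << 7)
#define UREG_NIC_SET_NMI_DRIVER                     0x00a05c10
#define UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK 0xff000000

enum device_family { FAMILY_8000 = 8, FAMILY_9000 = 9, FAMILY_A000 = 10 };

/* Stand-in for iwl_write_prph(): just log the access. */
static void write_prph(uint32_t reg, uint32_t val)
{
        printf("write_prph(0x%08x, 0x%08x)\n", reg, val);
}

/* One register/value pair per family range, as in the fixed iwl_force_nmi(). */
static void force_nmi(enum device_family family)
{
        if (family < FAMILY_9000)
                write_prph(DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_DRV);
        else
                write_prph(UREG_NIC_SET_NMI_DRIVER,
                           UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK);
}

int main(void)
{
        force_nmi(FAMILY_8000);
        force_nmi(FAMILY_A000);
        return 0;
}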
Fixes: 4c9706dc2f29 ("iwlwifi: update nmi register") Signed-off-by: Golan Ben-Ami Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-io.c | 14 +++----------- drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 7 +++---- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index c527b8c10370..efb1998dcabd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -241,20 +241,12 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph); void iwl_force_nmi(struct iwl_trans *trans) { - if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) { + if (trans->cfg->device_family < IWL_DEVICE_FAMILY_9000) iwl_write_prph(trans, DEVICE_SET_NMI_REG, DEVICE_SET_NMI_VAL_DRV); - iwl_write_prph(trans, DEVICE_SET_NMI_REG, - DEVICE_SET_NMI_VAL_HW); - } else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_A000) { + else iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER, - DEVICE_SET_NMI_8000_VAL); - } else { - iwl_write_prph(trans, DEVICE_SET_NMI_8000_REG, - DEVICE_SET_NMI_8000_VAL); - iwl_write_prph(trans, DEVICE_SET_NMI_REG, - DEVICE_SET_NMI_VAL_DRV); - } + UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK); } IWL_EXPORT_SYMBOL(iwl_force_nmi); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index fbce97ed4ecd..421a869633a3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -109,13 +109,12 @@ /* Device system time */ #define DEVICE_SYSTEM_TIME_REG 0xA0206C -/* Device NMI register */ +/* Device NMI register and value for 8000 family and lower hw's */ #define DEVICE_SET_NMI_REG 0x00a01c30 -#define DEVICE_SET_NMI_VAL_HW BIT(0) #define DEVICE_SET_NMI_VAL_DRV BIT(7) -#define DEVICE_SET_NMI_8000_REG 0x00a01c24 -#define DEVICE_SET_NMI_8000_VAL 0x1000000 +/* Device NMI register and value for 9000 family and above hw's */ #define UREG_NIC_SET_NMI_DRIVER 0x00a05c10 +#define UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK 0xff000000 /* Shared registers (0x0..0x3ff, via target indirect or periphery */ #define SHR_BASE 0x00a10000 -- cgit v1.2.3-55-g7522 From fb70d49f2afec87fc570852c2251ffd3117c7373 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Mon, 17 Jul 2017 09:15:34 +0300 Subject: iwlwifi: remove references to unsupported HW There are still some references to 3945 and 4965 HW, which were never supported in iwlwifi. These references were inherited from a previous project and are irrelevant here. Additionally, remove some irrelevant references to 5100 HW. Remove all these. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/dvm/commands.h | 8 +------- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 9 +++++---- 3 files changed, 7 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index ede47e3c5971..f89736d60a3d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -311,11 +311,6 @@ enum { /** * rate_n_flags Tx antenna masks - * 4965 has 2 transmitters - * 5100 has 1 transmitter B - * 5150 has 1 transmitter A - * 5300 has 3 transmitters - * 5350 has 3 transmitters * bit14:16 */ #define RATE_MCS_ANT_POS 14 @@ -1230,7 +1225,6 @@ struct iwl_rx_mpdu_res_start { */ /* - * 4965 uCode updates these Tx attempt count values in host DRAM. 
* Used for managing Tx retries when expecting block-acks. * Driver should set these fields to 0. */ @@ -1540,7 +1534,7 @@ struct iwl_link_qual_general_params { /* Best single antenna to use for single stream (legacy, SISO). */ u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */ - /* Best antennas to use for MIMO (unused for 4965, assumes both). */ + /* Best antennas to use for MIMO */ u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */ /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 560de9de5232..7d468ad7cb6a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -169,7 +169,7 @@ /* * CSR Hardware Revision Workaround Register. Indicates hardware rev; - * "step" determines CCK backoff for txpower calculation. Used for 4965 only. + * "step" determines CCK backoff for txpower calculation. * See also CSR_HW_REV register. * Bit fields: * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 439cf424e058..382d7c251066 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1842,8 +1842,8 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, * These bits say the device is running, and should keep running for * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), * but they do not indicate that embedded SRAM is restored yet; - * 3945 and 4965 have volatile SRAM, and must save/restore contents - * to/from host DRAM when sleeping/waking for power-saving. + * HW with volatile SRAM must save/restore contents to/from + * host DRAM when sleeping/waking for power-saving. * Each direction takes approximately 1/4 millisecond; with this * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a * series of register accesses are expected (e.g. reading Event Log), @@ -1851,8 +1851,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, * * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that * SRAM is okay/restored. We don't check that here because this call - * is just for hardware register access; but GP1 MAC_SLEEP check is a - * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log). + * is just for hardware register access; but GP1 MAC_SLEEP + * check is a good idea before accessing the SRAM of HW with + * volatile SRAM (e.g. reading Event Log). * * 5000 series and later (including 1000 series) have non-volatile SRAM, * and do not save/restore SRAM when power cycling. -- cgit v1.2.3-55-g7522 From 9bb3d5a003b1740a8252c59f6c82dd75b3c0fe83 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 16 Jul 2017 12:45:12 +0300 Subject: iwlwifi: pcie: free the TSO page when a Tx queue is unmapped on A000 devices When we unmap a non-empty Tx queue, we need to free the pages that we allocated for the headers in TSO flows. This code existed for the 9000 device family, but somehow it got left out when the new Tx path for the A000 devices was written. 
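For illustration, a minimal sketch of the drain pattern the fix restores for the A000 queues: when a non-empty Tx queue is torn down, each entry popped off the ring must also release the auxiliary TSO header page that was allocated for it, not only its descriptor. The types and names below are hypothetical; only the per-entry free inside the drain loop mirrors the patch.

#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8

struct entry {
        void *skb;        /* the queued frame (opaque here) */
        void *tso_page;   /* auxiliary header page used for TSO frames */
};

struct txq {
        struct entry entries[RING_SIZE];
        unsigned int read_ptr;
        unsigned int write_ptr;
};

/* Drain the ring: free the auxiliary page of every entry as it goes. */
static void txq_unmap(struct txq *q)
{
        while (q->read_ptr != q->write_ptr) {
                struct entry *e = &q->entries[q->read_ptr];

                free(e->tso_page);   /* the step the patch adds */
                e->tso_page = NULL;
                free(e->skb);
                e->skb = NULL;
                q->read_ptr = (q->read_ptr + 1) % RING_SIZE;
        }
}

int main(void)
{
        struct txq q = { .read_ptr = 0, .write_ptr = 2 };

        q.entries[0].skb = malloc(16);
        q.entries[0].tso_page = malloc(4096);   /* TSO frame */
        q.entries[1].skb = malloc(16);
        q.entries[1].tso_page = NULL;           /* non-TSO frame */

        txq_unmap(&q);
        printf("ring drained, read_ptr = %u\n", q.read_ptr);
        return 0;
}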
Fixes: 2b0c5946d9ed ("iwlwifi: pcie: introduce a000 TX queues management") Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2 ++ drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 9 +++++++++ drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 4 ++-- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index a8ffd4ca8cd8..f46871840fd2 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -806,6 +806,8 @@ int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size); void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr); void iwl_pcie_apply_destination(struct iwl_trans *trans); +void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, + struct sk_buff *skb); #ifdef CONFIG_INET struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len); #endif diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 83a28892dc4f..4db45e56b6ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -937,6 +937,15 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); + if (txq_id != trans_pcie->cmd_queue) { + int idx = get_cmd_index(txq, txq->read_ptr); + struct sk_buff *skb = txq->entries[idx].skb; + + if (WARN_ON_ONCE(!skb)) + continue; + + iwl_pcie_free_tso_page(trans_pcie, skb); + } iwl_pcie_gen2_free_tfd(trans, txq); txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 6f5894545f4f..c893f9088f9d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -577,8 +577,8 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, return 0; } -static void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, - struct sk_buff *skb) +void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, + struct sk_buff *skb) { struct page **page_ptr; -- cgit v1.2.3-55-g7522 From 88c5f476bac964eb96e751b004f5684c90629eab Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 23 Jul 2017 12:53:46 +0300 Subject: iwlwifi: mvm: fix the coex firmware API The firmware API defined in the header files didn't match the structures that are actually passed by the firmware. The impact could be a decision for MIMO in Tx or Rx in coex scenarios.
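For illustration, a standalone sketch of why the layout mismatch matters: the old header packed TTC and RRC into the nibbles of a single byte, while the firmware actually sends one byte per status, so the old macros decoded the RRC bits from the wrong place. The struct names and the wire bytes below are made up for the example; the field layouts follow the old and new notification definitions in the patch below.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Old (wrong) tail of the notification: TTC in the low nibble, RRC in the
 * high nibble of the same byte. */
struct coex_tail_old {
        uint8_t ttc_rrc_status;
        uint8_t reserved[3];
};

/* New tail, matching what the firmware sends: one byte per status. */
struct coex_tail_new {
        uint8_t ttc_status;
        uint8_t rrc_status;
        uint16_t reserved;
};

int main(void)
{
        /* Example wire bytes: TTC enabled on PHY0, RRC enabled on PHY1. */
        const uint8_t wire[4] = { 0x01, 0x02, 0x00, 0x00 };
        struct coex_tail_old old;
        struct coex_tail_new fixed;

        memcpy(&old, wire, sizeof(old));
        memcpy(&fixed, wire, sizeof(fixed));

        /* Old macros: TTC = low nibble, RRC = high nibble of byte 0. */
        printf("old view: ttc=0x%x rrc=0x%x\n",
               old.ttc_rrc_status & 0xf, (old.ttc_rrc_status >> 4) & 0xf);
        /* New layout reads byte 1 as RRC, as the firmware intends. */
        printf("new view: ttc=0x%x rrc=0x%x\n",
               fixed.ttc_status, fixed.rrc_status);
        return 0;
}

With the old view the RRC status reads back as zero here, which is exactly the kind of wrong input that could tip the MIMO decision mentioned above.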
Fixes: 430a3bbafdc7 ("iwlwifi: mvm: BT Coex - new API") Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/coex.h | 14 +++++--------- drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 7 +++---- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 4 ++-- 3 files changed, 10 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index 583f4189f55e..df4ecec59b40 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -220,12 +220,6 @@ enum iwl_bt_ci_compliance { BT_CI_COMPLIANCE_BOTH = 3, }; /* BT_COEX_CI_COMPLIENCE_E_VER_1 */ -#define IWL_COEX_IS_TTC_ON(_ttc_rrc_status, _phy_id) \ - (_ttc_rrc_status & BIT(_phy_id)) - -#define IWL_COEX_IS_RRC_ON(_ttc_rrc_status, _phy_id) \ - ((_ttc_rrc_status >> 4) & BIT(_phy_id)) - /** * struct iwl_bt_coex_profile_notif - notification about BT coex * @mbox_msg: message from BT to WiFi @@ -234,7 +228,8 @@ enum iwl_bt_ci_compliance { * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading - * @ttc_rrc_status: is TTC or RRC enabled - one bit per PHY + * @ttc_status: is TTC enabled - one bit per PHY + * @rrc_status: is RRC enabled - one bit per PHY * @reserved: reserved */ struct iwl_bt_coex_profile_notif { @@ -245,8 +240,9 @@ struct iwl_bt_coex_profile_notif { __le32 primary_ch_lut; __le32 secondary_ch_lut; __le32 bt_activity_grading; - u8 ttc_rrc_status; - u8 reserved[3]; + u8 ttc_status; + u8 rrc_status; + __le16 reserved; } __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */ #endif /* __iwl_fw_api_coex_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 6c5c6510428a..0b4486114ddc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -560,8 +560,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac, smps_mode = IEEE80211_SMPS_AUTOMATIC; if (mvmvif->phy_ctxt && - IWL_COEX_IS_RRC_ON(mvm->last_bt_notif.ttc_rrc_status, - mvmvif->phy_ctxt->id)) + (mvm->last_bt_notif.rrc_status & BIT(mvmvif->phy_ctxt->id))) smps_mode = IEEE80211_SMPS_AUTOMATIC; IWL_DEBUG_COEX(data->mvm, @@ -792,7 +791,7 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) + if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) return LINK_QUAL_AGG_TIME_LIMIT_DEF; if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < @@ -816,7 +815,7 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt; enum iwl_bt_coex_lut_type lut_type; - if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id)) + if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id)) return true; if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index ceb486610a56..ba2745a3b537 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -569,9 +569,9 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user 
*user_buf, "antenna isolation = %d CORUN LUT index = %d\n", mvm->last_ant_isol, mvm->last_corun_lut); pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", - (notif->ttc_rrc_status >> 4) & 0xF); + notif->rrc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", - notif->ttc_rrc_status & 0xF); + notif->ttc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", IWL_MVM_BT_COEX_SYNC2SCO); -- cgit v1.2.3-55-g7522 From ccaffff182027078e9443d912b5af461850965f4 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Wed, 9 Aug 2017 19:09:43 -0400 Subject: sock: fix zerocopy panic in mem accounting Only call mm_unaccount_pinned_pages when releasing a struct ubuf_info that has initialized its field uarg->mmp. Before this patch, a vhost-net with experimental_zcopytx can crash in mm_unaccount_pinned_pages sock_zerocopy_put skb_zcopy_clear skb_release_data Only sock_zerocopy_alloc initializes this field. Move the unaccount call from generic sock_zerocopy_put to its specific callback sock_zerocopy_callback. Fixes: a91dbff551a6 ("sock: ulimit on MSG_ZEROCOPY pages") Reported-by: David Ahern Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/core/skbuff.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 42b62c716a33..cb123590c674 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1044,6 +1044,8 @@ void sock_zerocopy_callback(struct ubuf_info *uarg, bool success) u32 lo, hi; u16 len; + mm_unaccount_pinned_pages(&uarg->mmp); + /* if !len, there was only 1 call, and it was aborted * so do not queue a completion notification */ @@ -1084,8 +1086,6 @@ EXPORT_SYMBOL_GPL(sock_zerocopy_callback); void sock_zerocopy_put(struct ubuf_info *uarg) { if (uarg && atomic_dec_and_test(&uarg->refcnt)) { - mm_unaccount_pinned_pages(&uarg->mmp); - if (uarg->callback) uarg->callback(uarg, uarg->zerocopy); else -- cgit v1.2.3-55-g7522 From 0a4a060bb204c47825eb4f7c27f66fc7ee85d508 Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Wed, 9 Aug 2017 19:09:44 -0400 Subject: sock: fix zerocopy_success regression with msg_zerocopy Do not use uarg->zerocopy outside msg_zerocopy. In other paths the field is not explicitly initialized and aliases another field. Those paths have only one reference so do not need this intermediate variable. Call uarg->callback directly. Fixes: 1f8b977ab32d ("sock: enable MSG_ZEROCOPY") Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- include/linux/skbuff.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 8c0708d2e5e6..7594e19bce62 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1273,8 +1273,13 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) struct ubuf_info *uarg = skb_zcopy(skb); if (uarg) { - uarg->zerocopy = uarg->zerocopy && zerocopy; - sock_zerocopy_put(uarg); + if (uarg->callback == sock_zerocopy_callback) { + uarg->zerocopy = uarg->zerocopy && zerocopy; + sock_zerocopy_put(uarg); + } else { + uarg->callback(uarg, zerocopy); + } + skb_shinfo(skb)->tx_flags &= ~SKBTX_ZEROCOPY_FRAG; } } -- cgit v1.2.3-55-g7522 From 92b31a9af73b3a3fc801899335d6c47966351830 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 10 Aug 2017 01:39:55 +0200 Subject: bpf: add BPF_J{LT,LE,SLT,SLE} instructions Currently, eBPF only understands BPF_JGT (>), BPF_JGE (>=), BPF_JSGT (s>), BPF_JSGE (s>=) instructions, this means that particularly *JLT/*JLE counterparts involving immediates need to be rewritten from e.g. X < [IMM] by swapping arguments into [IMM] > X, meaning the immediate first is required to be loaded into a register Y := [IMM], such that then we can compare with Y > X. Note that the destination operand is always required to be a register. This has the downside of having unnecessarily increased register pressure, meaning complex program would need to spill other registers temporarily to stack in order to obtain an unused register for the [IMM]. Loading to registers will thus also affect state pruning since we need to account for that register use and potentially those registers that had to be spilled/filled again. As a consequence slightly more stack space might have been used due to spilling, and BPF programs are a bit longer due to extra code involving the register load and potentially required spill/fills. Thus, add BPF_JLT (<), BPF_JLE (<=), BPF_JSLT (s<), BPF_JSLE (s<=) counterparts to the eBPF instruction set. Modifying LLVM to remove the NegateCC() workaround in a PoC patch at [1] and allowing it to also emit the new instructions resulted in cilium's BPF programs that are injected into the fast-path to have a reduced program length in the range of 2-3% (e.g. accumulated main and tail call sections from one of the object file reduced from 4864 to 4729 insns), reduced complexity in the range of 10-30% (e.g. accumulated sections reduced in one of the cases from 116432 to 88428 insns), and reduced stack usage in the range of 1-5% (e.g. accumulated sections from one of the object files reduced from 824 to 784b). The modification for LLVM will be incorporated in a backwards compatible way. Plan is for LLVM to have i) a target specific option to offer a possibility to explicitly enable the extension by the user (as we have with -m target specific extensions today for various CPU insns), and ii) have the kernel checked for presence of the extensions and enable them transparently when the user is selecting more aggressive options such as -march=native in a bpf target context. (Other frontends generating BPF byte code, e.g. ply can probe the kernel directly for its code generation.) [1] https://github.com/borkmann/llvm/tree/bpf-insns Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- Documentation/networking/filter.txt | 4 + include/uapi/linux/bpf.h | 5 + kernel/bpf/core.c | 60 ++++++ lib/test_bpf.c | 364 ++++++++++++++++++++++++++++++++++++ net/core/filter.c | 21 ++- tools/include/uapi/linux/bpf.h | 5 + 6 files changed, 455 insertions(+), 4 deletions(-) diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index d0fdba7d66e2..6a0df8df6c43 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -906,6 +906,10 @@ If BPF_CLASS(code) == BPF_JMP, BPF_OP(code) is one of: BPF_JSGE 0x70 /* eBPF only: signed '>=' */ BPF_CALL 0x80 /* eBPF only: function call */ BPF_EXIT 0x90 /* eBPF only: function return */ + BPF_JLT 0xa0 /* eBPF only: unsigned '<' */ + BPF_JLE 0xb0 /* eBPF only: unsigned '<=' */ + BPF_JSLT 0xc0 /* eBPF only: signed '<' */ + BPF_JSLE 0xd0 /* eBPF only: signed '<=' */ So BPF_ADD | BPF_X | BPF_ALU means 32-bit addition in both classic BPF and eBPF. There are only two registers in classic BPF, so it means A += X. diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 1d06be1569b1..91da8371a2d0 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -30,9 +30,14 @@ #define BPF_FROM_LE BPF_TO_LE #define BPF_FROM_BE BPF_TO_BE +/* jmp encodings */ #define BPF_JNE 0x50 /* jump != */ +#define BPF_JLT 0xa0 /* LT is unsigned, '<' */ +#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ +#define BPF_JSLT 0xc0 /* SLT is signed, '<' */ +#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ #define BPF_CALL 0x80 /* function call */ #define BPF_EXIT 0x90 /* function return */ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ad5f55922a13..c69e7f5bfde7 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -595,9 +595,13 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from, case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | BPF_JSET | BPF_K: /* Accommodate for extra offset in case of a backjump. 
*/ off = from->off; @@ -833,12 +837,20 @@ static unsigned int ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K, [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X, [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K, + [BPF_JMP | BPF_JLT | BPF_X] = &&JMP_JLT_X, + [BPF_JMP | BPF_JLT | BPF_K] = &&JMP_JLT_K, [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X, [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K, + [BPF_JMP | BPF_JLE | BPF_X] = &&JMP_JLE_X, + [BPF_JMP | BPF_JLE | BPF_K] = &&JMP_JLE_K, [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X, [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K, + [BPF_JMP | BPF_JSLT | BPF_X] = &&JMP_JSLT_X, + [BPF_JMP | BPF_JSLT | BPF_K] = &&JMP_JSLT_K, [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X, [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K, + [BPF_JMP | BPF_JSLE | BPF_X] = &&JMP_JSLE_X, + [BPF_JMP | BPF_JSLE | BPF_K] = &&JMP_JSLE_K, [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X, [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K, /* Program return */ @@ -1073,6 +1085,18 @@ out: CONT_JMP; } CONT; + JMP_JLT_X: + if (DST < SRC) { + insn += insn->off; + CONT_JMP; + } + CONT; + JMP_JLT_K: + if (DST < IMM) { + insn += insn->off; + CONT_JMP; + } + CONT; JMP_JGE_X: if (DST >= SRC) { insn += insn->off; @@ -1085,6 +1109,18 @@ out: CONT_JMP; } CONT; + JMP_JLE_X: + if (DST <= SRC) { + insn += insn->off; + CONT_JMP; + } + CONT; + JMP_JLE_K: + if (DST <= IMM) { + insn += insn->off; + CONT_JMP; + } + CONT; JMP_JSGT_X: if (((s64) DST) > ((s64) SRC)) { insn += insn->off; @@ -1097,6 +1133,18 @@ out: CONT_JMP; } CONT; + JMP_JSLT_X: + if (((s64) DST) < ((s64) SRC)) { + insn += insn->off; + CONT_JMP; + } + CONT; + JMP_JSLT_K: + if (((s64) DST) < ((s64) IMM)) { + insn += insn->off; + CONT_JMP; + } + CONT; JMP_JSGE_X: if (((s64) DST) >= ((s64) SRC)) { insn += insn->off; @@ -1109,6 +1157,18 @@ out: CONT_JMP; } CONT; + JMP_JSLE_X: + if (((s64) DST) <= ((s64) SRC)) { + insn += insn->off; + CONT_JMP; + } + CONT; + JMP_JSLE_K: + if (((s64) DST) <= ((s64) IMM)) { + insn += insn->off; + CONT_JMP; + } + CONT; JMP_JSET_X: if (DST & SRC) { insn += insn->off; diff --git a/lib/test_bpf.c b/lib/test_bpf.c index d9d5a410955c..aa8812ae6776 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -951,6 +951,32 @@ static struct bpf_test tests[] = { { 4, 4, 4, 3, 3 }, { { 2, 0 }, { 3, 1 }, { 4, MAX_K } }, }, + { + "JGE (jt 0), test 1", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), + BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 1), + BPF_STMT(BPF_RET | BPF_K, MAX_K) + }, + CLASSIC, + { 4, 4, 4, 3, 3 }, + { { 2, 0 }, { 3, 1 }, { 4, 1 } }, + }, + { + "JGE (jt 0), test 2", + .u.insns = { + BPF_STMT(BPF_LDX | BPF_LEN, 0), + BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 2), + BPF_JUMP(BPF_JMP | BPF_JGE | BPF_X, 0, 0, 1), + BPF_STMT(BPF_RET | BPF_K, 1), + BPF_STMT(BPF_RET | BPF_K, MAX_K) + }, + CLASSIC, + { 4, 4, 5, 3, 3 }, + { { 4, 1 }, { 5, 1 }, { 6, MAX_K } }, + }, { "JGE", .u.insns = { @@ -4492,6 +4518,35 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSLT | BPF_K */ + { + "JMP_JSLT_K: Signed jump: if (-2 < -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xfffffffffffffffeLL), + BPF_JMP_IMM(BPF_JSLT, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSLT_K: Signed jump: if (-1 < -1) return 0", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + 
BPF_JMP_IMM(BPF_JSLT, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JSGT | BPF_K */ { "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1", @@ -4521,6 +4576,73 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSLE | BPF_K */ + { + "JMP_JSLE_K: Signed jump: if (-2 <= -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xfffffffffffffffeLL), + BPF_JMP_IMM(BPF_JSLE, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSLE_K: Signed jump: if (-1 <= -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSLE, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSLE_K: Signed jump: value walk 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 6), + BPF_ALU64_IMM(BPF_SUB, R1, 1), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 4), + BPF_ALU64_IMM(BPF_SUB, R1, 1), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 2), + BPF_ALU64_IMM(BPF_SUB, R1, 1), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 1), + BPF_EXIT_INSN(), /* bad exit */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSLE_K: Signed jump: value walk 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 4), + BPF_ALU64_IMM(BPF_SUB, R1, 2), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 2), + BPF_ALU64_IMM(BPF_SUB, R1, 2), + BPF_JMP_IMM(BPF_JSLE, R1, 0, 1), + BPF_EXIT_INSN(), /* bad exit */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* good exit */ + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JSGE | BPF_K */ { "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1", @@ -4617,6 +4739,35 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JLT | BPF_K */ + { + "JMP_JLT_K: if (2 < 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 2), + BPF_JMP_IMM(BPF_JLT, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JGT_K: Unsigned jump: if (1 < -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 1), + BPF_JMP_IMM(BPF_JLT, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JGE | BPF_K */ { "JMP_JGE_K: if (3 >= 2) return 1", @@ -4632,6 +4783,21 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JLE | BPF_K */ + { + "JMP_JLE_K: if (2 <= 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 2), + BPF_JMP_IMM(BPF_JLE, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JGT | BPF_K jump backwards */ { "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)", @@ -4662,6 +4828,36 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JLT | BPF_K jump backwards */ + { + "JMP_JGT_K: if (2 < 3) return 1 (jump backwards)", + .u.insns_int = { + BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */ + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), /* 
start: */ + BPF_LD_IMM64(R1, 2), /* note: this takes 2 insns */ + BPF_JMP_IMM(BPF_JLT, R1, 3, -6), /* goto out */ + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JLE_K: if (3 <= 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JLE, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JNE | BPF_K */ { "JMP_JNE_K: if (3 != 2) return 1", @@ -4752,6 +4948,37 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSLT | BPF_X */ + { + "JMP_JSLT_X: Signed jump: if (-2 < -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -2), + BPF_JMP_REG(BPF_JSLT, R2, R1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSLT_X: Signed jump: if (-1 < -1) return 0", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -1), + BPF_JMP_REG(BPF_JSLT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JSGE | BPF_X */ { "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1", @@ -4783,6 +5010,37 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JSLE | BPF_X */ + { + "JMP_JSLE_X: Signed jump: if (-2 <= -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -2), + BPF_JMP_REG(BPF_JSLE, R2, R1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSLE_X: Signed jump: if (-1 <= -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -1), + BPF_JMP_REG(BPF_JSLE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JGT | BPF_X */ { "JMP_JGT_X: if (3 > 2) return 1", @@ -4814,6 +5072,37 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JLT | BPF_X */ + { + "JMP_JLT_X: if (2 < 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JLT, R2, R1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JLT_X: Unsigned jump: if (1 < -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, 1), + BPF_JMP_REG(BPF_JLT, R2, R1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JGE | BPF_X */ { "JMP_JGE_X: if (3 >= 2) return 1", @@ -4845,6 +5134,37 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + /* BPF_JMP | BPF_JLE | BPF_X */ + { + "JMP_JLE_X: if (2 <= 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JLE, R2, R1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JLE_X: if (3 <= 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 3), + BPF_JMP_REG(BPF_JLE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + 
BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, { /* Mainly testing JIT + imm64 here. */ "JMP_JGE_X: ldimm64 test 1", @@ -4890,6 +5210,50 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + { + "JMP_JLE_X: ldimm64 test 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JLE, R2, R1, 2), + BPF_LD_IMM64(R0, 0xffffffffffffffffULL), + BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xeeeeeeeeU } }, + }, + { + "JMP_JLE_X: ldimm64 test 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JLE, R2, R1, 0), + BPF_LD_IMM64(R0, 0xffffffffffffffffULL), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffffU } }, + }, + { + "JMP_JLE_X: ldimm64 test 3", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JLE, R2, R1, 4), + BPF_LD_IMM64(R0, 0xffffffffffffffffULL), + BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeULL), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JNE | BPF_X */ { "JMP_JNE_X: if (3 != 2) return 1", diff --git a/net/core/filter.c b/net/core/filter.c index 78d00933dbe7..5afe3ac191ec 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -514,14 +514,27 @@ do_pass: break; } - /* Convert JEQ into JNE when 'jump_true' is next insn. */ - if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) { - insn->code = BPF_JMP | BPF_JNE | bpf_src; + /* Convert some jumps when 'jump_true' is next insn. */ + if (fp->jt == 0) { + switch (BPF_OP(fp->code)) { + case BPF_JEQ: + insn->code = BPF_JMP | BPF_JNE | bpf_src; + break; + case BPF_JGT: + insn->code = BPF_JMP | BPF_JLE | bpf_src; + break; + case BPF_JGE: + insn->code = BPF_JMP | BPF_JLT | bpf_src; + break; + default: + goto jmp_rest; + } + target = i + fp->jf + 1; BPF_EMIT_JMP; break; } - +jmp_rest: /* Other jumps are mapped into two insns: Jxx and JA. */ target = i + fp->jt + 1; insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 8d9bfcca3fe4..bf3b2e230455 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -30,9 +30,14 @@ #define BPF_FROM_LE BPF_TO_LE #define BPF_FROM_BE BPF_TO_BE +/* jmp encodings */ #define BPF_JNE 0x50 /* jump != */ +#define BPF_JLT 0xa0 /* LT is unsigned, '<' */ +#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ +#define BPF_JSLT 0xc0 /* SLT is signed, '<' */ +#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ #define BPF_CALL 0x80 /* function call */ #define BPF_EXIT 0x90 /* function return */ -- cgit v1.2.3-55-g7522 From 52afc51e94b1c7a52d7e04fd81ea8b1c177436d0 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 10 Aug 2017 01:39:56 +0200 Subject: bpf, x86: implement jiting of BPF_J{LT,LE,SLT,SLE} This work implements jiting of BPF_J{LT,LE,SLT,SLE} instructions with BPF_X/BPF_K variants for the x86_64 eBPF JIT. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- arch/x86/net/bpf_jit_comp.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index e1324f280e06..8194696e2805 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -94,7 +94,9 @@ static int bpf_size_to_x86_bytes(int bpf_size) #define X86_JNE 0x75 #define X86_JBE 0x76 #define X86_JA 0x77 +#define X86_JL 0x7C #define X86_JGE 0x7D +#define X86_JLE 0x7E #define X86_JG 0x7F static void bpf_flush_icache(void *start, void *end) @@ -888,9 +890,13 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: /* cmp dst_reg, src_reg */ EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39, add_2reg(0xC0, dst_reg, src_reg)); @@ -911,9 +917,13 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: /* cmp dst_reg, imm8/32 */ EMIT1(add_1mod(0x48, dst_reg)); @@ -935,18 +945,34 @@ emit_cond_jmp: /* convert BPF opcode to x86 */ /* GT is unsigned '>', JA in x86 */ jmp_cond = X86_JA; break; + case BPF_JLT: + /* LT is unsigned '<', JB in x86 */ + jmp_cond = X86_JB; + break; case BPF_JGE: /* GE is unsigned '>=', JAE in x86 */ jmp_cond = X86_JAE; break; + case BPF_JLE: + /* LE is unsigned '<=', JBE in x86 */ + jmp_cond = X86_JBE; + break; case BPF_JSGT: /* signed '>', GT in x86 */ jmp_cond = X86_JG; break; + case BPF_JSLT: + /* signed '<', LT in x86 */ + jmp_cond = X86_JL; + break; case BPF_JSGE: /* signed '>=', GE in x86 */ jmp_cond = X86_JGE; break; + case BPF_JSLE: + /* signed '<=', LE in x86 */ + jmp_cond = X86_JLE; + break; default: /* to silence gcc warning */ return -EFAULT; } -- cgit v1.2.3-55-g7522 From c362b2f34e266d062a3fe09e0f400d8f8bdf23c9 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 10 Aug 2017 01:39:57 +0200 Subject: bpf, arm64: implement jiting of BPF_J{LT, LE, SLT, SLE} This work implements jiting of BPF_J{LT,LE,SLT,SLE} instructions with BPF_X/BPF_K variants for the arm64 eBPF JIT. Signed-off-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- arch/arm64/net/bpf_jit.h | 4 ++++ arch/arm64/net/bpf_jit_comp.c | 20 ++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h index b02a9268dfbf..783de51a6c4e 100644 --- a/arch/arm64/net/bpf_jit.h +++ b/arch/arm64/net/bpf_jit.h @@ -44,8 +44,12 @@ #define A64_COND_NE AARCH64_INSN_COND_NE /* != */ #define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */ #define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */ +#define A64_COND_LS AARCH64_INSN_COND_LS /* unsigned <= */ +#define A64_COND_CC AARCH64_INSN_COND_CC /* unsigned < */ #define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */ #define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */ +#define A64_COND_LE AARCH64_INSN_COND_LE /* signed <= */ +#define A64_COND_LT AARCH64_INSN_COND_LT /* signed < */ #define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2) /* Unconditional branch (immediate) */ diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index f32144b2e07f..ba38d403abb2 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -527,10 +527,14 @@ emit_bswap_uxt: /* IF (dst COND src) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: emit(A64_CMP(1, dst, src), ctx); emit_cond_jmp: jmp_offset = bpf2a64_offset(i + off, i, ctx); @@ -542,9 +546,15 @@ emit_cond_jmp: case BPF_JGT: jmp_cond = A64_COND_HI; break; + case BPF_JLT: + jmp_cond = A64_COND_CC; + break; case BPF_JGE: jmp_cond = A64_COND_CS; break; + case BPF_JLE: + jmp_cond = A64_COND_LS; + break; case BPF_JSET: case BPF_JNE: jmp_cond = A64_COND_NE; @@ -552,9 +562,15 @@ emit_cond_jmp: case BPF_JSGT: jmp_cond = A64_COND_GT; break; + case BPF_JSLT: + jmp_cond = A64_COND_LT; + break; case BPF_JSGE: jmp_cond = A64_COND_GE; break; + case BPF_JSLE: + jmp_cond = A64_COND_LE; + break; default: return -EFAULT; } @@ -566,10 +582,14 @@ emit_cond_jmp: /* IF (dst COND imm) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: emit_a64_mov_i(1, tmp, imm, ctx); emit(A64_CMP(1, dst, tmp), ctx); goto emit_cond_jmp; -- cgit v1.2.3-55-g7522 From 18423550e3e621eff893fbd9af6b53054c2d3d84 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 10 Aug 2017 01:39:58 +0200 Subject: bpf, sparc64: implement jiting of BPF_J{LT, LE, SLT, SLE} This work implements jiting of BPF_J{LT,LE,SLT,SLE} instructions with BPF_X/BPF_K variants for the sparc64 eBPF JIT. Signed-off-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- arch/sparc/net/bpf_jit_comp_64.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 8799ae9a8788..c340af7b1371 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -128,6 +128,8 @@ static u32 WDISP10(u32 off) #define BA (BRANCH | CONDA) #define BG (BRANCH | CONDG) +#define BL (BRANCH | CONDL) +#define BLE (BRANCH | CONDLE) #define BGU (BRANCH | CONDGU) #define BLEU (BRANCH | CONDLEU) #define BGE (BRANCH | CONDGE) @@ -715,9 +717,15 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src, case BPF_JGT: br_opcode = BGU; break; + case BPF_JLT: + br_opcode = BLU; + break; case BPF_JGE: br_opcode = BGEU; break; + case BPF_JLE: + br_opcode = BLEU; + break; case BPF_JSET: case BPF_JNE: br_opcode = BNE; @@ -725,9 +733,15 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src, case BPF_JSGT: br_opcode = BG; break; + case BPF_JSLT: + br_opcode = BL; + break; case BPF_JSGE: br_opcode = BGE; break; + case BPF_JSLE: + br_opcode = BLE; + break; default: /* Make sure we dont leak kernel information to the * user. @@ -746,18 +760,30 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src, case BPF_JGT: cbcond_opcode = CBCONDGU; break; + case BPF_JLT: + cbcond_opcode = CBCONDLU; + break; case BPF_JGE: cbcond_opcode = CBCONDGEU; break; + case BPF_JLE: + cbcond_opcode = CBCONDLEU; + break; case BPF_JNE: cbcond_opcode = CBCONDNE; break; case BPF_JSGT: cbcond_opcode = CBCONDG; break; + case BPF_JSLT: + cbcond_opcode = CBCONDL; + break; case BPF_JSGE: cbcond_opcode = CBCONDGE; break; + case BPF_JSLE: + cbcond_opcode = CBCONDLE; + break; default: /* Make sure we dont leak kernel information to the * user. @@ -1176,10 +1202,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* IF (dst COND src) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: case BPF_JMP | BPF_JSET | BPF_X: { int err; @@ -1191,10 +1221,14 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* IF (dst COND imm) JUMP off */ case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: case BPF_JMP | BPF_JSET | BPF_K: { int err; -- cgit v1.2.3-55-g7522 From 3b497806f6fed6d4ef83f160af38b6fc8d708662 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 10 Aug 2017 01:39:59 +0200 Subject: bpf, s390x: implement jiting of BPF_J{LT, LE, SLT, SLE} This work implements jiting of BPF_J{LT,LE,SLT,SLE} instructions with BPF_X/BPF_K variants for the s390x eBPF JIT. Signed-off-by: Daniel Borkmann Acked-by: Michael Holzheu Signed-off-by: David S. 
Miller --- arch/s390/net/bpf_jit_comp.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 1803797fc885..8ec88497a28d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -1093,15 +1093,27 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i case BPF_JMP | BPF_JSGT | BPF_K: /* ((s64) dst > (s64) imm) */ mask = 0x2000; /* jh */ goto branch_ks; + case BPF_JMP | BPF_JSLT | BPF_K: /* ((s64) dst < (s64) imm) */ + mask = 0x4000; /* jl */ + goto branch_ks; case BPF_JMP | BPF_JSGE | BPF_K: /* ((s64) dst >= (s64) imm) */ mask = 0xa000; /* jhe */ goto branch_ks; + case BPF_JMP | BPF_JSLE | BPF_K: /* ((s64) dst <= (s64) imm) */ + mask = 0xc000; /* jle */ + goto branch_ks; case BPF_JMP | BPF_JGT | BPF_K: /* (dst_reg > imm) */ mask = 0x2000; /* jh */ goto branch_ku; + case BPF_JMP | BPF_JLT | BPF_K: /* (dst_reg < imm) */ + mask = 0x4000; /* jl */ + goto branch_ku; case BPF_JMP | BPF_JGE | BPF_K: /* (dst_reg >= imm) */ mask = 0xa000; /* jhe */ goto branch_ku; + case BPF_JMP | BPF_JLE | BPF_K: /* (dst_reg <= imm) */ + mask = 0xc000; /* jle */ + goto branch_ku; case BPF_JMP | BPF_JNE | BPF_K: /* (dst_reg != imm) */ mask = 0x7000; /* jne */ goto branch_ku; @@ -1119,15 +1131,27 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i case BPF_JMP | BPF_JSGT | BPF_X: /* ((s64) dst > (s64) src) */ mask = 0x2000; /* jh */ goto branch_xs; + case BPF_JMP | BPF_JSLT | BPF_X: /* ((s64) dst < (s64) src) */ + mask = 0x4000; /* jl */ + goto branch_xs; case BPF_JMP | BPF_JSGE | BPF_X: /* ((s64) dst >= (s64) src) */ mask = 0xa000; /* jhe */ goto branch_xs; + case BPF_JMP | BPF_JSLE | BPF_X: /* ((s64) dst <= (s64) src) */ + mask = 0xc000; /* jle */ + goto branch_xs; case BPF_JMP | BPF_JGT | BPF_X: /* (dst > src) */ mask = 0x2000; /* jh */ goto branch_xu; + case BPF_JMP | BPF_JLT | BPF_X: /* (dst < src) */ + mask = 0x4000; /* jl */ + goto branch_xu; case BPF_JMP | BPF_JGE | BPF_X: /* (dst >= src) */ mask = 0xa000; /* jhe */ goto branch_xu; + case BPF_JMP | BPF_JLE | BPF_X: /* (dst <= src) */ + mask = 0xc000; /* jle */ + goto branch_xu; case BPF_JMP | BPF_JNE | BPF_X: /* (dst != src) */ mask = 0x7000; /* jne */ goto branch_xu; -- cgit v1.2.3-55-g7522 From 20dbf5ccbb27850a129695d4590f22a7e5459798 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 10 Aug 2017 01:40:00 +0200 Subject: bpf, ppc64: implement jiting of BPF_J{LT, LE, SLT, SLE} This work implements jiting of BPF_J{LT,LE,SLT,SLE} instructions with BPF_X/BPF_K variants for the ppc64 eBPF JIT. Signed-off-by: Daniel Borkmann Acked-by: Naveen N. Rao Tested-by: Naveen N. Rao Signed-off-by: David S. 
Miller --- arch/powerpc/net/bpf_jit.h | 1 + arch/powerpc/net/bpf_jit_comp64.c | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index 30cf03f53428..47fc6660845d 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -263,6 +263,7 @@ static inline bool is_nearbranch(int offset) #define COND_EQ (CR0_EQ | COND_CMP_TRUE) #define COND_NE (CR0_EQ | COND_CMP_FALSE) #define COND_LT (CR0_LT | COND_CMP_TRUE) +#define COND_LE (CR0_GT | COND_CMP_FALSE) #endif diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 861c5af1c9c4..faf20163bd4c 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c @@ -795,12 +795,24 @@ emit_clear: case BPF_JMP | BPF_JSGT | BPF_X: true_cond = COND_GT; goto cond_branch; + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_X: + true_cond = COND_LT; + goto cond_branch; case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JSGE | BPF_K: case BPF_JMP | BPF_JSGE | BPF_X: true_cond = COND_GE; goto cond_branch; + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_X: + true_cond = COND_LE; + goto cond_branch; case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JEQ | BPF_X: true_cond = COND_EQ; @@ -817,14 +829,18 @@ emit_clear: cond_branch: switch (code) { case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JEQ | BPF_X: case BPF_JMP | BPF_JNE | BPF_X: /* unsigned comparison */ PPC_CMPLD(dst_reg, src_reg); break; case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: /* signed comparison */ PPC_CMPD(dst_reg, src_reg); break; @@ -834,7 +850,9 @@ cond_branch: case BPF_JMP | BPF_JNE | BPF_K: case BPF_JMP | BPF_JEQ | BPF_K: case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: /* * Need sign-extended load, so only positive * values can be used as imm in cmpldi @@ -849,7 +867,9 @@ cond_branch: } break; case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: /* * signed comparison, so any 16-bit value * can be used in cmpdi -- cgit v1.2.3-55-g7522 From 5dd294d4eb634a5f4c877dc487e83f975aa05c11 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 10 Aug 2017 01:40:01 +0200 Subject: bpf, nfp: implement jiting of BPF_J{LT,LE} This work implements jiting of BPF_J{LT,LE} instructions with BPF_X/BPF_K variants for the nfp eBPF JIT. The two BPF_J{SLT,SLE} instructions have not been added yet given BPF_J{SGT,SGE} are not supported yet either. Signed-off-by: Daniel Borkmann Acked-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/bpf/jit.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 8e57fda6b8b5..239dfbe8a0a1 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -1238,6 +1238,16 @@ static int jge_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_cmp_imm(nfp_prog, meta, BR_BHS, true); } +static int jlt_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_imm(nfp_prog, meta, BR_BHS, false); +} + +static int jle_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_imm(nfp_prog, meta, BR_BLO, true); +} + static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -1325,6 +1335,16 @@ static int jge_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return wrp_cmp_reg(nfp_prog, meta, BR_BHS, true); } +static int jlt_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_reg(nfp_prog, meta, BR_BHS, false); +} + +static int jle_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + return wrp_cmp_reg(nfp_prog, meta, BR_BLO, true); +} + static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE); @@ -1383,11 +1403,15 @@ static const instr_cb_t instr_cb[256] = { [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm, [BPF_JMP | BPF_JGT | BPF_K] = jgt_imm, [BPF_JMP | BPF_JGE | BPF_K] = jge_imm, + [BPF_JMP | BPF_JLT | BPF_K] = jlt_imm, + [BPF_JMP | BPF_JLE | BPF_K] = jle_imm, [BPF_JMP | BPF_JSET | BPF_K] = jset_imm, [BPF_JMP | BPF_JNE | BPF_K] = jne_imm, [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg, [BPF_JMP | BPF_JGT | BPF_X] = jgt_reg, [BPF_JMP | BPF_JGE | BPF_X] = jge_reg, + [BPF_JMP | BPF_JLT | BPF_X] = jlt_reg, + [BPF_JMP | BPF_JLE | BPF_X] = jle_reg, [BPF_JMP | BPF_JSET | BPF_X] = jset_reg, [BPF_JMP | BPF_JNE | BPF_X] = jne_reg, [BPF_JMP | BPF_EXIT] = goto_out, -- cgit v1.2.3-55-g7522 From b4e432f1000a171d901e42551459059831925770 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 10 Aug 2017 01:40:02 +0200 Subject: bpf: enable BPF_J{LT, LE, SLT, SLE} opcodes in verifier Enable the newly added jump opcodes, main parts are in two different areas, namely direct packet access and dynamic map value access. 
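Before going through both areas in detail, a minimal standalone C sketch of the unsigned bound tightening that the reg_set_min_max() hunks below perform for BPF_JLT and BPF_JLE against an immediate. The struct and function names used here (ubounds, learn_bounds_jlt/jle) are illustrative only and are not part of the patch, which operates on struct bpf_reg_state:

#include <stdint.h>

/* Illustrative stand-in for the umin/umax pair the verifier tracks per register. */
struct ubounds {
        uint64_t umin;
        uint64_t umax;
};

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/*
 * "if (reg < val)": on the taken branch reg <= val - 1, on the
 * fall-through reg >= val.  (As in the patch, val == 0 needs no special
 * handling here since no unsigned value is less than 0 and the taken
 * branch is then dead.)
 */
static void learn_bounds_jlt(struct ubounds *taken, struct ubounds *not_taken,
                             uint64_t val)
{
        taken->umax = min_u64(taken->umax, val - 1);
        not_taken->umin = max_u64(not_taken->umin, val);
}

/* "if (reg <= val)": taken branch knows reg <= val, fall-through knows reg >= val + 1. */
static void learn_bounds_jle(struct ubounds *taken, struct ubounds *not_taken,
                             uint64_t val)
{
        taken->umax = min_u64(taken->umax, val);
        not_taken->umin = max_u64(not_taken->umin, val + 1);
}

The signed variants follow the same pattern on smin_value/smax_value with s64 comparisons, and reg_set_min_max_inv() applies the mirrored updates for the case where the tracked register sits on the other side of the comparison.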
For the direct packet access, we now allow for the following two new patterns to match in order to trigger markings with find_good_pkt_pointers(): Variant 1 (access ok when taking the branch): 0: (61) r2 = *(u32 *)(r1 +76) 1: (61) r3 = *(u32 *)(r1 +80) 2: (bf) r0 = r2 3: (07) r0 += 8 4: (ad) if r0 < r3 goto pc+2 R0=pkt(id=0,off=8,r=0) R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R10=fp 5: (b7) r0 = 0 6: (95) exit from 4 to 7: R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R10=fp 7: (71) r0 = *(u8 *)(r2 +0) 8: (05) goto pc-4 5: (b7) r0 = 0 6: (95) exit processed 11 insns, stack depth 0 Variant 2 (access ok on fall-through): 0: (61) r2 = *(u32 *)(r1 +76) 1: (61) r3 = *(u32 *)(r1 +80) 2: (bf) r0 = r2 3: (07) r0 += 8 4: (bd) if r3 <= r0 goto pc+1 R0=pkt(id=0,off=8,r=8) R1=ctx R2=pkt(id=0,off=0,r=8) R3=pkt_end R10=fp 5: (71) r0 = *(u8 *)(r2 +0) 6: (b7) r0 = 1 7: (95) exit from 4 to 6: R0=pkt(id=0,off=8,r=0) R1=ctx R2=pkt(id=0,off=0,r=0) R3=pkt_end R10=fp 6: (b7) r0 = 1 7: (95) exit processed 10 insns, stack depth 0 The above two basically just swap the branches where we need to handle an exception and allow packet access compared to the two already existing variants for find_good_pkt_pointers(). For the dynamic map value access, we add the new instructions to reg_set_min_max() and reg_set_min_max_inv() in order to learn bounds. Verifier test cases for both are added in a follow-up patch. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: John Fastabend Signed-off-by: David S. Miller --- kernel/bpf/verifier.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 58 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8160a81a40bf..ecc590e01a1d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -312,11 +312,15 @@ static const char *const bpf_jmp_string[16] = { [BPF_JA >> 4] = "jmp", [BPF_JEQ >> 4] = "==", [BPF_JGT >> 4] = ">", + [BPF_JLT >> 4] = "<", [BPF_JGE >> 4] = ">=", + [BPF_JLE >> 4] = "<=", [BPF_JSET >> 4] = "&", [BPF_JNE >> 4] = "!=", [BPF_JSGT >> 4] = "s>", + [BPF_JSLT >> 4] = "s<", [BPF_JSGE >> 4] = "s>=", + [BPF_JSLE >> 4] = "s<=", [BPF_CALL >> 4] = "call", [BPF_EXIT >> 4] = "exit", }; @@ -2383,27 +2387,37 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, */ return; - /* LLVM can generate two kind of checks: + /* LLVM can generate four kind of checks: * - * Type 1: + * Type 1/2: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto * * + * r2 = r3; + * r2 += 8; + * if (r2 < pkt_end) goto + * + * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * - * Type 2: + * Type 3/4: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto * * + * r2 = r3; + * r2 += 8; + * if (pkt_end <= r2) goto + * + * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) @@ -2471,6 +2485,14 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg, false_reg->smax_value = min_t(s64, false_reg->smax_value, val); true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); break; + case BPF_JLT: + false_reg->umin_value = max(false_reg->umin_value, val); + true_reg->umax_value = min(true_reg->umax_value, val - 1); + break; + case BPF_JSLT: + false_reg->smin_value = max_t(s64, false_reg->smin_value, val); + true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); + break; case BPF_JGE: false_reg->umax_value = min(false_reg->umax_value, val - 1); true_reg->umin_value = max(true_reg->umin_value, val); @@ -2479,6 +2501,14 @@ 
static void reg_set_min_max(struct bpf_reg_state *true_reg, false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); true_reg->smin_value = max_t(s64, true_reg->smin_value, val); break; + case BPF_JLE: + false_reg->umin_value = max(false_reg->umin_value, val + 1); + true_reg->umax_value = min(true_reg->umax_value, val); + break; + case BPF_JSLE: + false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); + true_reg->smax_value = min_t(s64, true_reg->smax_value, val); + break; default: break; } @@ -2527,6 +2557,14 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; + case BPF_JLT: + true_reg->umin_value = max(true_reg->umin_value, val + 1); + false_reg->umax_value = min(false_reg->umax_value, val); + break; + case BPF_JSLT: + true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); + false_reg->smax_value = min_t(s64, false_reg->smax_value, val); + break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); @@ -2535,6 +2573,14 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; + case BPF_JLE: + true_reg->umin_value = max(true_reg->umin_value, val); + false_reg->umax_value = min(false_reg->umax_value, val - 1); + break; + case BPF_JSLE: + true_reg->smin_value = max_t(s64, true_reg->smin_value, val); + false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); + break; default: break; } @@ -2659,7 +2705,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, u8 opcode = BPF_OP(insn->code); int err; - if (opcode > BPF_EXIT) { + if (opcode > BPF_JSLE) { verbose("invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } @@ -2761,10 +2807,18 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, dst_reg->type == PTR_TO_PACKET && regs[insn->src_reg].type == PTR_TO_PACKET_END) { find_good_pkt_pointers(this_branch, dst_reg); + } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && + dst_reg->type == PTR_TO_PACKET && + regs[insn->src_reg].type == PTR_TO_PACKET_END) { + find_good_pkt_pointers(other_branch, dst_reg); } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && dst_reg->type == PTR_TO_PACKET_END && regs[insn->src_reg].type == PTR_TO_PACKET) { find_good_pkt_pointers(other_branch, ®s[insn->src_reg]); + } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && + dst_reg->type == PTR_TO_PACKET_END && + regs[insn->src_reg].type == PTR_TO_PACKET) { + find_good_pkt_pointers(this_branch, ®s[insn->src_reg]); } else if (is_pointer_value(env, insn->dst_reg)) { verbose("R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; -- cgit v1.2.3-55-g7522 From 31e482bf7ecfcae51472b90cd572462f58e6a7bc Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 10 Aug 2017 01:40:03 +0200 Subject: bpf: add test cases for new BPF_J{LT, LE, SLT, SLE} instructions Add test cases to the verifier selftest suite in order to verify that i) direct packet access, and ii) dynamic map value access is working with the changes related to the new instructions. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- tools/testing/selftests/bpf/test_verifier.c | 313 ++++++++++++++++++++++++++++ 1 file changed, 313 insertions(+) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index ab0cd1198326..1b767127e141 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -2830,6 +2830,79 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = ACCEPT, }, + { + "direct packet access: test25 (marking on <, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, -4), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "direct packet access: test26 (marking on <, bad access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_0, BPF_REG_3, 3), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JA, 0, 0, -3), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "direct packet access: test27 (marking on <=, good access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 1), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, + { + "direct packet access: test28 (marking on <=, bad access)", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_0, 2), + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, -4), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, + }, { "helper access to packet: test1, valid packet_ptr range", .insns = { @@ -4488,6 +4561,246 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, + { + "helper access to map: bounds check using <, good access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + 
BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: bounds check using <, bad access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JLT, BPF_REG_3, 32, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = REJECT, + .errstr = "R1 unbounded memory access", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: bounds check using <=, good access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: bounds check using <=, bad access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JLE, BPF_REG_3, 32, 4), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = REJECT, + .errstr = "R1 unbounded memory access", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: bounds check using s<, good access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 0, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: bounds check using s<, good access 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + 
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: bounds check using s<, bad access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLT, BPF_REG_3, -3, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = REJECT, + .errstr = "R1 min value is negative", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: bounds check using s<=, good access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 0, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: bounds check using s<=, good access 2", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to map: bounds check using s<=, bad access", + .insns = { + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, 32, 2), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + BPF_JMP_IMM(BPF_JSLE, BPF_REG_3, -3, -3), + BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), + BPF_ST_MEM(BPF_B, BPF_REG_1, 0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .result = REJECT, + .errstr = "R1 min value is negative", + .prog_type = 
BPF_PROG_TYPE_TRACEPOINT, + }, { "map element value is preserved across register spilling", .insns = { -- cgit v1.2.3-55-g7522 From e1fa6d216dd134c7fca6dc5f0a10553c36503901 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 9 Aug 2017 20:41:47 +0200 Subject: rtnetlink: call rtnl_calcit directly There is only a single place in the kernel that regisers the "calcit" callback (to determine min allocation for dumps). This is in rtnetlink.c for PF_UNSPEC RTM_GETLINK. The function that checks for calcit presence at run time will first check the requested family (which will always fail for !PF_UNSPEC as no callsite registers this), then falls back to checking PF_UNSPEC. Therefore we can just check if type is RTM_GETLINK and then do a direct call. Because of fallback to PF_UNSPEC all RTM_GETLINK types used this regardless of family. This has the advantage that we don't need to allocate space for the function pointer for all the other families. A followup patch will drop the calcit function pointer from the rtnl_link callback structure. Signed-off-by: Florian Westphal Reviewed-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 29 ++++------------------------- 1 file changed, 4 insertions(+), 25 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9201e3621351..8c9d34deea7d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -62,7 +62,6 @@ struct rtnl_link { rtnl_doit_func doit; rtnl_dumpit_func dumpit; - rtnl_calcit_func calcit; }; static DEFINE_MUTEX(rtnl_mutex); @@ -173,21 +172,6 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) return tab[msgindex].dumpit; } -static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex) -{ - struct rtnl_link *tab; - - if (protocol <= RTNL_FAMILY_MAX) - tab = rtnl_msg_handlers[protocol]; - else - tab = NULL; - - if (tab == NULL || tab[msgindex].calcit == NULL) - tab = rtnl_msg_handlers[PF_UNSPEC]; - - return tab[msgindex].calcit; -} - /** * __rtnl_register - Register a rtnetlink message type * @protocol: Protocol family or PF_UNSPEC @@ -231,9 +215,6 @@ int __rtnl_register(int protocol, int msgtype, if (dumpit) tab[msgindex].dumpit = dumpit; - if (calcit) - tab[msgindex].calcit = calcit; - return 0; } EXPORT_SYMBOL_GPL(__rtnl_register); @@ -277,7 +258,6 @@ int rtnl_unregister(int protocol, int msgtype) rtnl_msg_handlers[protocol][msgindex].doit = NULL; rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; - rtnl_msg_handlers[protocol][msgindex].calcit = NULL; return 0; } @@ -4187,15 +4167,14 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { struct sock *rtnl; rtnl_dumpit_func dumpit; - rtnl_calcit_func calcit; u16 min_dump_alloc = 0; dumpit = rtnl_get_dumpit(family, type); if (dumpit == NULL) return -EOPNOTSUPP; - calcit = rtnl_get_calcit(family, type); - if (calcit) - min_dump_alloc = calcit(skb, nlh); + + if (type == RTM_GETLINK) + min_dump_alloc = rtnl_calcit(skb, nlh); __rtnl_unlock(); rtnl = net->rtnl; @@ -4300,7 +4279,7 @@ void __init rtnetlink_init(void) register_netdevice_notifier(&rtnetlink_dev_notifier); rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, - rtnl_dump_ifinfo, rtnl_calcit); + rtnl_dump_ifinfo, NULL); rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL); rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL); -- cgit v1.2.3-55-g7522 From b97bac64a589d0158cf866e8995e831030f68f4f Mon Sep 
17 00:00:00 2001 From: Florian Westphal Date: Wed, 9 Aug 2017 20:41:48 +0200 Subject: rtnetlink: make rtnl_register accept a flags parameter This change allows us to later indicate to rtnetlink core that certain doit functions should be called without acquiring rtnl_mutex. This change should have no effect, we simply replace the last (now unused) calcit argument with the new flag. Signed-off-by: Florian Westphal Reviewed-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- include/net/rtnetlink.h | 5 ++--- net/bridge/br_mdb.c | 6 +++--- net/can/gw.c | 6 +++--- net/core/fib_rules.c | 6 +++--- net/core/neighbour.c | 10 +++++----- net/core/net_namespace.c | 4 ++-- net/core/rtnetlink.c | 36 ++++++++++++++++++------------------ net/dcb/dcbnl.c | 4 ++-- net/decnet/dn_dev.c | 6 +++--- net/decnet/dn_fib.c | 4 ++-- net/decnet/dn_route.c | 4 ++-- net/ipv4/devinet.c | 8 ++++---- net/ipv4/fib_frontend.c | 6 +++--- net/ipv4/ipmr.c | 8 ++++---- net/ipv4/route.c | 2 +- net/ipv6/addrconf.c | 14 +++++++------- net/ipv6/addrlabel.c | 6 +++--- net/ipv6/ip6_fib.c | 2 +- net/ipv6/ip6mr.c | 2 +- net/ipv6/route.c | 6 +++--- net/mpls/af_mpls.c | 8 ++++---- net/phonet/pn_netlink.c | 12 ++++++------ net/qrtr/qrtr.c | 2 +- net/sched/act_api.c | 6 +++--- net/sched/cls_api.c | 6 +++--- net/sched/sch_api.c | 12 ++++++------ 26 files changed, 95 insertions(+), 96 deletions(-) diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index abe6b733d473..ac32460a0adb 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -7,12 +7,11 @@ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); -typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *); int __rtnl_register(int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func); + rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); void rtnl_register(int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func, rtnl_calcit_func); + rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); int rtnl_unregister(int protocol, int msgtype); void rtnl_unregister_all(int protocol); diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index a0b11e7d67d9..ca01def49af0 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -713,9 +713,9 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void br_mdb_init(void) { - rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL); - rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL); - rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL); + rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0); + rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0); + rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0); } void br_mdb_uninit(void) diff --git a/net/can/gw.c b/net/can/gw.c index 29748d844c3f..73a02af4b5d7 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -1031,15 +1031,15 @@ static __init int cgw_module_init(void) notifier.notifier_call = cgw_notifier; register_netdevice_notifier(¬ifier); - if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) { + if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, 0)) { unregister_netdevice_notifier(¬ifier); kmem_cache_destroy(cgw_cache); return -ENOBUFS; } /* Only the first call to __rtnl_register can fail */ - __rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, NULL); - __rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, NULL); + 
__rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, 0); + __rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, 0); return 0; } diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index fc0b65093417..9a6d97c1d810 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -1026,9 +1026,9 @@ static struct pernet_operations fib_rules_net_ops = { static int __init fib_rules_init(void) { int err; - rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL); + rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, 0); err = register_pernet_subsys(&fib_rules_net_ops); if (err < 0) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index d0713627deb6..16a1a4c4eb57 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3261,13 +3261,13 @@ EXPORT_SYMBOL(neigh_sysctl_unregister); static int __init neigh_init(void) { - rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL); + rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, 0); rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info, - NULL); - rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL); + 0); + rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, 0); return 0; } diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 8726d051f31d..a7f06d706aa0 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -855,9 +855,9 @@ static int __init net_ns_init(void) register_pernet_subsys(&net_ns_ops); - rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, 0); rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid, - NULL); + 0); return 0; } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 8c9d34deea7d..67607c540c03 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -178,7 +178,7 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) * @msgtype: rtnetlink message type * @doit: Function pointer called for each request message * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message - * @calcit: Function pointer to calc size of dump message + * @flags: rtnl_link_flags to modifiy behaviour of doit/dumpit functions * * Registers the specified function pointers (at least one of them has * to be non-NULL) to be called whenever a request message for the @@ -192,7 +192,7 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) */ int __rtnl_register(int protocol, int msgtype, rtnl_doit_func doit, rtnl_dumpit_func dumpit, - rtnl_calcit_func calcit) + unsigned int flags) { struct rtnl_link *tab; int msgindex; @@ -230,9 +230,9 @@ EXPORT_SYMBOL_GPL(__rtnl_register); */ void rtnl_register(int protocol, int msgtype, rtnl_doit_func doit, rtnl_dumpit_func dumpit, - rtnl_calcit_func calcit) + unsigned int flags) { - if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0) + if (__rtnl_register(protocol, msgtype, doit, dumpit, flags) < 
0) panic("Unable to register rtnetlink message handler, " "protocol = %d, message type = %d\n", protocol, msgtype); @@ -4279,23 +4279,23 @@ void __init rtnetlink_init(void) register_netdevice_notifier(&rtnetlink_dev_notifier); rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, - rtnl_dump_ifinfo, NULL); - rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL); + rtnl_dump_ifinfo, 0); + rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0); - rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL); - rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL); - rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, NULL); + rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, 0); + rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, 0); + rtnl_register(PF_UNSPEC, RTM_GETNETCONF, NULL, rtnl_dump_all, 0); - rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL); - rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL); - rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL); + rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, 0); + rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, 0); + rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, 0); - rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL); - rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL); - rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL); + rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, 0); + rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, 0); + rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, 0); rtnl_register(PF_UNSPEC, RTM_GETSTATS, rtnl_stats_get, rtnl_stats_dump, - NULL); + 0); } diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 733f523707ac..bae7d78aa068 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1938,8 +1938,8 @@ static int __init dcbnl_init(void) { INIT_LIST_HEAD(&dcb_app_list); - rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0); return 0; } diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index fa0110b57ca1..4d339de56862 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -1419,9 +1419,9 @@ void __init dn_dev_init(void) dn_dev_devices_on(); - rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, NULL); - rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL); - rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL); + rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, 0); + rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, 0); + rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, 0); proc_create("decnet_dev", S_IRUGO, init_net.proc_net, &dn_dev_seq_fops); diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c index f9f6fb3f3c5b..3d37464c8b4a 100644 --- a/net/decnet/dn_fib.c +++ b/net/decnet/dn_fib.c @@ -791,8 +791,8 @@ void __init dn_fib_init(void) register_dnaddr_notifier(&dn_fib_dnaddr_notifier); - rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL, NULL); - 
rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL, NULL); + rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL, 0); + rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL, 0); } diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index bcbe548f8854..0bd3afd01dd2 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -1922,10 +1922,10 @@ void __init dn_route_init(void) #ifdef CONFIG_DECNET_ROUTER rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, - dn_fib_dump, NULL); + dn_fib_dump, 0); #else rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, - dn_cache_dump, NULL); + dn_cache_dump, 0); #endif } diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 38d9af9b917c..d7adc0616599 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -2491,9 +2491,9 @@ void __init devinet_init(void) rtnl_af_register(&inet_af_ops); - rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL); - rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL); - rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL); + rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0); + rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0); + rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, 0); rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf, - inet_netconf_dump_devconf, NULL); + inet_netconf_dump_devconf, 0); } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 2cba559f14df..37819ab4cc74 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -1348,7 +1348,7 @@ void __init ip_fib_init(void) register_netdevice_notifier(&fib_netdev_notifier); register_inetaddr_notifier(&fib_inetaddr_notifier); - rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL); - rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL); - rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL); + rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0); + rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0); + rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, 0); } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 06863ea3fc5b..c9b3e6e069ae 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -3114,14 +3114,14 @@ int __init ip_mr_init(void) } #endif rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, - ipmr_rtm_getroute, ipmr_rtm_dumproute, NULL); + ipmr_rtm_getroute, ipmr_rtm_dumproute, 0); rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE, - ipmr_rtm_route, NULL, NULL); + ipmr_rtm_route, NULL, 0); rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE, - ipmr_rtm_route, NULL, NULL); + ipmr_rtm_route, NULL, 0); rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK, - NULL, ipmr_rtm_dumplink, NULL); + NULL, ipmr_rtm_dumplink, 0); return 0; #ifdef CONFIG_IP_PIMSM_V2 diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 0383e66f59bc..2ef46294475f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3067,7 +3067,7 @@ int __init ip_rt_init(void) xfrm_init(); xfrm4_init(); #endif - rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL); + rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, 0); #ifdef CONFIG_SYSCTL register_pernet_subsys(&sysctl_route_ops); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 30ee23eef268..640792e1ecb7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -6605,21 +6605,21 @@ int __init addrconf_init(void) rtnl_af_register(&inet6_ops); err = __rtnl_register(PF_INET6, 
RTM_GETLINK, NULL, inet6_dump_ifinfo, - NULL); + 0); if (err < 0) goto errout; /* Only the first call to __rtnl_register can fail */ - __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, NULL); - __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, NULL); + __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, 0); + __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, 0); __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr, - inet6_dump_ifaddr, NULL); + inet6_dump_ifaddr, 0); __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL, - inet6_dump_ifmcaddr, NULL); + inet6_dump_ifmcaddr, 0); __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, - inet6_dump_ifacaddr, NULL); + inet6_dump_ifacaddr, 0); __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf, - inet6_netconf_dump_devconf, NULL); + inet6_netconf_dump_devconf, 0); ipv6_addr_label_rtnl_register(); diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c index 7a428f65c7ec..cea5eb488013 100644 --- a/net/ipv6/addrlabel.c +++ b/net/ipv6/addrlabel.c @@ -593,10 +593,10 @@ out: void __init ipv6_addr_label_rtnl_register(void) { __rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel, - NULL, NULL); + NULL, 0); __rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel, - NULL, NULL); + NULL, 0); __rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get, - ip6addrlbl_dump, NULL); + ip6addrlbl_dump, 0); } diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 69ed0043d117..8c58c7558de0 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -2038,7 +2038,7 @@ int __init fib6_init(void) goto out_kmem_cache_create; ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib, - NULL); + 0); if (ret) goto out_unregister_subsys; diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 7454850f2098..f5500f5444e9 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1427,7 +1427,7 @@ int __init ip6_mr_init(void) } #endif rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, - ip6mr_rtm_dumproute, NULL); + ip6mr_rtm_dumproute, 0); return 0; #ifdef CONFIG_IPV6_PIMSM_V2 add_proto_fail: diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c73e61750642..035762fed07d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -4105,9 +4105,9 @@ int __init ip6_route_init(void) goto fib6_rules_init; ret = -ENOBUFS; - if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) || - __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) || - __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL)) + if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) || + __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) || + __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, 0)) goto out_register_late_subsys; ret = register_netdevice_notifier(&ip6_route_dev_notifier); diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index ea4f481839dd..c5b9ce41d66f 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -2479,12 +2479,12 @@ static int __init mpls_init(void) rtnl_af_register(&mpls_af_ops); - rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL); - rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL); + rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, 0); + rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, 0); rtnl_register(PF_MPLS, RTM_GETROUTE, mpls_getroute, mpls_dump_routes, - NULL); + 0); rtnl_register(PF_MPLS, RTM_GETNETCONF, 
mpls_netconf_get_devconf, - mpls_netconf_dump_devconf, NULL); + mpls_netconf_dump_devconf, 0); err = 0; out: return err; diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c index 45b3af3080d8..da754fc926e7 100644 --- a/net/phonet/pn_netlink.c +++ b/net/phonet/pn_netlink.c @@ -300,15 +300,15 @@ out: int __init phonet_netlink_register(void) { int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, - NULL, NULL); + NULL, 0); if (err) return err; /* Further __rtnl_register() cannot fail */ - __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL, NULL); - __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, NULL); - __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL, NULL); - __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL, NULL); - __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, NULL); + __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL, 0); + __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, 0); + __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL, 0); + __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL, 0); + __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, 0); return 0; } diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 5586609afa27..c2f5c13550c0 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -1081,7 +1081,7 @@ static int __init qrtr_proto_init(void) return rc; } - rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL); + rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, 0); return 0; } diff --git a/net/sched/act_api.c b/net/sched/act_api.c index a2915d958279..02fcb0c78a28 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -1255,10 +1255,10 @@ out_module_put: static int __init tc_action_init(void) { - rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0); rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action, - NULL); + 0); return 0; } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 8d1157aebaf7..ebeeb87e6d44 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1010,10 +1010,10 @@ EXPORT_SYMBOL(tcf_exts_get_dev); static int __init tc_filter_init(void) { - rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0); rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter, - tc_dump_tfilter, NULL); + tc_dump_tfilter, 0); return 0; } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index bd24a550e0f9..816c8092e601 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1952,14 +1952,14 @@ static int __init pktsched_init(void) register_qdisc(&mq_qdisc_ops); register_qdisc(&noqueue_qdisc_ops); - rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL); + rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0); rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, - NULL); - rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL); - rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL); 
+ 0); + rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0); rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, - NULL); + 0); return 0; } -- cgit v1.2.3-55-g7522 From 019a316992ee0d9832b1c480c899d6bdf2a0a77e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 9 Aug 2017 20:41:49 +0200 Subject: rtnetlink: add reference counting to prevent module unload while dump is in progress I don't see what prevents rmmod (unregister_all is called) while a dump is active. Even if we'd add rtnl lock/unlock pair to unregister_all (as done here), that's not enough either as rtnl_lock is released right before the dump process starts. So this adds a refcount: * acquire rtnl mutex * bump refcount * release mutex * start the dump ... and make unregister_all remove the callbacks (no new dumps possible) and then wait until refcount is 0. Signed-off-by: Florian Westphal Reviewed-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 67607c540c03..c45a7c5e3232 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -127,6 +127,7 @@ EXPORT_SYMBOL(lockdep_rtnl_is_held); #endif /* #ifdef CONFIG_PROVE_LOCKING */ static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; +static refcount_t rtnl_msg_handlers_ref[RTNL_FAMILY_MAX + 1]; static inline int rtm_msgindex(int msgtype) { @@ -272,10 +273,18 @@ EXPORT_SYMBOL_GPL(rtnl_unregister); */ void rtnl_unregister_all(int protocol) { + struct rtnl_link *handlers; + BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); - kfree(rtnl_msg_handlers[protocol]); + rtnl_lock(); + handlers = rtnl_msg_handlers[protocol]; rtnl_msg_handlers[protocol] = NULL; + rtnl_unlock(); + + while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 0) + schedule(); + kfree(handlers); } EXPORT_SYMBOL_GPL(rtnl_unregister_all); @@ -4173,6 +4182,8 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, if (dumpit == NULL) return -EOPNOTSUPP; + refcount_inc(&rtnl_msg_handlers_ref[family]); + if (type == RTM_GETLINK) min_dump_alloc = rtnl_calcit(skb, nlh); @@ -4186,6 +4197,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, err = netlink_dump_start(rtnl, skb, nlh, &c); } rtnl_lock(); + refcount_dec(&rtnl_msg_handlers_ref[family]); return err; } -- cgit v1.2.3-55-g7522 From 0cc09020aefe832db23d63cd3a6c889ab7645eec Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 9 Aug 2017 20:41:50 +0200 Subject: rtnetlink: small rtnl lock pushdown Instead of rtnl lock/unlock at the top level, push it down to the called function. This is just an intermediate step, next commit switches protection of the rtnl_link ops table to rcu, in which case (for dumps) the rtnl lock is acquired only by the netlink dumper infrastructure (current lock/unlock/dump/lock/unlock rtnl sequence becomes rcu lock/rcu unlock/dump). Signed-off-by: Florian Westphal Reviewed-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index c45a7c5e3232..be01d8e48661 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -4178,9 +4178,11 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, rtnl_dumpit_func dumpit; u16 min_dump_alloc = 0; + rtnl_lock(); + dumpit = rtnl_get_dumpit(family, type); if (dumpit == NULL) - return -EOPNOTSUPP; + goto err_unlock; refcount_inc(&rtnl_msg_handlers_ref[family]); @@ -4196,23 +4198,28 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, }; err = netlink_dump_start(rtnl, skb, nlh, &c); } - rtnl_lock(); refcount_dec(&rtnl_msg_handlers_ref[family]); return err; } + rtnl_lock(); doit = rtnl_get_doit(family, type); if (doit == NULL) - return -EOPNOTSUPP; + goto err_unlock; - return doit(skb, nlh, extack); + err = doit(skb, nlh, extack); + rtnl_unlock(); + + return err; + +err_unlock: + rtnl_unlock(); + return -EOPNOTSUPP; } static void rtnetlink_rcv(struct sk_buff *skb) { - rtnl_lock(); netlink_rcv_skb(skb, &rtnetlink_rcv_msg); - rtnl_unlock(); } static int rtnetlink_bind(struct net *net, int group) -- cgit v1.2.3-55-g7522 From 6853dd488119a42d01ad994060dc10b95c687c76 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 9 Aug 2017 20:41:51 +0200 Subject: rtnetlink: protect handler table with rcu Note that netlink dumps still acquire rtnl mutex via the netlink dump infrastructure. Signed-off-by: Florian Westphal Reviewed-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 121 +++++++++++++++++++++++++++------------------------ 1 file changed, 65 insertions(+), 56 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index be01d8e48661..d45946177bc8 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -126,7 +126,7 @@ bool lockdep_rtnl_is_held(void) EXPORT_SYMBOL(lockdep_rtnl_is_held); #endif /* #ifdef CONFIG_PROVE_LOCKING */ -static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; +static struct rtnl_link __rcu *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1]; static refcount_t rtnl_msg_handlers_ref[RTNL_FAMILY_MAX + 1]; static inline int rtm_msgindex(int msgtype) @@ -143,36 +143,6 @@ static inline int rtm_msgindex(int msgtype) return msgindex; } -static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex) -{ - struct rtnl_link *tab; - - if (protocol <= RTNL_FAMILY_MAX) - tab = rtnl_msg_handlers[protocol]; - else - tab = NULL; - - if (tab == NULL || tab[msgindex].doit == NULL) - tab = rtnl_msg_handlers[PF_UNSPEC]; - - return tab[msgindex].doit; -} - -static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex) -{ - struct rtnl_link *tab; - - if (protocol <= RTNL_FAMILY_MAX) - tab = rtnl_msg_handlers[protocol]; - else - tab = NULL; - - if (tab == NULL || tab[msgindex].dumpit == NULL) - tab = rtnl_msg_handlers[PF_UNSPEC]; - - return tab[msgindex].dumpit; -} - /** * __rtnl_register - Register a rtnetlink message type * @protocol: Protocol family or PF_UNSPEC @@ -201,18 +171,17 @@ int __rtnl_register(int protocol, int msgtype, BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); msgindex = rtm_msgindex(msgtype); - tab = rtnl_msg_handlers[protocol]; + tab = rcu_dereference(rtnl_msg_handlers[protocol]); if (tab == NULL) { tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL); if (tab == NULL) return -ENOBUFS; - rtnl_msg_handlers[protocol] = tab; + rcu_assign_pointer(rtnl_msg_handlers[protocol], tab); } if 
(doit) tab[msgindex].doit = doit; - if (dumpit) tab[msgindex].dumpit = dumpit; @@ -249,16 +218,22 @@ EXPORT_SYMBOL_GPL(rtnl_register); */ int rtnl_unregister(int protocol, int msgtype) { + struct rtnl_link *handlers; int msgindex; BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); msgindex = rtm_msgindex(msgtype); - if (rtnl_msg_handlers[protocol] == NULL) + rtnl_lock(); + handlers = rtnl_dereference(rtnl_msg_handlers[protocol]); + if (!handlers) { + rtnl_unlock(); return -ENOENT; + } - rtnl_msg_handlers[protocol][msgindex].doit = NULL; - rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; + handlers[msgindex].doit = NULL; + handlers[msgindex].dumpit = NULL; + rtnl_unlock(); return 0; } @@ -278,10 +253,12 @@ void rtnl_unregister_all(int protocol) BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); rtnl_lock(); - handlers = rtnl_msg_handlers[protocol]; - rtnl_msg_handlers[protocol] = NULL; + handlers = rtnl_dereference(rtnl_msg_handlers[protocol]); + RCU_INIT_POINTER(rtnl_msg_handlers[protocol], NULL); rtnl_unlock(); + synchronize_net(); + while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 0) schedule(); kfree(handlers); @@ -2820,11 +2797,13 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) * traverse the list of net devices and compute the minimum * buffer size based upon the filter mask. */ - list_for_each_entry(dev, &net->dev_base_head, dev_list) { + rcu_read_lock(); + for_each_netdev_rcu(net, dev) { min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size, if_nlmsg_size(dev, ext_filter_mask)); } + rcu_read_unlock(); return nlmsg_total_size(min_ifinfo_dump_size); } @@ -2836,19 +2815,29 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) if (s_idx == 0) s_idx = 1; + for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) { int type = cb->nlh->nlmsg_type-RTM_BASE; + struct rtnl_link *handlers; + rtnl_dumpit_func dumpit; + if (idx < s_idx || idx == PF_PACKET) continue; - if (rtnl_msg_handlers[idx] == NULL || - rtnl_msg_handlers[idx][type].dumpit == NULL) + + handlers = rtnl_dereference(rtnl_msg_handlers[idx]); + if (!handlers) continue; + + dumpit = READ_ONCE(handlers[type].dumpit); + if (!dumpit) + continue; + if (idx > s_idx) { memset(&cb->args[0], 0, sizeof(cb->args)); cb->prev_seq = 0; cb->seq = 0; } - if (rtnl_msg_handlers[idx][type].dumpit(skb, cb)) + if (dumpit(skb, cb)) break; } cb->family = idx; @@ -4151,11 +4140,12 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); + struct rtnl_link *handlers; + int err = -EOPNOTSUPP; rtnl_doit_func doit; int kind; int family; int type; - int err; type = nlh->nlmsg_type; if (type > RTM_MAX) @@ -4173,23 +4163,40 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN)) return -EPERM; + if (family > ARRAY_SIZE(rtnl_msg_handlers)) + family = PF_UNSPEC; + + rcu_read_lock(); + handlers = rcu_dereference(rtnl_msg_handlers[family]); + if (!handlers) { + family = PF_UNSPEC; + handlers = rcu_dereference(rtnl_msg_handlers[family]); + } + if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) { struct sock *rtnl; rtnl_dumpit_func dumpit; u16 min_dump_alloc = 0; - rtnl_lock(); + dumpit = READ_ONCE(handlers[type].dumpit); + if (!dumpit) { + family = PF_UNSPEC; + handlers = rcu_dereference(rtnl_msg_handlers[PF_UNSPEC]); + if (!handlers) + goto err_unlock; - dumpit = rtnl_get_dumpit(family, type); - if (dumpit == NULL) - goto err_unlock; + dumpit = 
READ_ONCE(handlers[type].dumpit); + if (!dumpit) + goto err_unlock; + } refcount_inc(&rtnl_msg_handlers_ref[family]); if (type == RTM_GETLINK) min_dump_alloc = rtnl_calcit(skb, nlh); - __rtnl_unlock(); + rcu_read_unlock(); + rtnl = net->rtnl; { struct netlink_dump_control c = { @@ -4202,18 +4209,20 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } - rtnl_lock(); - doit = rtnl_get_doit(family, type); - if (doit == NULL) - goto err_unlock; + rcu_read_unlock(); - err = doit(skb, nlh, extack); + rtnl_lock(); + handlers = rtnl_dereference(rtnl_msg_handlers[family]); + if (handlers) { + doit = READ_ONCE(handlers[type].doit); + if (doit) + err = doit(skb, nlh, extack); + } rtnl_unlock(); - return err; err_unlock: - rtnl_unlock(); + rcu_read_unlock(); return -EOPNOTSUPP; } -- cgit v1.2.3-55-g7522 From 62256f98f244fbb1c7a10465e1ee412f209d8978 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 9 Aug 2017 20:41:52 +0200 Subject: rtnetlink: add RTNL_FLAG_DOIT_UNLOCKED Allow callers to tell rtnetlink core that its doit callback should be invoked without holding rtnl mutex. Signed-off-by: Florian Westphal Reviewed-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- include/net/rtnetlink.h | 4 ++++ net/core/rtnetlink.c | 15 +++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index ac32460a0adb..21837ca68ecc 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -8,6 +8,10 @@ typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, struct netlink_ext_ack *); typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); +enum rtnl_link_flags { + RTNL_FLAG_DOIT_UNLOCKED = 1, +}; + int __rtnl_register(int protocol, int msgtype, rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); void rtnl_register(int protocol, int msgtype, diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d45946177bc8..dd4e50dfa248 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -62,6 +62,7 @@ struct rtnl_link { rtnl_doit_func doit; rtnl_dumpit_func dumpit; + unsigned int flags; }; static DEFINE_MUTEX(rtnl_mutex); @@ -184,6 +185,7 @@ int __rtnl_register(int protocol, int msgtype, tab[msgindex].doit = doit; if (dumpit) tab[msgindex].dumpit = dumpit; + tab[msgindex].flags |= flags; return 0; } @@ -233,6 +235,7 @@ int rtnl_unregister(int protocol, int msgtype) handlers[msgindex].doit = NULL; handlers[msgindex].dumpit = NULL; + handlers[msgindex].flags = 0; rtnl_unlock(); return 0; @@ -4143,6 +4146,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct rtnl_link *handlers; int err = -EOPNOTSUPP; rtnl_doit_func doit; + unsigned int flags; int kind; int family; int type; @@ -4209,6 +4213,17 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } + flags = READ_ONCE(handlers[type].flags); + if (flags & RTNL_FLAG_DOIT_UNLOCKED) { + refcount_inc(&rtnl_msg_handlers_ref[family]); + doit = READ_ONCE(handlers[type].doit); + rcu_read_unlock(); + if (doit) + err = doit(skb, nlh, extack); + refcount_dec(&rtnl_msg_handlers_ref[family]); + return err; + } + rcu_read_unlock(); rtnl_lock(); -- cgit v1.2.3-55-g7522 From 165b9117256c5432bf241fdf4637487e4af68cc8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 9 Aug 2017 20:41:53 +0200 Subject: net: call newid/getid without rtnl mutex held Both functions take nsid_lock and don't rely on rtnl lock. 
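
A condensed sketch of the caller-side pattern that the net_namespace.c hunk below adopts, assuming a hypothetical subsystem called foo: the fifth rtnl_register() argument is now a flags word rather than the old, almost-always-NULL pointer, and passing RTNL_FLAG_DOIT_UNLOCKED means the doit handler runs without the rtnl mutex and must supply its own locking. Only rtnl_register(), the rtnl_doit_func signature and RTNL_FLAG_DOIT_UNLOCKED come from the patches above; everything named foo_* is invented for illustration.

#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <net/rtnetlink.h>

/* private lock: with RTNL_FLAG_DOIT_UNLOCKED the rtnl mutex is not held for us */
static DEFINE_SPINLOCK(foo_lock);

static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                    struct netlink_ext_ack *extack)
{
        spin_lock(&foo_lock);
        /* ... handle the request under foo_lock only ... */
        spin_unlock(&foo_lock);
        return 0;
}

static int __init foo_init(void)
{
        /* the 5th argument is now a flags word (hence the NULL -> 0 conversion above) */
        rtnl_register(PF_UNSPEC, RTM_NEWNSID, /* e.g. the type used in the hunk below */
                      foo_doit, NULL, RTNL_FLAG_DOIT_UNLOCKED);
        return 0;
}
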
Signed-off-by: Florian Westphal Reviewed-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/core/net_namespace.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index a7f06d706aa0..6cfdc7c84c48 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -855,9 +855,10 @@ static int __init net_ns_init(void) register_pernet_subsys(&net_ns_ops); - rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, 0); + rtnl_register(PF_UNSPEC, RTM_NEWNSID, rtnl_net_newid, NULL, + RTNL_FLAG_DOIT_UNLOCKED); rtnl_register(PF_UNSPEC, RTM_GETNSID, rtnl_net_getid, rtnl_net_dumpid, - 0); + RTNL_FLAG_DOIT_UNLOCKED); return 0; } -- cgit v1.2.3-55-g7522 From 7c1885ae9aba0d6b3f854dfee099a3c1c796755f Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Tue, 8 Aug 2017 14:28:45 -0500 Subject: ibmvnic: Clean up resources on probe failure Ensure that any resources allocated during probe are released if the probe of the driver fails. Signed-off-by: Nathan Fontenot Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 32c116652755..cf3cf921eb35 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3841,31 +3841,35 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) do { rc = ibmvnic_init(adapter); - if (rc && rc != EAGAIN) { - free_netdev(netdev); - return rc; - } + if (rc && rc != EAGAIN) + goto ibmvnic_init_fail; } while (rc == EAGAIN); netdev->mtu = adapter->req_mtu - ETH_HLEN; rc = device_create_file(&dev->dev, &dev_attr_failover); - if (rc) { - free_netdev(netdev); - return rc; - } + if (rc) + goto ibmvnic_init_fail; rc = register_netdev(netdev); if (rc) { dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc); - device_remove_file(&dev->dev, &dev_attr_failover); - free_netdev(netdev); - return rc; + goto ibmvnic_register_fail; } dev_info(&dev->dev, "ibmvnic registered\n"); adapter->state = VNIC_PROBED; return 0; + +ibmvnic_register_fail: + device_remove_file(&dev->dev, &dev_attr_failover); + +ibmvnic_init_fail: + release_sub_crqs(adapter); + release_crq_queue(adapter); + free_netdev(netdev); + + return rc; } static int ibmvnic_remove(struct vio_dev *dev) -- cgit v1.2.3-55-g7522 From d1cf33d93166f146484659448bda54f1f651379b Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Tue, 8 Aug 2017 15:24:05 -0500 Subject: ibmvnic: Add netdev_dbg output for debugging To ease debugging of the ibmvnic driver add a series of netdev_dbg() statements to track driver status, especially during initialization, removal, and resetting of the driver. Signed-off-by: Nathan Fontenot Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 62 +++++++++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index cf3cf921eb35..6b7d6b8aeb7a 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -399,6 +399,7 @@ static int init_stats_token(struct ibmvnic_adapter *adapter) } adapter->stats_token = stok; + netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok); return 0; } @@ -412,6 +413,8 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) for (i = 0; i < rx_scrqs; i++) { rx_pool = &adapter->rx_pool[i]; + netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i); + rc = reset_long_term_buff(adapter, &rx_pool->long_term_buff); if (rc) return rc; @@ -444,6 +447,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) for (i = 0; i < rx_scrqs; i++) { rx_pool = &adapter->rx_pool[i]; + netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i); + kfree(rx_pool->free_map); free_long_term_buff(adapter, &rx_pool->long_term_buff); @@ -490,7 +495,7 @@ static int init_rx_pools(struct net_device *netdev) rx_pool = &adapter->rx_pool[i]; netdev_dbg(adapter->netdev, - "Initializing rx_pool %d, %lld buffs, %lld bytes each\n", + "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n", i, adapter->req_rx_add_entries_per_subcrq, be64_to_cpu(size_array[i])); @@ -540,6 +545,8 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); for (i = 0; i < tx_scrqs; i++) { + netdev_dbg(adapter->netdev, "Re-setting tx_pool[%d]\n", i); + tx_pool = &adapter->tx_pool[i]; rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff); @@ -570,6 +577,7 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter) tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); for (i = 0; i < tx_scrqs; i++) { + netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i); tx_pool = &adapter->tx_pool[i]; kfree(tx_pool->tx_buff); free_long_term_buff(adapter, &tx_pool->long_term_buff); @@ -596,6 +604,11 @@ static int init_tx_pools(struct net_device *netdev) for (i = 0; i < tx_subcrqs; i++) { tx_pool = &adapter->tx_pool[i]; + + netdev_dbg(adapter->netdev, + "Initializing tx_pool[%d], %lld buffs\n", + i, adapter->req_tx_entries_per_subcrq); + tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq, sizeof(struct ibmvnic_tx_buff), GFP_KERNEL); @@ -666,8 +679,10 @@ static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter) if (!adapter->napi_enabled) return; - for (i = 0; i < adapter->req_rx_queues; i++) + for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i); napi_disable(&adapter->napi[i]); + } adapter->napi_enabled = false; } @@ -730,8 +745,11 @@ static void release_resources(struct ibmvnic_adapter *adapter) if (adapter->napi) { for (i = 0; i < adapter->req_rx_queues; i++) { - if (&adapter->napi[i]) + if (&adapter->napi[i]) { + netdev_dbg(adapter->netdev, + "Releasing napi[%d]\n", i); netif_napi_del(&adapter->napi[i]); + } } } } @@ -744,7 +762,8 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state) bool resend; int rc; - netdev_err(netdev, "setting link state %d\n", link_state); + netdev_dbg(netdev, "setting link state %d\n", link_state); + memset(&crq, 0, sizeof(crq)); crq.logical_link_state.first = IBMVNIC_CRQ_CMD; crq.logical_link_state.cmd = LOGICAL_LINK_STATE; @@ -781,6 +800,9 @@ 
static int set_real_num_queues(struct net_device *netdev) struct ibmvnic_adapter *adapter = netdev_priv(netdev); int rc; + netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n", + adapter->req_tx_queues, adapter->req_rx_queues); + rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues); if (rc) { netdev_err(netdev, "failed to set the number of tx queues\n"); @@ -818,6 +840,7 @@ static int init_resources(struct ibmvnic_adapter *adapter) return -ENOMEM; for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(netdev, "Adding napi[%d]\n", i); netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll, NAPI_POLL_WEIGHT); } @@ -846,6 +869,7 @@ static int __ibmvnic_open(struct net_device *netdev) * set the logical link state to up */ for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i); if (prev_state == VNIC_CLOSED) enable_irq(adapter->rx_scrq[i]->irq); else @@ -853,6 +877,7 @@ static int __ibmvnic_open(struct net_device *netdev) } for (i = 0; i < adapter->req_tx_queues; i++) { + netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i); if (prev_state == VNIC_CLOSED) enable_irq(adapter->tx_scrq[i]->irq); else @@ -926,6 +951,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter) if (!tx_pool) continue; + netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i); for (j = 0; j < tx_entries; j++) { if (tx_pool->tx_buff[j].skb) { dev_kfree_skb_any(tx_pool->tx_buff[j].skb); @@ -953,8 +979,11 @@ static int __ibmvnic_close(struct net_device *netdev) if (adapter->tx_scrq) { for (i = 0; i < adapter->req_tx_queues; i++) - if (adapter->tx_scrq[i]->irq) + if (adapter->tx_scrq[i]->irq) { + netdev_dbg(adapter->netdev, + "Disabling tx_scrq[%d] irq\n", i); disable_irq(adapter->tx_scrq[i]->irq); + } } rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN); @@ -973,8 +1002,11 @@ static int __ibmvnic_close(struct net_device *netdev) break; } - if (adapter->rx_scrq[i]->irq) + if (adapter->rx_scrq[i]->irq) { + netdev_dbg(adapter->netdev, + "Disabling rx_scrq[%d] irq\n", i); disable_irq(adapter->rx_scrq[i]->irq); + } } } @@ -1367,6 +1399,9 @@ static int do_reset(struct ibmvnic_adapter *adapter, struct net_device *netdev = adapter->netdev; int i, rc; + netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n", + rwi->reset_reason); + netif_carrier_off(netdev); adapter->reset_reason = rwi->reset_reason; @@ -1491,6 +1526,7 @@ static void __ibmvnic_reset(struct work_struct *work) } if (rc) { + netdev_dbg(adapter->netdev, "Reset failed\n"); free_all_rwi(adapter); mutex_unlock(&adapter->reset_lock); return; @@ -1524,7 +1560,7 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter, list_for_each(entry, &adapter->rwi_list) { tmp = list_entry(entry, struct ibmvnic_rwi, list); if (tmp->reset_reason == reason) { - netdev_err(netdev, "Matching reset found, skipping\n"); + netdev_dbg(netdev, "Skipping matching reset\n"); mutex_unlock(&adapter->rwi_lock); return; } @@ -1540,6 +1576,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter, rwi->reset_reason = reason; list_add_tail(&rwi->list, &adapter->rwi_list); mutex_unlock(&adapter->rwi_lock); + + netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason); schedule_work(&adapter->ibmvnic_reset); } @@ -1900,12 +1938,14 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter) int i, rc; for (i = 0; i < adapter->req_tx_queues; i++) { + netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i); rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]); if (rc) return rc; } for (i = 0; 
i < adapter->req_rx_queues; i++) { + netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i); rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]); if (rc) return rc; @@ -2009,6 +2049,8 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) if (!adapter->tx_scrq[i]) continue; + netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n", + i); if (adapter->tx_scrq[i]->irq) { free_irq(adapter->tx_scrq[i]->irq, adapter->tx_scrq[i]); @@ -2028,6 +2070,8 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) if (!adapter->rx_scrq[i]) continue; + netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n", + i); if (adapter->rx_scrq[i]->irq) { free_irq(adapter->rx_scrq[i]->irq, adapter->rx_scrq[i]); @@ -2182,6 +2226,8 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) int rc = 0; for (i = 0; i < adapter->req_tx_queues; i++) { + netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n", + i); scrq = adapter->tx_scrq[i]; scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); @@ -2203,6 +2249,8 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter) } for (i = 0; i < adapter->req_rx_queues; i++) { + netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n", + i); scrq = adapter->rx_scrq[i]; scrq->irq = irq_create_mapping(NULL, scrq->hw_irq); if (!scrq->irq) { -- cgit v1.2.3-55-g7522 From 16587c210cc58c2571ee3905a1c1213ca2642506 Mon Sep 17 00:00:00 2001 From: Nathan Fontenot Date: Tue, 8 Aug 2017 15:26:18 -0500 Subject: ibmvnic: Correct 'unused variable' warning in build. Commit a248878d7a1d ("ibmvnic: Check for transport event on driver resume") removed the loop to kick irqs on driver resume but didn't remove the now unused loop variable 'i'. Signed-off-by: Nathan Fontenot Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6b7d6b8aeb7a..5ac873173b2e 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4013,7 +4013,6 @@ static int ibmvnic_resume(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct ibmvnic_adapter *adapter = netdev_priv(netdev); - int i; if (adapter->state != VNIC_OPEN) return 0; -- cgit v1.2.3-55-g7522 From 6eb7939371104e4ff78261c165c6f33ff6d90031 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 8 Aug 2017 15:51:02 -0600 Subject: net: ipv6: lower ndisc notifier priority below addrconf ndisc_notify is used to send unsolicited neighbor advertisements (e.g., on a link up). Currently, the ndisc notifier is run before the addrconf notifer which means NA's are not sent for link-local addresses which are added by the addrconf notifier. Fix by lowering the priority of the ndisc notifier. Setting the priority to ADDRCONF_NOTIFY_PRIORITY - 5 means it runs after addrconf and before the route notifier which is ADDRCONF_NOTIFY_PRIORITY - 10. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/ndisc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 0327c1f2e6fc..5e338eb89509 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1779,6 +1779,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, static struct notifier_block ndisc_netdev_notifier = { .notifier_call = ndisc_netdev_event, + .priority = ADDRCONF_NOTIFY_PRIORITY - 5, }; #ifdef CONFIG_SYSCTL -- cgit v1.2.3-55-g7522 From 42013e9038225aeaed98abf7ac4973c2a6f4ffc6 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Tue, 8 Aug 2017 19:34:28 -0700 Subject: liquidio: napi cleanup Disable napi when interface is going down. Delete napi when destroying the interface. Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 15 +++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 14 ++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 3ec0dd9b7201..cbd6287e578e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1736,6 +1736,10 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) oct->droq[0]->ops.poll_mode = 0; } + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -2770,6 +2774,17 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct napi_struct *napi, *n; + + if (oct->props[lio->ifidx].napi_enabled) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 0; + } ifstate_reset(lio, LIO_IFSTATE_RUNNING); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 935ff299cdd9..c6f52f235647 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1137,6 +1137,10 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) oct->droq[0]->ops.poll_mode = 0; } + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) unregister_netdev(netdev); @@ -1784,6 +1788,16 @@ static int liquidio_stop(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; + struct napi_struct *napi, *n; + + if (oct->props[lio->ifidx].napi_enabled) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + oct->droq[0]->ops.poll_mode = 0; + } netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); /* Inform that netif carrier is down */ -- cgit v1.2.3-55-g7522 From d78d6776bc958b16b9a8883278f36d62e3145409 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Wed, 9 Aug 2017 10:34:15 +0530 Subject: net: dsa: make dsa_switch_ops const Make these structures const as they are only stored in the ops field of a dsa_switch structure, which is const. Done using Coccinelle. 
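
The rationale behind this and the constification patches that follow can be shown with a stand-alone C illustration (ordinary userspace C, not kernel code, with invented names): once the only consumer holds a pointer-to-const, the table itself can be declared const, moves into read-only data, and an accidental write becomes a compile-time error.

#include <stdio.h>

struct foo_ops {
        int  (*open)(void);
        void (*close)(void);
};

static int  foo_open(void)  { return 42; }
static void foo_close(void) { }

/* analogous to "static const struct dsa_switch_ops ..." in the diffs below:
 * the table is never written after initialisation, so it can live in .rodata */
static const struct foo_ops foo_driver_ops = {
        .open  = foo_open,
        .close = foo_close,
};

/* the consumer, like struct dsa_switch, only keeps a pointer-to-const */
struct foo_device {
        const struct foo_ops *ops;
};

int main(void)
{
        struct foo_device dev = { .ops = &foo_driver_ops };

        /* dev.ops->open = NULL; would not compile: assignment through pointer-to-const */
        return dev.ops->open() == 42 ? 0 : 1;
}
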
Signed-off-by: Bhumika Goyal Reviewed-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/dsa_loop.c | 2 +- drivers/net/dsa/lan9303-core.c | 2 +- drivers/net/dsa/mt7530.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index 76d66604a34e..7819a9fe8321 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -257,7 +257,7 @@ static int dsa_loop_port_vlan_del(struct dsa_switch *ds, int port, return 0; } -static struct dsa_switch_ops dsa_loop_driver = { +static const struct dsa_switch_ops dsa_loop_driver = { .get_tag_protocol = dsa_loop_get_protocol, .setup = dsa_loop_setup, .get_strings = dsa_loop_get_strings, diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 46fc1d5d3c9e..b471413d3df9 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -797,7 +797,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port, } } -static struct dsa_switch_ops lan9303_switch_ops = { +static const struct dsa_switch_ops lan9303_switch_ops = { .get_tag_protocol = lan9303_get_tag_protocol, .setup = lan9303_setup, .get_strings = lan9303_get_strings, diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 8faa796a115f..c142b97add2c 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1017,7 +1017,7 @@ mt7530_setup(struct dsa_switch *ds) return 0; } -static struct dsa_switch_ops mt7530_switch_ops = { +static const struct dsa_switch_ops mt7530_switch_ops = { .get_tag_protocol = mtk_get_tag_protocol, .setup = mt7530_setup, .get_strings = mt7530_get_strings, -- cgit v1.2.3-55-g7522 From 46c4b7a5694ccfdc2659fc8beb8736d7fb1c9841 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Wed, 9 Aug 2017 14:49:15 +0530 Subject: atm: make atmdev_ops const Make these structures const as they are either passed to the function atm_dev_register having the corresponding argument as const or stored in the ops field of a atm_dev structure, which is also const. Done using Coccinelle. Signed-off-by: Bhumika Goyal Signed-off-by: David S. 
Miller --- drivers/atm/adummy.c | 2 +- drivers/atm/atmtcp.c | 2 +- drivers/atm/he.c | 2 +- drivers/atm/idt77252.c | 2 +- drivers/atm/nicstar.c | 2 +- drivers/atm/solos-pci.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c index da27ddfa75a7..8d98130ecd40 100644 --- a/drivers/atm/adummy.c +++ b/drivers/atm/adummy.c @@ -130,7 +130,7 @@ adummy_proc_read(struct atm_dev *dev, loff_t *pos, char *page) return 0; } -static struct atmdev_ops adummy_ops = +static const struct atmdev_ops adummy_ops = { .open = adummy_open, .close = adummy_close, diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index 56fa16c85ebf..afebeb1c3e1e 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -342,7 +342,7 @@ static struct atmdev_ops atmtcp_v_dev_ops = { */ -static struct atmdev_ops atmtcp_c_dev_ops = { +static const struct atmdev_ops atmtcp_c_dev_ops = { .close = atmtcp_c_close, .send = atmtcp_c_send }; diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 8f6156d475d1..e58538c29377 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -161,7 +161,7 @@ static unsigned int clocktab[] = { CLK_LOW }; -static struct atmdev_ops he_ops = +static const struct atmdev_ops he_ops = { .open = he_open, .close = he_close, diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index b7a168c46692..47f3c4ae0594 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -134,7 +134,7 @@ static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos, static void idt77252_softint(struct work_struct *work); -static struct atmdev_ops idt77252_ops = +static const struct atmdev_ops idt77252_ops = { .dev_close = idt77252_dev_close, .open = idt77252_open, diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index 9588d80f318e..a9702836cbae 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -154,7 +154,7 @@ static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); static struct ns_dev *cards[NS_MAX_CARDS]; static unsigned num_cards; -static struct atmdev_ops atm_ops = { +static const struct atmdev_ops atm_ops = { .open = ns_open, .close = ns_close, .ioctl = ns_ioctl, diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 8754793223cd..0df1a1c80b00 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1187,7 +1187,7 @@ static int psend(struct atm_vcc *vcc, struct sk_buff *skb) return 0; } -static struct atmdev_ops fpga_ops = { +static const struct atmdev_ops fpga_ops = { .open = popen, .close = pclose, .ioctl = NULL, -- cgit v1.2.3-55-g7522 From 800bb47e71cac00e98c822919174bb2e1d2e8071 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Wed, 9 Aug 2017 15:02:08 +0530 Subject: net: atm: make atmdev_ops const Make these const as they are only stored in the ops field of a atm_dev structure, which is const. Done using Coccinelle. Signed-off-by: Bhumika Goyal Signed-off-by: David S. 
Miller --- net/atm/clip.c | 2 +- net/atm/lec.c | 2 +- net/atm/mpc.c | 2 +- net/atm/signaling.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/net/atm/clip.c b/net/atm/clip.c index f271a7bcf5b2..65f706e4344c 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -617,7 +617,7 @@ static void atmarpd_close(struct atm_vcc *vcc) module_put(THIS_MODULE); } -static struct atmdev_ops atmarpd_dev_ops = { +static const struct atmdev_ops atmarpd_dev_ops = { .close = atmarpd_close }; diff --git a/net/atm/lec.c b/net/atm/lec.c index 093fe8707731..a3d93a1bb133 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -486,7 +486,7 @@ static void lec_atm_close(struct atm_vcc *vcc) module_put(THIS_MODULE); } -static struct atmdev_ops lecdev_ops = { +static const struct atmdev_ops lecdev_ops = { .close = lec_atm_close, .send = lec_atm_send }; diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 680a4b9095a1..5677147209e8 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -779,7 +779,7 @@ static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb) netif_rx(new_skb); } -static struct atmdev_ops mpc_ops = { /* only send is required */ +static const struct atmdev_ops mpc_ops = { /* only send is required */ .close = mpoad_close, .send = msg_from_mpoad }; diff --git a/net/atm/signaling.c b/net/atm/signaling.c index 983c3a21a133..0a20f6e953ac 100644 --- a/net/atm/signaling.c +++ b/net/atm/signaling.c @@ -217,7 +217,7 @@ static void sigd_close(struct atm_vcc *vcc) read_unlock(&vcc_sklist_lock); } -static struct atmdev_ops sigd_dev_ops = { +static const struct atmdev_ops sigd_dev_ops = { .close = sigd_close, .send = sigd_send }; -- cgit v1.2.3-55-g7522 From 0c07ce7f1a4c2f64bc9c8a67397010772a78647a Mon Sep 17 00:00:00 2001 From: John Crispin Date: Wed, 9 Aug 2017 12:09:31 +0200 Subject: net-next: mediatek: fix typos inside the header file Trivial patch fixing 2 typos. Signed-off-by: John Crispin Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 4594862e5a9b..940517af8039 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -599,8 +599,8 @@ struct mtk_soc_data { * @pctl: The register map pointing at the range used to setup * GMAC port drive/slew values * @dma_refcnt: track how many netdevs are using the DMA engine - * @tx_ring: Pointer to the memore holding info about the TX ring - * @rx_ring: Pointer to the memore holding info about the RX ring + * @tx_ring: Pointer to the memory holding info about the TX ring + * @rx_ring: Pointer to the memory holding info about the RX ring * @tx_napi: The TX NAPI struct * @rx_napi: The RX NAPI struct * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring -- cgit v1.2.3-55-g7522 From 6427dc1da51dfc47d65caf86e45e8338d1a3905c Mon Sep 17 00:00:00 2001 From: John Crispin Date: Wed, 9 Aug 2017 12:09:32 +0200 Subject: net-next: mediatek: bring up QDMA RX ring 0 This patch is in preparation for adding HW flow and QoS offloading. For those features to work, the driver needs to bring up the first QDMA RX ring. This ring is used by the PPE offloading HW. Signed-off-by: John Crispin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 36 +++++++++++++++++++++-------- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 3 +++ 2 files changed, 29 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index acf2b3b8009c..5e81a7263654 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1285,9 +1285,19 @@ static void mtk_tx_clean(struct mtk_eth *eth) static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) { - struct mtk_rx_ring *ring = ð->rx_ring[ring_no]; + struct mtk_rx_ring *ring; int rx_data_len, rx_dma_size; int i; + u32 offset = 0; + + if (rx_flag == MTK_RX_FLAGS_QDMA) { + if (ring_no) + return -EINVAL; + ring = ð->rx_ring_qdma; + offset = 0x1000; + } else { + ring = ð->rx_ring[ring_no]; + } if (rx_flag == MTK_RX_FLAGS_HWLRO) { rx_data_len = MTK_MAX_LRO_RX_LENGTH; @@ -1337,17 +1347,16 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) */ wmb(); - mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no)); - mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no)); - mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); - mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX); + mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset); + mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset); + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset); return 0; } -static void mtk_rx_clean(struct mtk_eth *eth, int ring_no) +static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) { - struct mtk_rx_ring *ring = ð->rx_ring[ring_no]; int i; if (ring->data && ring->dma) { @@ -1673,6 +1682,10 @@ static int mtk_dma_init(struct mtk_eth *eth) if (err) return err; + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); + if (err) + return err; + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); if (err) return err; @@ -1712,12 +1725,13 @@ static void mtk_dma_free(struct mtk_eth *eth) eth->phy_scratch_ring = 0; } mtk_tx_clean(eth); - mtk_rx_clean(eth, 0); + mtk_rx_clean(eth, ð->rx_ring[0]); + mtk_rx_clean(eth, ð->rx_ring_qdma); if (eth->hwlro) { mtk_hwlro_rx_uninit(eth); for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) - mtk_rx_clean(eth, i); + mtk_rx_clean(eth, ð->rx_ring[i]); } kfree(eth->scratch_head); @@ -1784,7 +1798,9 @@ static int mtk_start_dma(struct mtk_eth *eth) mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | - MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO, + MTK_DMA_SIZE_16DWORDS | MTK_NDP_CO_PRO | + MTK_RX_DMA_EN | MTK_RX_2B_OFFSET | + MTK_RX_BT_32DWORDS, MTK_QDMA_GLO_CFG); mtk_w32(eth, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index 940517af8039..3d3c24a28112 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -532,6 +532,7 @@ struct mtk_tx_ring { enum mtk_rx_flags { MTK_RX_FLAGS_NORMAL = 0, MTK_RX_FLAGS_HWLRO, + MTK_RX_FLAGS_QDMA, }; /* struct mtk_rx_ring - This struct holds info describing a RX ring @@ -601,6 +602,7 @@ struct mtk_soc_data { * @dma_refcnt: track how many netdevs are using the DMA engine * @tx_ring: Pointer to the memory holding info about the TX ring * @rx_ring: Pointer to the memory holding info about the RX ring + * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring * @tx_napi: The TX NAPI struct * @rx_napi: The RX NAPI struct * @scratch_ring: Newer SoCs need 
memory for a second HW managed TX ring @@ -633,6 +635,7 @@ struct mtk_eth { atomic_t dma_refcnt; struct mtk_tx_ring tx_ring; struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM]; + struct mtk_rx_ring rx_ring_qdma; struct napi_struct tx_napi; struct napi_struct rx_napi; struct mtk_tx_dma *scratch_ring; -- cgit v1.2.3-55-g7522 From 68277a2c9d32fd9090247a5c08aaf1353049c0b1 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Wed, 9 Aug 2017 14:41:16 +0200 Subject: net-next: dsa: move struct dsa_device_ops to the global header file We need to access this struct from within the flow_dissector to fix dissection for packets coming in on DSA devices. Signed-off-by: Muciri Gatimu Signed-off-by: Shashidhar Lakkavalli Signed-off-by: John Crispin Signed-off-by: David S. Miller --- include/net/dsa.h | 7 +++++++ net/dsa/dsa_priv.h | 7 ------- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index a4f66dbb4b7c..65d7804c6f69 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -101,6 +101,13 @@ struct dsa_platform_data { struct packet_type; +struct dsa_device_ops { + struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); + struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev, + struct packet_type *pt, + struct net_device *orig_dev); +}; + struct dsa_switch_tree { struct list_head list; diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h index 1debf9c42fc4..9c3eeb72462d 100644 --- a/net/dsa/dsa_priv.h +++ b/net/dsa/dsa_priv.h @@ -65,13 +65,6 @@ struct dsa_notifier_vlan_info { int port; }; -struct dsa_device_ops { - struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); - struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev); -}; - struct dsa_slave_priv { /* Copy of dp->ds->dst->tag_ops->xmit for faster access in hot path */ struct sk_buff * (*xmit)(struct sk_buff *skb, -- cgit v1.2.3-55-g7522 From 598a968011ffc4d624934995fe46d06bd450cdf4 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Wed, 9 Aug 2017 14:41:17 +0200 Subject: net-next: dsa: add flow_dissect callback to struct dsa_device_ops When the flow dissector first sees packets coming in on a DSA devices the 802.3 header wont be located where the code expects it to be as the tag is still present. Adding this new callback allows a DSA device to provide a new function that the flow_dissector can use to get the correct protocol and offset of the network header. Signed-off-by: Muciri Gatimu Signed-off-by: Shashidhar Lakkavalli Signed-off-by: John Crispin Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- include/net/dsa.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/net/dsa.h b/include/net/dsa.h index 65d7804c6f69..7f46b521313e 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -106,6 +106,8 @@ struct dsa_device_ops { struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev); + int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto, + int *offset); }; struct dsa_switch_tree { -- cgit v1.2.3-55-g7522 From 2dd592b274861021018446102fb443e24f72fe72 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Wed, 9 Aug 2017 14:41:18 +0200 Subject: net-next: tag_mtk: add flow_dissect callback to the ops struct The MT7530 inserts the 4 magic header in between the 802.3 address and protocol field. 
The patch implements the callback that can be called by the flow dissector to figure out the real protocol and offset of the network header. With this patch applied we can properly parse the packet and thus make hashing function properly. Signed-off-by: Muciri Gatimu Signed-off-by: Shashidhar Lakkavalli Signed-off-by: John Crispin Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- net/dsa/tag_mtk.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c index 2f32b7ea3365..02163c045a96 100644 --- a/net/dsa/tag_mtk.c +++ b/net/dsa/tag_mtk.c @@ -87,7 +87,17 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev, return skb; } +static int mtk_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto, + int *offset) +{ + *offset = 4; + *proto = ((__be16 *)skb->data)[1]; + + return 0; +} + const struct dsa_device_ops mtk_netdev_ops = { - .xmit = mtk_tag_xmit, - .rcv = mtk_tag_rcv, + .xmit = mtk_tag_xmit, + .rcv = mtk_tag_rcv, + .flow_dissect = mtk_tag_flow_dissect, }; -- cgit v1.2.3-55-g7522 From 43e665287f931a167cd2eea3387efda901bff0ce Mon Sep 17 00:00:00 2001 From: John Crispin Date: Wed, 9 Aug 2017 14:41:19 +0200 Subject: net-next: dsa: fix flow dissection RPS and probably other kernel features are currently broken on some if not all DSA devices. The root cause of this is that skb_hash will call the flow_dissector. At this point the skb still contains the magic switch header and the skb->protocol field is not set up to the correct 802.3 value yet. By the time the tag specific code is called, removing the header and properly setting the protocol an invalid hash is already set. In the case of the mt7530 this will result in all flows always having the same hash. Signed-off-by: Muciri Gatimu Signed-off-by: Shashidhar Lakkavalli Signed-off-by: John Crispin Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- net/core/flow_dissector.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 0cc672aba1f0..5b5be9577257 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -440,6 +441,17 @@ bool __skb_flow_dissect(const struct sk_buff *skb, skb->vlan_proto : skb->protocol; nhoff = skb_network_offset(skb); hlen = skb_headlen(skb); + if (unlikely(netdev_uses_dsa(skb->dev))) { + const struct dsa_device_ops *ops; + int offset; + + ops = skb->dev->dsa_ptr->tag_ops; + if (ops->flow_dissect && + !ops->flow_dissect(skb, &proto, &offset)) { + hlen -= offset; + nhoff += offset; + } + } } /* It is ensured by skb_flow_dissector_init() that control key will -- cgit v1.2.3-55-g7522 From c5c60b2d577e88c50beae6ecf469b7a78643be4f Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:53:42 +0530 Subject: ar5523: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. 
Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ar5523/ar5523.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 106d6f8d471a..68f0463ed8df 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1749,7 +1749,7 @@ static void ar5523_disconnect(struct usb_interface *intf) { USB_DEVICE((vendor), (device) + 1), \ .driver_info = AR5523_FLAG_ABG|AR5523_FLAG_PRE_FIRMWARE } -static struct usb_device_id ar5523_id_table[] = { +static const struct usb_device_id ar5523_id_table[] = { AR5523_DEVICE_UG(0x168c, 0x0001), /* Atheros / AR5523 */ AR5523_DEVICE_UG(0x0cf3, 0x0001), /* Atheros2 / AR5523_1 */ AR5523_DEVICE_UG(0x0cf3, 0x0003), /* Atheros2 / AR5523_2 */ -- cgit v1.2.3-55-g7522 From 76f6a5c80a4b9e2d9ae2fe53c6ee6be0f630ea88 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:54:41 +0530 Subject: carl9170: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/carl9170/usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 99ab20334d21..e7c3f3b8457d 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -64,7 +64,7 @@ MODULE_ALIAS("arusb_lnx"); * http://wireless.kernel.org/en/users/Drivers/ar9170/devices ), * whenever you add a new device. */ -static struct usb_device_id carl9170_usb_ids[] = { +static const struct usb_device_id carl9170_usb_ids[] = { /* Atheros 9170 */ { USB_DEVICE(0x0cf3, 0x9170) }, /* Atheros TG121N */ -- cgit v1.2.3-55-g7522 From 22796d149229ceadd95abe7cf90331d2c9db7bcf Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:55:03 +0530 Subject: at76c50x: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/atmel/at76c50x-usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 09defbcedd5e..94bf01f8b2a8 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -130,7 +130,7 @@ MODULE_FIRMWARE("atmel_at76c505amx-rfmd.bin"); #define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops) -static struct usb_device_id dev_table[] = { +static const struct usb_device_id dev_table[] = { /* * at76c503-i3861 */ -- cgit v1.2.3-55-g7522 From c4291319b6fe43b82f4ffb4b0b9fdc711c88264b Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:56:28 +0530 Subject: orinoco: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. 
Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index c84fd8490601..56f6e3b71f48 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -210,7 +210,7 @@ struct ezusb_packet { } __packed; /* Table of devices that work or may work with this driver */ -static struct usb_device_id ezusb_table[] = { +static const struct usb_device_id ezusb_table[] = { {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_WL215_ID)}, {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_HP_WL215_ID)}, {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_W200_ID)}, -- cgit v1.2.3-55-g7522 From 54c9f21605d8e9e76303b1f03d1ef93fc897a824 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:56:43 +0530 Subject: p54: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/intersil/p54/p54usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c index 043bd1c23c19..b0b86f701061 100644 --- a/drivers/net/wireless/intersil/p54/p54usb.c +++ b/drivers/net/wireless/intersil/p54/p54usb.c @@ -41,7 +41,7 @@ MODULE_FIRMWARE("isl3887usb"); * whenever you add a new device. */ -static struct usb_device_id p54u_table[] = { +static const struct usb_device_id p54u_table[] = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ -- cgit v1.2.3-55-g7522 From 3673c417f5091caa29221d000855eeebb25bcfa4 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:57:28 +0530 Subject: libertas: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/if_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index e53025ea6689..16e54c757dd0 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -52,7 +52,7 @@ static const struct lbs_fw_table fw_table[] = { { MODEL_8682, "libertas/usb8682.bin", NULL } }; -static struct usb_device_id if_usb_table[] = { +static const struct usb_device_id if_usb_table[] = { /* Enter the device signature inside */ { USB_DEVICE(0x1286, 0x2001), .driver_info = MODEL_8388 }, { USB_DEVICE(0x05a3, 0x8388), .driver_info = MODEL_8388 }, -- cgit v1.2.3-55-g7522 From 8a3132fa6bbb8bc25b7c8f2c8dcac34beb398625 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:57:29 +0530 Subject: libertas_tf: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. 
Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas_tf/if_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index e0ade40d9497..e9104eca327b 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -31,7 +31,7 @@ module_param_named(fw_name, lbtf_fw_name, charp, 0644); MODULE_FIRMWARE("lbtf_usb.bin"); -static struct usb_device_id if_usb_table[] = { +static const struct usb_device_id if_usb_table[] = { /* Enter the device signature inside */ { USB_DEVICE(0x1286, 0x2001) }, { USB_DEVICE(0x05a3, 0x8388) }, -- cgit v1.2.3-55-g7522 From 7516dbd470e32fe93cc12b632156029f19a30178 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:57:59 +0530 Subject: mwifiex: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index 880ef1cb4088..f4f2b9b27e32 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -24,7 +24,7 @@ static struct mwifiex_if_ops usb_ops; -static struct usb_device_id mwifiex_usb_table[] = { +static const struct usb_device_id mwifiex_usb_table[] = { /* 8766 */ {USB_DEVICE(USB8XXX_VID, USB8766_PID_1)}, {USB_DEVICE_AND_INTERFACE_INFO(USB8XXX_VID, USB8766_PID_2, -- cgit v1.2.3-55-g7522 From e2717b3127502a3c3344f93f0562b60b88ccd4ef Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:58:24 +0530 Subject: mt7601u: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt7601u/usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c index 416c6045ff31..b9e4f6793138 100644 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c @@ -19,7 +19,7 @@ #include "usb.h" #include "trace.h" -static struct usb_device_id mt7601u_device_table[] = { +static const struct usb_device_id mt7601u_device_table[] = { { USB_DEVICE(0x0b05, 0x17d3) }, { USB_DEVICE(0x0e8d, 0x760a) }, { USB_DEVICE(0x0e8d, 0x760b) }, -- cgit v1.2.3-55-g7522 From 88ee79a1a11c52a9eb96109220a4533c805848df Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:59:14 +0530 Subject: rt2500usb: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. 
Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2500usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index 529e05999abb..f4b48b77c491 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -1911,7 +1911,7 @@ static const struct rt2x00_ops rt2500usb_ops = { /* * rt2500usb module information. */ -static struct usb_device_id rt2500usb_device_table[] = { +static const struct usb_device_id rt2500usb_device_table[] = { /* ASUS */ { USB_DEVICE(0x0b05, 0x1706) }, { USB_DEVICE(0x0b05, 0x1707) }, -- cgit v1.2.3-55-g7522 From c7bb7d79cf4f2ee11ff8d5e0f1883c81c8ab9c0f Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:59:15 +0530 Subject: rt2800usb: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index 3e5d3a40d986..24fc6d2045ef 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -924,7 +924,7 @@ static const struct rt2x00_ops rt2800usb_ops = { /* * rt2800usb module information. */ -static struct usb_device_id rt2800usb_device_table[] = { +static const struct usb_device_id rt2800usb_device_table[] = { /* Abocom */ { USB_DEVICE(0x07b8, 0x2870) }, { USB_DEVICE(0x07b8, 0x2770) }, -- cgit v1.2.3-55-g7522 From 543e4f87508b612c81dff44037c071450bcc5ddc Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:59:48 +0530 Subject: rt73usb: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt73usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index fd913222abd1..9a212823f42c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -2408,7 +2408,7 @@ static const struct rt2x00_ops rt73usb_ops = { /* * rt73usb module information. */ -static struct usb_device_id rt73usb_device_table[] = { +static const struct usb_device_id rt73usb_device_table[] = { /* AboCom */ { USB_DEVICE(0x07b8, 0xb21b) }, { USB_DEVICE(0x07b8, 0xb21c) }, -- cgit v1.2.3-55-g7522 From e0b081b20523a960f2f3faf074922f4c3f81004c Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 22:00:13 +0530 Subject: rtl8187: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. 
Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 55198ac2b755..121b94f09714 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -43,7 +43,7 @@ MODULE_AUTHOR("Larry Finger "); MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver"); MODULE_LICENSE("GPL"); -static struct usb_device_id rtl8187_table[] = { +static const struct usb_device_id rtl8187_table[] = { /* Asus */ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187}, /* Belkin */ -- cgit v1.2.3-55-g7522 From 5033d70de16916fe8962c3fdc2ac70ba589dc395 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 22:00:39 +0530 Subject: rtl8xxxu: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 21e5ef021260..7806a4d2b1fc 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -6190,7 +6190,7 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface) ieee80211_free_hw(hw); } -static struct usb_device_id dev_table[] = { +static const struct usb_device_id dev_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8723au_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x1724, 0xff, 0xff, 0xff), -- cgit v1.2.3-55-g7522 From 3a55a4afd187b57b5205bb0736e8b9c7b422647c Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 22:01:05 +0530 Subject: rtl8192cu: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index dfbbd35bb966..43e021b49260 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -279,7 +279,7 @@ static struct rtl_hal_cfg rtl92cu_hal_cfg = { #define USB_VENDER_ID_REALTEK 0x0bda /* 2010-10-19 DID_USB_V3.4 */ -static struct usb_device_id rtl8192c_usb_ids[] = { +static const struct usb_device_id rtl8192c_usb_ids[] = { /*=== Realtek demoboard ===*/ /* Default ID */ -- cgit v1.2.3-55-g7522 From b924ffd7a4cd2a34b8ad5456ea552be838ddf9af Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 22:01:24 +0530 Subject: zd1201: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. 
Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/zydas/zd1201.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c index 7f586d76cf17..581e8577a221 100644 --- a/drivers/net/wireless/zydas/zd1201.c +++ b/drivers/net/wireless/zydas/zd1201.c @@ -25,7 +25,7 @@ #include #include "zd1201.h" -static struct usb_device_id zd1201_table[] = { +static const struct usb_device_id zd1201_table[] = { {USB_DEVICE(0x0586, 0x3400)}, /* Peabird Wireless USB Adapter */ {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */ {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */ -- cgit v1.2.3-55-g7522 From ecf23a788e13a00b1f02400dfa6876850ed21397 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 22:01:42 +0530 Subject: zd1211rw: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/zydas/zd1211rw/zd_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index 01ca1d57b3d9..c30bf118c67d 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -35,7 +35,7 @@ #include "zd_mac.h" #include "zd_usb.h" -static struct usb_device_id usb_ids[] = { +static const struct usb_device_id usb_ids[] = { /* ZD1211 */ { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 }, { USB_DEVICE(0x0586, 0x3401), .driver_info = DEVICE_ZD1211 }, -- cgit v1.2.3-55-g7522 From f18bbe5c86fde8ed1a0fdab079b57c3a909b0aeb Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 9 Aug 2017 02:31:43 +0000 Subject: mwifiex: uap: enable 11d based on userspace configruation This patch check whether userspace beacon data include country ie, if so then download command to enable 11d setup in firmeare accordingly. 
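A minimal sketch of the country-IE check being described, using a hypothetical helper name; the authoritative logic is mwifiex_config_uap_11d() in the diff that follows. cfg80211_find_ie() scans the userspace-supplied beacon tail for a given element ID and returns NULL when the element is absent.

/* Illustrative helper only -- the real change lives in the mwifiex diff below. */
#include <linux/ieee80211.h>
#include <net/cfg80211.h>

static bool beacon_has_country_ie(const struct cfg80211_beacon_data *beacon)
{
	/* Non-NULL means userspace included a Country element in the beacon. */
	return cfg80211_find_ie(WLAN_EID_COUNTRY, beacon->tail,
				beacon->tail_len) != NULL;
}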
Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2 ++ drivers/net/wireless/marvell/mwifiex/main.h | 3 +++ drivers/net/wireless/marvell/mwifiex/uap_cmd.c | 34 ++++++++++++++++--------- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index b16b19af812d..32c5074da84c 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2006,6 +2006,8 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy, priv->state_11h.is_11h_active = false; } + mwifiex_config_uap_11d(priv, ¶ms->beacon); + if (mwifiex_config_start_uap(priv, bss_cfg)) { mwifiex_dbg(priv->adapter, ERROR, "Failed to start AP\n"); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 0aaae0878742..a76bd797e454 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1564,6 +1564,9 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv, void mwifiex_uap_del_sta_data(struct mwifiex_private *priv, struct mwifiex_sta_node *node); +void mwifiex_config_uap_11d(struct mwifiex_private *priv, + struct cfg80211_beacon_data *beacon_data); + void mwifiex_init_11h_params(struct mwifiex_private *priv); int mwifiex_is_11h_active(struct mwifiex_private *priv); int mwifiex_11h_activate(struct mwifiex_private *priv, bool flag); diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index 477c29c9f5d9..18f7d9bf30b2 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -444,6 +444,28 @@ mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size) return; } +/* This function enable 11D if userspace set the country IE. + */ +void mwifiex_config_uap_11d(struct mwifiex_private *priv, + struct cfg80211_beacon_data *beacon_data) +{ + enum state_11d_t state_11d; + const u8 *country_ie; + + country_ie = cfg80211_find_ie(WLAN_EID_COUNTRY, beacon_data->tail, + beacon_data->tail_len); + if (country_ie) { + /* Send cmd to FW to enable 11D function */ + state_11d = ENABLE_11D; + if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB, + HostCmd_ACT_GEN_SET, DOT11D_I, + &state_11d, true)) { + mwifiex_dbg(priv->adapter, ERROR, + "11D: failed to enable 11D\n"); + } + } +} + /* This function parses BSS related parameters from structure * and prepares TLVs. These TLVs are appended to command buffer. 
*/ @@ -848,8 +870,6 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv, int mwifiex_config_start_uap(struct mwifiex_private *priv, struct mwifiex_uap_bss_param *bss_cfg) { - enum state_11d_t state_11d; - if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_SYS_CONFIG, HostCmd_ACT_GEN_SET, UAP_BSS_PARAMS_I, bss_cfg, true)) { @@ -858,16 +878,6 @@ int mwifiex_config_start_uap(struct mwifiex_private *priv, return -1; } - /* Send cmd to FW to enable 11D function */ - state_11d = ENABLE_11D; - if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_SNMP_MIB, - HostCmd_ACT_GEN_SET, DOT11D_I, - &state_11d, true)) { - mwifiex_dbg(priv->adapter, ERROR, - "11D: failed to enable 11D\n"); - return -1; - } - if (mwifiex_send_cmd(priv, HostCmd_CMD_UAP_BSS_START, HostCmd_ACT_GEN_SET, 0, NULL, true)) { mwifiex_dbg(priv->adapter, ERROR, -- cgit v1.2.3-55-g7522 From 2d5716456404a1ba097d46770f82f23a2457a873 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Thu, 10 Aug 2017 10:09:03 +0200 Subject: net: core: fix compile error inside flow_dissector due to new dsa callback The following error was introduced by commit 43e665287f93 ("net-next: dsa: fix flow dissection") due to a missing #if guard net/core/flow_dissector.c: In function '__skb_flow_dissect': net/core/flow_dissector.c:448:18: error: 'struct net_device' has no member named 'dsa_ptr' ops = skb->dev->dsa_ptr->tag_ops; ^ make[3]: *** [net/core/flow_dissector.o] Error 1 Signed-off-by: John Crispin Signed-off-by: David S. Miller --- net/core/flow_dissector.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 5b5be9577257..79b9c06c83ad 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -441,6 +441,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, skb->vlan_proto : skb->protocol; nhoff = skb_network_offset(skb); hlen = skb_headlen(skb); +#if IS_ENABLED(CONFIG_NET_DSA) if (unlikely(netdev_uses_dsa(skb->dev))) { const struct dsa_device_ops *ops; int offset; @@ -452,6 +453,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, nhoff += offset; } } +#endif } /* It is ensured by skb_flow_dissector_init() that control key will -- cgit v1.2.3-55-g7522 From 377cb248840907adc407324b4d23f97b3ee70c98 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 10 Aug 2017 16:52:57 +0200 Subject: rtnetlink: use rcu_dereference_raw to silence rcu splat Ido reports a rcu splat in __rtnl_register. The splat is correct; as rtnl_register doesn't grab any logs and doesn't use rcu locks either. It has always been like this. handler families are not registered in parallel so there are no races wrt. the kmalloc ordering. The only reason to use rcu_dereference in the first place was to avoid sparse from complaining about this. Thus this switches to _raw() to not have rcu checks here. The alternative is to add rtnl locking to register/unregister, however, I don't see a compelling reason to do so as this has been lockless for the past twenty years or so. Fixes: 6853dd4881 ("rtnetlink: protect handler table with rcu") Reported-by: Ido Schimmel Signed-off-by: Florian Westphal Tested-by: Ido Schimmel Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index dd4e50dfa248..a5bc5bd0dc12 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -172,7 +172,7 @@ int __rtnl_register(int protocol, int msgtype, BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX); msgindex = rtm_msgindex(msgtype); - tab = rcu_dereference(rtnl_msg_handlers[protocol]); + tab = rcu_dereference_raw(rtnl_msg_handlers[protocol]); if (tab == NULL) { tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL); if (tab == NULL) -- cgit v1.2.3-55-g7522 From 5c2bb9b6e27d9207c2e12b9cdb6bfc171afa663e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 10 Aug 2017 16:52:58 +0200 Subject: rtnetlink: do not use RTM_GETLINK directly Userspace sends RTM_GETLINK type, but the kernel substracts RTM_BASE from this, i.e. 'type' doesn't contain RTM_GETLINK anymore but instead RTM_GETLINK - RTM_BASE. This caused the calcit callback to not be invoked when it should have been (and vice versa). While at it, also fix a off-by one when checking family index. vs handler array size. Fixes: e1fa6d216dd ("rtnetlink: call rtnl_calcit directly") Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a5bc5bd0dc12..a9b5ebc1af21 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -4167,7 +4167,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN)) return -EPERM; - if (family > ARRAY_SIZE(rtnl_msg_handlers)) + if (family >= ARRAY_SIZE(rtnl_msg_handlers)) family = PF_UNSPEC; rcu_read_lock(); @@ -4196,7 +4196,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, refcount_inc(&rtnl_msg_handlers_ref[family]); - if (type == RTM_GETLINK) + if (type == RTM_GETLINK - RTM_BASE) min_dump_alloc = rtnl_calcit(skb, nlh); rcu_read_unlock(); -- cgit v1.2.3-55-g7522 From 8515ae38435895ba2862840d3e82140fc0a77554 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 10 Aug 2017 16:52:59 +0200 Subject: rtnetlink: switch rtnl_link_get_slave_info_data_size to rcu David Ahern reports following splat: RTNL: assertion failed at net/core/dev.c (5717) netdev_master_upper_dev_get+0x5f/0x70 if_nlmsg_size+0x158/0x240 rtnl_calcit.isra.26+0xa3/0xf0 rtnl_link_get_slave_info_data_size currently assumes RTNL protection, but there appears to be no hard requirement for this, so use rcu instead. At the time of this writing, there are three 'get_slave_size' callbacks (now invoked under rcu): bond_get_slave_size, vrf_get_slave_size and br_port_get_slave_size, all return constant only (i.e. they don't sleep). Fixes: 6853dd488119 ("rtnetlink: protect handler table with rcu") Reported-by: David Ahern Signed-off-by: Florian Westphal Acked-by: David Ahern Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a9b5ebc1af21..087f2434813a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -402,16 +402,24 @@ static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev) { struct net_device *master_dev; const struct rtnl_link_ops *ops; + size_t size = 0; - master_dev = netdev_master_upper_dev_get((struct net_device *) dev); + rcu_read_lock(); + + master_dev = netdev_master_upper_dev_get_rcu((struct net_device *)dev); if (!master_dev) - return 0; + goto out; + ops = master_dev->rtnl_link_ops; if (!ops || !ops->get_slave_size) - return 0; + goto out; /* IFLA_INFO_SLAVE_DATA + nested data */ - return nla_total_size(sizeof(struct nlattr)) + + size = nla_total_size(sizeof(struct nlattr)) + ops->get_slave_size(master_dev, dev); + +out: + rcu_read_unlock(); + return size; } static size_t rtnl_link_get_size(const struct net_device *dev) -- cgit v1.2.3-55-g7522 From d38a65125f93b2fbd17cd37ccc5ec0f0b799cc55 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 10 Aug 2017 16:53:00 +0200 Subject: rtnetlink: init handler refcounts to 1 If using CONFIG_REFCOUNT_FULL=y we get following splat: refcount_t: increment on 0; use-after-free. WARNING: CPU: 0 PID: 304 at lib/refcount.c:152 refcount_inc+0x47/0x50 Call Trace: rtnetlink_rcv_msg+0x191/0x260 ... This warning is harmless (0 is "no callback running", not "memory was freed"). Use '1' as the new 'no handler is running' base instead of 0 to avoid this. Fixes: 019a316992ee ("rtnetlink: add reference counting to prevent module unload while dump is in progress") Reported-by: Sabrina Dubroca Reported-by: kernel test robot Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/core/rtnetlink.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 087f2434813a..59eda6952bc9 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -262,7 +262,7 @@ void rtnl_unregister_all(int protocol) synchronize_net(); - while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 0) + while (refcount_read(&rtnl_msg_handlers_ref[protocol]) > 1) schedule(); kfree(handlers); } @@ -4324,6 +4324,11 @@ static struct pernet_operations rtnetlink_net_ops = { void __init rtnetlink_init(void) { + int i; + + for (i = 0; i < ARRAY_SIZE(rtnl_msg_handlers_ref); i++) + refcount_set(&rtnl_msg_handlers_ref[i], 1); + if (register_pernet_subsys(&rtnetlink_net_ops)) panic("rtnetlink_init: cannot initialize rtnetlink\n"); -- cgit v1.2.3-55-g7522 From 8caa38b56c56ef48af48f8c70fcd7cb4580e0c24 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 10 Aug 2017 16:53:01 +0200 Subject: rtnetlink: fallback to UNSPEC if current family has no doit callback We need to use PF_UNSPEC in case the requested family has no doit callback, otherwise this now fails with EOPNOTSUPP instead of running the unspec doit callback, as before. Fixes: 6853dd488119 ("rtnetlink: protect handler table with rcu") Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 59eda6952bc9..9e9f1419be60 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -4221,6 +4221,12 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } + doit = READ_ONCE(handlers[type].doit); + if (!doit) { + family = PF_UNSPEC; + handlers = rcu_dereference(rtnl_msg_handlers[family]); + } + flags = READ_ONCE(handlers[type].flags); if (flags & RTNL_FLAG_DOIT_UNLOCKED) { refcount_inc(&rtnl_msg_handlers_ref[family]); -- cgit v1.2.3-55-g7522 From 33b01b7b4f19f82198a298936de225eef942fc7c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 10 Aug 2017 16:53:02 +0200 Subject: selftests: add rtnetlink test script add a simple script to exercise some rtnetlink call paths, so KASAN, lockdep etc. can yell at developer before patches are sent upstream. This can be extended to also cover bond, team, vrf and the like. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- tools/testing/selftests/net/Makefile | 2 +- tools/testing/selftests/net/rtnetlink.sh | 199 +++++++++++++++++++++++++++++++ 2 files changed, 200 insertions(+), 1 deletion(-) create mode 100755 tools/testing/selftests/net/rtnetlink.sh diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 6135a8448900..de1f5772b878 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -3,7 +3,7 @@ CFLAGS = -Wall -Wl,--no-as-needed -O2 -g CFLAGS += -I../../../../usr/include/ -TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh +TEST_PROGS := run_netsocktests run_afpackettests test_bpf.sh netdevice.sh rtnetlink.sh TEST_GEN_FILES = socket TEST_GEN_FILES += psock_fanout psock_tpacket TEST_GEN_FILES += reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh new file mode 100755 index 000000000000..5b04ad912525 --- /dev/null +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -0,0 +1,199 @@ +#!/bin/sh +# +# This test is for checking rtnetlink callpaths, and get as much coverage as possible. +# +# set -e + +devdummy="test-dummy0" +ret=0 + +# set global exit status, but never reset nonzero one. +check_err() +{ + if [ $ret -eq 0 ]; then + ret=$1 + fi +} + +kci_add_dummy() +{ + ip link add name "$devdummy" type dummy + check_err $? + ip link set "$devdummy" up + check_err $? +} + +kci_del_dummy() +{ + ip link del dev "$devdummy" + check_err $? +} + +# add a bridge with vlans on top +kci_test_bridge() +{ + devbr="test-br0" + vlandev="testbr-vlan1" + + ret=0 + ip link add name "$devbr" type bridge + check_err $? + + ip link set dev "$devdummy" master "$devbr" + check_err $? + + ip link set "$devbr" up + check_err $? + + ip link add link "$devbr" name "$vlandev" type vlan id 1 + check_err $? + ip addr add dev "$vlandev" 10.200.7.23/30 + check_err $? + ip -6 addr add dev "$vlandev" dead:42::1234/64 + check_err $? + ip -d link > /dev/null + check_err $? + ip r s t all > /dev/null + check_err $? + ip -6 addr del dev "$vlandev" dead:42::1234/64 + check_err $? + + ip link del dev "$vlandev" + check_err $? + ip link del dev "$devbr" + check_err $? 
+ + if [ $ret -ne 0 ];then + echo "FAIL: bridge setup" + return 1 + fi + echo "PASS: bridge setup" + +} + +kci_test_gre() +{ + gredev=neta + rem=10.42.42.1 + loc=10.0.0.1 + + ret=0 + ip tunnel add $gredev mode gre remote $rem local $loc ttl 1 + check_err $? + ip link set $gredev up + check_err $? + ip addr add 10.23.7.10 dev $gredev + check_err $? + ip route add 10.23.8.0/30 dev $gredev + check_err $? + ip addr add dev "$devdummy" 10.23.7.11/24 + check_err $? + ip link > /dev/null + check_err $? + ip addr > /dev/null + check_err $? + ip addr del dev "$devdummy" 10.23.7.11/24 + check_err $? + + ip link del $gredev + check_err $? + + if [ $ret -ne 0 ];then + echo "FAIL: gre tunnel endpoint" + return 1 + fi + echo "PASS: gre tunnel endpoint" +} + +# tc uses rtnetlink too, for full tc testing +# please see tools/testing/selftests/tc-testing. +kci_test_tc() +{ + dev=lo + ret=0 + + tc qdisc add dev "$dev" root handle 1: htb + check_err $? + tc class add dev "$dev" parent 1: classid 1:10 htb rate 1mbit + check_err $? + tc filter add dev "$dev" parent 1:0 prio 5 handle ffe: protocol ip u32 divisor 256 + check_err $? + tc filter add dev "$dev" parent 1:0 prio 5 handle ffd: protocol ip u32 divisor 256 + check_err $? + tc filter add dev "$dev" parent 1:0 prio 5 handle ffc: protocol ip u32 divisor 256 + check_err $? + tc filter add dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:3 u32 ht ffe:2: match ip src 10.0.0.3 flowid 1:10 + check_err $? + tc filter add dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:2 u32 ht ffe:2: match ip src 10.0.0.2 flowid 1:10 + check_err $? + tc filter show dev "$dev" parent 1:0 > /dev/null + check_err $? + tc filter del dev "$dev" protocol ip parent 1: prio 5 handle ffe:2:3 u32 + check_err $? + tc filter show dev "$dev" parent 1:0 > /dev/null + check_err $? + tc qdisc del dev "$dev" root handle 1: htb + check_err $? + + if [ $ret -ne 0 ];then + echo "FAIL: tc htb hierarchy" + return 1 + fi + echo "PASS: tc htb hierarchy" + +} + +kci_test_polrouting() +{ + ret=0 + ip rule add fwmark 1 lookup 100 + check_err $? + ip route add local 0.0.0.0/0 dev lo table 100 + check_err $? + ip r s t all > /dev/null + check_err $? + ip rule del fwmark 1 lookup 100 + check_err $? + ip route del local 0.0.0.0/0 dev lo table 100 + check_err $? + + if [ $ret -ne 0 ];then + echo "FAIL: policy route test" + return 1 + fi + echo "PASS: policy routing" +} + +kci_test_rtnl() +{ + kci_add_dummy + if [ $ret -ne 0 ];then + echo "FAIL: cannot add dummy interface" + return 1 + fi + + kci_test_polrouting + kci_test_tc + kci_test_gre + kci_test_bridge + + kci_del_dummy +} + +#check for needed privileges +if [ "$(id -u)" -ne 0 ];then + echo "SKIP: Need root privileges" + exit 0 +fi + +for x in ip tc;do + $x -Version 2>/dev/null >/dev/null + if [ $? -ne 0 ];then + echo "SKIP: Could not run test without the $x tool" + exit 0 + fi +done + +kci_test_rtnl + +exit $ret -- cgit v1.2.3-55-g7522 From 077fbac405bfc6d41419ad6c1725804ad4e9887c Mon Sep 17 00:00:00 2001 From: Lorenzo Colitti Date: Fri, 11 Aug 2017 02:11:33 +0900 Subject: net: xfrm: support setting an output mark. On systems that use mark-based routing it may be necessary for routing lookups to use marks in order for packets to be routed correctly. An example of such a system is Android, which uses socket marks to route packets via different networks. Currently, routing lookups in tunnel mode always use a mark of zero, making routing incorrect on such systems. 
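As a rough illustration of the paragraph above, and not code taken from this patch: with a policy rule such as "ip rule add fwmark 0x10 lookup 100" installed, only a flow key that actually carries that mark is steered into table 100, so an outer-header route lookup built with a flowi4_mark of zero can resolve through the wrong table. A hypothetical helper showing where the mark enters the IPv4 lookup:

/* Illustrative only -- shows where the mark feeds the IPv4 route lookup. */
#include <net/route.h>

static struct rtable *lookup_outer_route(struct net *net, __be32 daddr, u32 mark)
{
	struct flowi4 fl4 = {
		.daddr		= daddr,
		.flowi4_mark	= mark,	/* pre-patch tunnel-mode lookups pass 0 here */
	};

	return ip_route_output_key(net, &fl4);	/* ERR_PTR() on failure */
}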
This patch adds a new output_mark element to the xfrm state and a corresponding XFRMA_OUTPUT_MARK netlink attribute. The output mark differs from the existing xfrm mark in two ways: 1. The xfrm mark is used to match xfrm policies and states, while the xfrm output mark is used to set the mark (and influence the routing) of the packets emitted by those states. 2. The existing mark is constrained to be a subset of the bits of the originating socket or transformed packet, but the output mark is arbitrary and depends only on the state. The use of a separate mark provides additional flexibility. For example: - A packet subject to two transforms (e.g., transport mode inside tunnel mode) can have two different output marks applied to it, one for the transport mode SA and one for the tunnel mode SA. - On a system where socket marks determine routing, the packets emitted by an IPsec tunnel can be routed based on a mark that is determined by the tunnel, not by the marks of the unencrypted packets. - Support for setting the output marks can be introduced without breaking any existing setups that employ both mark-based routing and xfrm tunnel mode. Simply changing the code to use the xfrm mark for routing output packets could xfrm mark could change behaviour in a way that breaks these setups. If the output mark is unspecified or set to zero, the mark is not set or changed. Tested: make allyesconfig; make -j64 Tested: https://android-review.googlesource.com/452776 Signed-off-by: Lorenzo Colitti Signed-off-by: Steffen Klassert --- include/net/xfrm.h | 9 ++++++--- include/uapi/linux/xfrm.h | 1 + net/ipv4/xfrm4_policy.c | 14 +++++++++----- net/ipv6/xfrm6_policy.c | 9 ++++++--- net/xfrm/xfrm_device.c | 3 ++- net/xfrm/xfrm_output.c | 3 +++ net/xfrm/xfrm_policy.c | 17 +++++++++-------- net/xfrm/xfrm_user.c | 11 +++++++++++ 8 files changed, 47 insertions(+), 20 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 18d7de34a5c3..9c7b70cce6d6 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -165,6 +165,7 @@ struct xfrm_state { int header_len; int trailer_len; u32 extra_flags; + u32 output_mark; } props; struct xfrm_lifetime_cfg lft; @@ -298,10 +299,12 @@ struct xfrm_policy_afinfo { struct dst_entry *(*dst_lookup)(struct net *net, int tos, int oif, const xfrm_address_t *saddr, - const xfrm_address_t *daddr); + const xfrm_address_t *daddr, + u32 mark); int (*get_saddr)(struct net *net, int oif, xfrm_address_t *saddr, - xfrm_address_t *daddr); + xfrm_address_t *daddr, + u32 mark); void (*decode_session)(struct sk_buff *skb, struct flowi *fl, int reverse); @@ -1640,7 +1643,7 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t *daddr, - int family); + int family, u32 mark); struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp); diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index 2b384ff09fa0..5fe7370a2bef 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -304,6 +304,7 @@ enum xfrm_attr_type_t { XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ XFRMA_PAD, XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ + XFRMA_OUTPUT_MARK, /* __u32 */ __XFRMA_MAX #define XFRMA_MAX (__XFRMA_MAX - 1) diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 4aefb149fe0a..d7bf0b041885 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -20,7 +20,8 @@ static struct 
dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4, int tos, int oif, const xfrm_address_t *saddr, - const xfrm_address_t *daddr) + const xfrm_address_t *daddr, + u32 mark) { struct rtable *rt; @@ -28,6 +29,7 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4, fl4->daddr = daddr->a4; fl4->flowi4_tos = tos; fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif); + fl4->flowi4_mark = mark; if (saddr) fl4->saddr = saddr->a4; @@ -42,20 +44,22 @@ static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4, static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, - const xfrm_address_t *daddr) + const xfrm_address_t *daddr, + u32 mark) { struct flowi4 fl4; - return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr); + return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr, mark); } static int xfrm4_get_saddr(struct net *net, int oif, - xfrm_address_t *saddr, xfrm_address_t *daddr) + xfrm_address_t *saddr, xfrm_address_t *daddr, + u32 mark) { struct dst_entry *dst; struct flowi4 fl4; - dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr); + dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr, mark); if (IS_ERR(dst)) return -EHOSTUNREACH; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index f44b25a48478..11d1314ab6c5 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -27,7 +27,8 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, - const xfrm_address_t *daddr) + const xfrm_address_t *daddr, + u32 mark) { struct flowi6 fl6; struct dst_entry *dst; @@ -36,6 +37,7 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = l3mdev_master_ifindex_by_index(net, oif); fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF; + fl6.flowi6_mark = mark; memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr)); if (saddr) memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr)); @@ -52,12 +54,13 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif, } static int xfrm6_get_saddr(struct net *net, int oif, - xfrm_address_t *saddr, xfrm_address_t *daddr) + xfrm_address_t *saddr, xfrm_address_t *daddr, + u32 mark) { struct dst_entry *dst; struct net_device *dev; - dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr); + dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark); if (IS_ERR(dst)) return -EHOSTUNREACH; diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 1904127f5fb8..acf00104ef31 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -79,7 +79,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, daddr = &x->props.saddr; } - dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr, x->props.family); + dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr, + x->props.family, x->props.output_mark); if (IS_ERR(dst)) return 0; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 8c0b6722aaa8..31a2e6d34dba 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -66,6 +66,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err) goto error_nolock; } + if (x->props.output_mark) + skb->mark = x->props.output_mark; + err = x->outer_mode->output(x, skb); if (err) { XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 06c3bf7ab86b..1de52f36caf5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -122,7 
+122,7 @@ static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short fa struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif, const xfrm_address_t *saddr, const xfrm_address_t *daddr, - int family) + int family, u32 mark) { const struct xfrm_policy_afinfo *afinfo; struct dst_entry *dst; @@ -131,7 +131,7 @@ struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif, if (unlikely(afinfo == NULL)) return ERR_PTR(-EAFNOSUPPORT); - dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr); + dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark); rcu_read_unlock(); @@ -143,7 +143,7 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, int oif, xfrm_address_t *prev_saddr, xfrm_address_t *prev_daddr, - int family) + int family, u32 mark) { struct net *net = xs_net(x); xfrm_address_t *saddr = &x->props.saddr; @@ -159,7 +159,7 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, daddr = x->coaddr; } - dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family); + dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark); if (!IS_ERR(dst)) { if (prev_saddr != saddr) @@ -1340,14 +1340,14 @@ int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) static int xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local, - xfrm_address_t *remote, unsigned short family) + xfrm_address_t *remote, unsigned short family, u32 mark) { int err; const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); if (unlikely(afinfo == NULL)) return -EINVAL; - err = afinfo->get_saddr(net, oif, local, remote); + err = afinfo->get_saddr(net, oif, local, remote, mark); rcu_read_unlock(); return err; } @@ -1378,7 +1378,7 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, if (xfrm_addr_any(local, tmpl->encap_family)) { error = xfrm_get_saddr(net, fl->flowi_oif, &tmp, remote, - tmpl->encap_family); + tmpl->encap_family, 0); if (error) goto fail; local = &tmp; @@ -1598,7 +1598,8 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { family = xfrm[i]->props.family; dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif, - &saddr, &daddr, family); + &saddr, &daddr, family, + xfrm[i]->props.output_mark); err = PTR_ERR(dst); if (IS_ERR(dst)) goto put_states; diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index ffe8d5ef09eb..cc3268d814b4 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -584,6 +584,9 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, xfrm_mark_get(attrs, &x->mark); + if (attrs[XFRMA_OUTPUT_MARK]) + x->props.output_mark = nla_get_u32(attrs[XFRMA_OUTPUT_MARK]); + err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]); if (err) goto error; @@ -899,6 +902,11 @@ static int copy_to_user_state_extra(struct xfrm_state *x, goto out; if (x->security) ret = copy_sec_ctx(x->security, skb); + if (x->props.output_mark) { + ret = nla_put_u32(skb, XFRMA_OUTPUT_MARK, x->props.output_mark); + if (ret) + goto out; + } out: return ret; } @@ -2454,6 +2462,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_PROTO] = { .type = NLA_U8 }, [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) }, + [XFRMA_OUTPUT_MARK] = { .len = NLA_U32 }, }; static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { @@ -2673,6 +2682,8 @@ static inline size_t xfrm_sa_len(struct 
xfrm_state *x) l += nla_total_size(sizeof(x->props.extra_flags)); if (x->xso.dev) l += nla_total_size(sizeof(x->xso)); + if (x->props.output_mark) + l += nla_total_size(sizeof(x->props.output_mark)); /* Must count x->lastused as it may become non-zero behind our back. */ l += nla_total_size_64bit(sizeof(u64)); -- cgit v1.2.3-55-g7522 From a67b133b9da539ba30b50a0bb4c333d4e38407e7 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Fri, 11 Aug 2017 10:53:50 +0530 Subject: brcm80211: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 8f20a4bb40d9..11ffaa01599e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1468,7 +1468,7 @@ static int brcmf_usb_reset_resume(struct usb_interface *intf) #define CYPRESS_USB_DEVICE(dev_id) \ { USB_DEVICE(CY_USB_VENDOR_ID_CYPRESS, dev_id) } -static struct usb_device_id brcmf_usb_devid_table[] = { +static const struct usb_device_id brcmf_usb_devid_table[] = { BRCMF_USB_DEVICE(BRCM_USB_43143_DEVICE_ID), BRCMF_USB_DEVICE(BRCM_USB_43236_DEVICE_ID), BRCMF_USB_DEVICE(BRCM_USB_43242_DEVICE_ID), -- cgit v1.2.3-55-g7522 From e881a6584941c89ae64f6b6568cae35f9cc9f3d9 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:53:43 +0530 Subject: ath6kl: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Reviewed-by: Steve deRosier Tested-by: Steve deRosier Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath6kl/usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c index 9da3594fd010..4defb7a0330f 100644 --- a/drivers/net/wireless/ath/ath6kl/usb.c +++ b/drivers/net/wireless/ath/ath6kl/usb.c @@ -1201,7 +1201,7 @@ static int ath6kl_usb_pm_resume(struct usb_interface *interface) #endif /* table of devices that work with this driver */ -static struct usb_device_id ath6kl_usb_ids[] = { +static const struct usb_device_id ath6kl_usb_ids[] = { {USB_DEVICE(0x0cf3, 0x9375)}, {USB_DEVICE(0x0cf3, 0x9374)}, { /* Terminating entry */ }, -- cgit v1.2.3-55-g7522 From 76b07b30c46bcc6a03ff12d45e525794b8e8c14a Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 9 Aug 2017 21:54:11 +0530 Subject: ath9k: constify usb_device_id usb_device_id are not supposed to change at runtime. All functions working with usb_device_id provided by work with const usb_device_id. So mark the non-const structs as const. 
Signed-off-by: Arvind Yadav Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/hif_usb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 0d9687a2aa98..c5f4dd808745 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -20,7 +20,7 @@ MODULE_FIRMWARE(HTC_7010_MODULE_FW); MODULE_FIRMWARE(HTC_9271_MODULE_FW); -static struct usb_device_id ath9k_hif_usb_ids[] = { +static const struct usb_device_id ath9k_hif_usb_ids[] = { { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */ { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */ { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */ -- cgit v1.2.3-55-g7522 From afa6c45429f6e5ddd1eb6b77a36358f9c4b789da Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:45 +0800 Subject: sctp: remove the unused typedef sctp_packet_phandler_t Remove this function typedef, there is even no places using it. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index fbe6e81b889b..73e9509c057e 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -657,8 +657,6 @@ struct sctp_sockaddr_entry { #define SCTP_ADDRESS_TICK_DELAY 500 -typedef struct sctp_chunk *(sctp_packet_phandler_t)(struct sctp_association *); - /* This structure holds lists of chunks as we are assembling for * transmission. */ -- cgit v1.2.3-55-g7522 From edf903f83ebca988e04a39f515ab6eacb92055df Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:46 +0800 Subject: sctp: remove the typedef sctp_sender_hb_info_t This patch is to remove the typedef sctp_sender_hb_info_t, and replace with struct sctp_sender_hb_info in the places where it's using this typedef. It is also to use sizeof(variable) instead of sizeof(type). Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 4 ++-- net/sctp/sm_make_chunk.c | 6 +++--- net/sctp/sm_sideeffect.c | 4 ++-- net/sctp/sm_statefuns.c | 12 +++++------- 4 files changed, 12 insertions(+), 14 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 73e9509c057e..60033a263193 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -371,12 +371,12 @@ union sctp_params { * chunk is sent and the destination transport address to which this * HEARTBEAT is sent (see Section 8.3). */ -typedef struct sctp_sender_hb_info { +struct sctp_sender_hb_info { struct sctp_paramhdr param_hdr; union sctp_addr daddr; unsigned long sent_at; __u64 hb_nonce; -} sctp_sender_hb_info_t; +}; int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt, gfp_t gfp); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 3a8fb1dffbc1..51de638a88b2 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1142,10 +1142,10 @@ nodata: /* Make a HEARTBEAT chunk. 
*/ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, - const struct sctp_transport *transport) + const struct sctp_transport *transport) { + struct sctp_sender_hb_info hbinfo; struct sctp_chunk *retval; - sctp_sender_hb_info_t hbinfo; retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo), GFP_ATOMIC); @@ -1154,7 +1154,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, goto nodata; hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO; - hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); + hbinfo.param_hdr.length = htons(sizeof(hbinfo)); hbinfo.daddr = transport->ipaddr; hbinfo.sent_at = jiffies; hbinfo.hb_nonce = transport->hb_nonce; diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 4a12d29d9aa1..5e8e41879b03 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -714,7 +714,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, struct sctp_transport *t, struct sctp_chunk *chunk) { - sctp_sender_hb_info_t *hbinfo; + struct sctp_sender_hb_info *hbinfo; int was_unconfirmed = 0; /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the @@ -768,7 +768,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, if (t->rto_pending == 0) t->rto_pending = 1; - hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; + hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data; sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); /* Update the heartbeat timer. */ diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index ac6aaa046529..af93419209df 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -1155,27 +1155,25 @@ sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net, void *arg, sctp_cmd_seq_t *commands) { + struct sctp_sender_hb_info *hbinfo; struct sctp_chunk *chunk = arg; - union sctp_addr from_addr; struct sctp_transport *link; - sctp_sender_hb_info_t *hbinfo; unsigned long max_interval; + union sctp_addr from_addr; if (!sctp_vtag_verify(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */ if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_chunkhdr) + - sizeof(sctp_sender_hb_info_t))) + sizeof(*hbinfo))) return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, commands); - hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; + hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data; /* Make sure that the length of the parameter is what we expect */ - if (ntohs(hbinfo->param_hdr.length) != - sizeof(sctp_sender_hb_info_t)) { + if (ntohs(hbinfo->param_hdr.length) != sizeof(*hbinfo)) return SCTP_DISPOSITION_DISCARD; - } from_addr = hbinfo->daddr; link = sctp_assoc_lookup_paddr(asoc, &from_addr); -- cgit v1.2.3-55-g7522 From 74439f344b1becd57ec822bc0e2c1a4cbf240a53 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:47 +0800 Subject: sctp: remove the typedef sctp_endpoint_type_t This patch is to remove the typedef sctp_endpoint_type_t, and replace with enum sctp_endpoint_type in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 60033a263193..937187f3bffc 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1142,10 +1142,10 @@ int sctp_is_ep_boundall(struct sock *sk); /* What type of endpoint? 
*/ -typedef enum { +enum sctp_endpoint_type { SCTP_EP_TYPE_SOCKET, SCTP_EP_TYPE_ASSOCIATION, -} sctp_endpoint_type_t; +}; /* * A common base class to bridge the implmentation view of a @@ -1169,7 +1169,7 @@ struct sctp_ep_common { int hashent; /* Runtime type information. What kind of endpoint is this? */ - sctp_endpoint_type_t type; + enum sctp_endpoint_type type; /* Some fields to help us manage this object. * refcnt - Reference count access to this object. -- cgit v1.2.3-55-g7522 From a05437ac5deb100f94e290ad4c5eef3e78f4b6bb Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:48 +0800 Subject: sctp: remove the typedef sctp_cmsgs_t This patch is to remove the typedef sctp_cmsgs_t, and replace with struct sctp_cmsgs in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/structs.h | 4 ++-- net/sctp/socket.c | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 937187f3bffc..e171d3a3d2b4 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1985,11 +1985,11 @@ int sctp_cmp_addr_exact(const union sctp_addr *ss1, struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc); /* A convenience structure to parse out SCTP specific CMSGs. */ -typedef struct sctp_cmsgs { +struct sctp_cmsgs { struct sctp_initmsg *init; struct sctp_sndrcvinfo *srinfo; struct sctp_sndinfo *sinfo; -} sctp_cmsgs_t; +}; /* Structure for tracking memory objects */ typedef struct { diff --git a/net/sctp/socket.c b/net/sctp/socket.c index a1e2113806dd..018190655d63 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1593,7 +1593,8 @@ static int sctp_error(struct sock *sk, int flags, int err) */ /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */ -static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); +static int sctp_msghdr_parse(const struct msghdr *msg, + struct sctp_cmsgs *cmsgs); static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) { @@ -1609,7 +1610,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) struct sctp_sndrcvinfo *sinfo; struct sctp_initmsg *sinit; sctp_assoc_t associd = 0; - sctp_cmsgs_t cmsgs = { NULL }; + struct sctp_cmsgs cmsgs = { NULL }; enum sctp_scope scope; bool fill_sinfo_ttl = false, wait_connect = false; struct sctp_datamsg *datamsg; @@ -7445,10 +7446,10 @@ static int sctp_autobind(struct sock *sk) * msg_control * points here */ -static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) +static int sctp_msghdr_parse(const struct msghdr *msg, struct sctp_cmsgs *cmsgs) { - struct cmsghdr *cmsg; struct msghdr *my_msg = (struct msghdr *)msg; + struct cmsghdr *cmsg; for_each_cmsghdr(cmsg, my_msg) { if (!CMSG_OK(my_msg, cmsg)) -- cgit v1.2.3-55-g7522 From d38ef5ae35b0960f3219f1cf0203e19819e757c7 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:49 +0800 Subject: sctp: remove the typedef sctp_dbg_objcnt_entry_t This patch is to remove the typedef sctp_dbg_objcnt_entry_t, and replace with struct sctp_dbg_objcnt_entry in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. 
Miller --- include/net/sctp/structs.h | 4 ++-- net/sctp/objcnt.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index e171d3a3d2b4..b6d75b37f84d 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1992,9 +1992,9 @@ struct sctp_cmsgs { }; /* Structure for tracking memory objects */ -typedef struct { +struct sctp_dbg_objcnt_entry { char *label; atomic_t *counter; -} sctp_dbg_objcnt_entry_t; +}; #endif /* __sctp_structs_h__ */ diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c index 105ac3327b28..aeea6da81441 100644 --- a/net/sctp/objcnt.c +++ b/net/sctp/objcnt.c @@ -57,7 +57,7 @@ SCTP_DBG_OBJCNT(keys); /* An array to make it easy to pretty print the debug information * to the proc fs. */ -static sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = { +static struct sctp_dbg_objcnt_entry sctp_dbg_objcnt[] = { SCTP_DBG_OBJCNT_ENTRY(sock), SCTP_DBG_OBJCNT_ENTRY(ep), SCTP_DBG_OBJCNT_ENTRY(assoc), -- cgit v1.2.3-55-g7522 From b7ef2618a0bf75c1e480b05739b0c5f2a42081cd Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:50 +0800 Subject: sctp: remove the typedef sctp_socket_type_t This patch is to remove the typedef sctp_socket_type_t, and replace with enum sctp_socket_type in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/sctp.h | 3 ++- include/net/sctp/structs.h | 6 +++--- net/sctp/socket.c | 7 ++++--- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 24ff7931d38c..06b4f515e157 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -550,7 +550,8 @@ static inline int sctp_ep_hashfn(struct net *net, __u16 lport) /* Is a socket of this style? */ #define sctp_style(sk, style) __sctp_style((sk), (SCTP_SOCKET_##style)) -static inline int __sctp_style(const struct sock *sk, sctp_socket_type_t style) +static inline int __sctp_style(const struct sock *sk, + enum sctp_socket_type style) { return sctp_sk(sk)->type == style; } diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index b6d75b37f84d..0477945de1a3 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -150,18 +150,18 @@ extern struct sctp_globals { #define sctp_checksum_disable (sctp_globals.checksum_disable) /* SCTP Socket type: UDP or TCP style. */ -typedef enum { +enum sctp_socket_type { SCTP_SOCKET_UDP = 0, SCTP_SOCKET_UDP_HIGH_BANDWIDTH, SCTP_SOCKET_TCP -} sctp_socket_type_t; +}; /* Per socket SCTP information. */ struct sctp_sock { /* inet_sock has to be the first member of sctp_sock */ struct inet_sock inet; /* What kind of a socket is this? */ - sctp_socket_type_t type; + enum sctp_socket_type type; /* PF_ family specific functions. 
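/* [Editor's note: usage illustration, not part of the patch. It shows what
 * the sctp_style() macro touched above expands to after this change; the
 * wrapper function name is hypothetical. ] */
static inline int example_is_tcp_style(const struct sock *sk)
{
	/* sctp_style(sk, TCP) token-pastes into
	 * __sctp_style(sk, SCTP_SOCKET_TCP), i.e. a comparison of
	 * sctp_sk(sk)->type against the enum sctp_socket_type value.
	 */
	return sctp_style(sk, TCP);
}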
*/ struct sctp_pf *pf; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 018190655d63..c01af72cc603 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -100,8 +100,9 @@ static int sctp_send_asconf(struct sctp_association *asoc, struct sctp_chunk *chunk); static int sctp_do_bind(struct sock *, union sctp_addr *, int); static int sctp_autobind(struct sock *sk); -static void sctp_sock_migrate(struct sock *, struct sock *, - struct sctp_association *, sctp_socket_type_t); +static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, + struct sctp_association *assoc, + enum sctp_socket_type type); static unsigned long sctp_memory_pressure; static atomic_long_t sctp_memory_allocated; @@ -8086,7 +8087,7 @@ static inline void sctp_copy_descendant(struct sock *sk_to, */ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, struct sctp_association *assoc, - sctp_socket_type_t type) + enum sctp_socket_type type) { struct sctp_sock *oldsp = sctp_sk(oldsk); struct sctp_sock *newsp = sctp_sk(newsk); -- cgit v1.2.3-55-g7522 From e2c3108ab25b4dbab3821e8b6084bfb73afb655c Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:51 +0800 Subject: sctp: remove the typedef sctp_cmd_t This patch is to remove the typedef sctp_cmd_t, and replace with enum sctp_cmd in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/command.h | 14 +++++++------- net/sctp/sm_sideeffect.c | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 376cb78b6247..4e9e589b8f18 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -196,15 +196,15 @@ static inline sctp_arg_t SCTP_NULL(void) return retval; } -typedef struct { +struct sctp_cmd { sctp_arg_t obj; sctp_verb_t verb; -} sctp_cmd_t; +}; typedef struct { - sctp_cmd_t cmds[SCTP_MAX_NUM_COMMANDS]; - sctp_cmd_t *last_used_slot; - sctp_cmd_t *next_cmd; + struct sctp_cmd cmds[SCTP_MAX_NUM_COMMANDS]; + struct sctp_cmd *last_used_slot; + struct sctp_cmd *next_cmd; } sctp_cmd_seq_t; @@ -228,7 +228,7 @@ static inline int sctp_init_cmd_seq(sctp_cmd_seq_t *seq) static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj) { - sctp_cmd_t *cmd = seq->last_used_slot - 1; + struct sctp_cmd *cmd = seq->last_used_slot - 1; BUG_ON(cmd < seq->cmds); @@ -240,7 +240,7 @@ static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, /* Return the next command structure in an sctp_cmd_seq. * Return NULL at the end of the sequence. 
*/ -static inline sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq) +static inline struct sctp_cmd *sctp_next_cmd(sctp_cmd_seq_t *seq) { if (seq->next_cmd <= seq->last_used_slot) return NULL; diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 5e8e41879b03..0cb3d5a723af 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1277,7 +1277,7 @@ static int sctp_cmd_interpreter(enum sctp_event event_type, struct sctp_sock *sp = sctp_sk(sk); int error = 0; int force; - sctp_cmd_t *cmd; + struct sctp_cmd *cmd; struct sctp_chunk *new_obj; struct sctp_chunk *chunk = NULL; struct sctp_packet *packet; -- cgit v1.2.3-55-g7522 From a85bbeb221d860097859f110ba1321f2b0653f07 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:52 +0800 Subject: sctp: remove the typedef sctp_cmd_seq_t This patch is to remove the typedef sctp_cmd_seq_t, and replace with struct sctp_cmd_seq in the places where it's using this typedef. Note that it doesn't fix many indents although it should, as sctp_disposition_t's removal would mess them up again. So better to fix them when removing sctp_disposition_t in the later patch. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/command.h | 14 +-- include/net/sctp/sm.h | 2 +- net/sctp/probe.c | 2 +- net/sctp/sm_sideeffect.c | 54 ++++++------ net/sctp/sm_statefuns.c | 216 +++++++++++++++++++++++---------------------- 5 files changed, 145 insertions(+), 143 deletions(-) diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 4e9e589b8f18..cbf6798866ef 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -110,7 +110,7 @@ typedef enum { SCTP_CMD_LAST } sctp_verb_t; -/* How many commands can you put in an sctp_cmd_seq_t? +/* How many commands can you put in an struct sctp_cmd_seq? * This is a rather arbitrary number, ideally derived from a careful * analysis of the state functions, but in reality just taken from * thin air in the hopes othat we don't trigger a kernel panic. @@ -201,17 +201,17 @@ struct sctp_cmd { sctp_verb_t verb; }; -typedef struct { +struct sctp_cmd_seq { struct sctp_cmd cmds[SCTP_MAX_NUM_COMMANDS]; struct sctp_cmd *last_used_slot; struct sctp_cmd *next_cmd; -} sctp_cmd_seq_t; +}; /* Initialize a block of memory as a command sequence. * Return 0 if the initialization fails. */ -static inline int sctp_init_cmd_seq(sctp_cmd_seq_t *seq) +static inline int sctp_init_cmd_seq(struct sctp_cmd_seq *seq) { /* cmds[] is filled backwards to simplify the overflow BUG() check */ seq->last_used_slot = seq->cmds + SCTP_MAX_NUM_COMMANDS; @@ -220,12 +220,12 @@ static inline int sctp_init_cmd_seq(sctp_cmd_seq_t *seq) } -/* Add a command to an sctp_cmd_seq_t. +/* Add a command to an struct sctp_cmd_seq. * * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above * to wrap data which goes in the obj argument. */ -static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, +static inline void sctp_add_cmd_sf(struct sctp_cmd_seq *seq, sctp_verb_t verb, sctp_arg_t obj) { struct sctp_cmd *cmd = seq->last_used_slot - 1; @@ -240,7 +240,7 @@ static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, /* Return the next command structure in an sctp_cmd_seq. * Return NULL at the end of the sequence. 
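/* [Editor's note: illustrative sketch of how the struct sctp_cmd_seq helpers
 * above fit together. The function name is hypothetical, the verbs are
 * examples borrowed from elsewhere in this series, and the drain loop in the
 * real code lives in sctp_cmd_interpreter(). ] */
static void example_queue_and_drain(void)
{
	struct sctp_cmd_seq commands;
	struct sctp_cmd *cmd;

	sctp_init_cmd_seq(&commands);	/* cmds[] is filled backwards */

	/* State functions queue side effects rather than acting directly. */
	sctp_add_cmd_sf(&commands, SCTP_CMD_TIMER_STOP,
			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
	sctp_add_cmd_sf(&commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());

	/* The interpreter then walks the sequence until sctp_next_cmd()
	 * returns NULL, dispatching on cmd->verb with cmd->obj as the
	 * argument.
	 */
	while ((cmd = sctp_next_cmd(&commands)) != NULL) {
		/* execute cmd->verb on cmd->obj here */
	}
}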
*/ -static inline struct sctp_cmd *sctp_next_cmd(sctp_cmd_seq_t *seq) +static inline struct sctp_cmd *sctp_next_cmd(struct sctp_cmd_seq *seq) { if (seq->next_cmd <= seq->last_used_slot) return NULL; diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 1e7651c3b158..9af64b98d96b 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -75,7 +75,7 @@ typedef sctp_disposition_t (sctp_state_fn_t) (struct net *, const struct sctp_association *, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *); + struct sctp_cmd_seq *); typedef void (sctp_timer_event_t) (unsigned long); typedef struct { sctp_state_fn_t *fn; diff --git a/net/sctp/probe.c b/net/sctp/probe.c index 43837dfc86a7..34097a167431 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c @@ -132,7 +132,7 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sk_buff *skb = chunk->skb; diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 0cb3d5a723af..6dd5934cbda6 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -58,7 +58,7 @@ static int sctp_cmd_interpreter(enum sctp_event event_type, struct sctp_association *asoc, void *event_arg, sctp_disposition_t status, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, gfp_t gfp); static int sctp_side_effects(enum sctp_event event_type, union sctp_subtype subtype, @@ -67,7 +67,7 @@ static int sctp_side_effects(enum sctp_event event_type, struct sctp_association **asoc, void *event_arg, sctp_disposition_t status, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, gfp_t gfp); /******************************************************************** @@ -150,7 +150,7 @@ static void sctp_do_ecn_cwr_work(struct sctp_association *asoc, /* Generate SACK if necessary. We call this at the end of a packet. */ static int sctp_gen_sack(struct sctp_association *asoc, int force, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { __u32 ctsn, max_tsn_seen; struct sctp_chunk *sack; @@ -506,7 +506,7 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { * notification SHOULD be sent to the upper layer. * */ -static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, +static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands, struct sctp_association *asoc, struct sctp_transport *transport, int is_hb) @@ -578,7 +578,7 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, } /* Worker routine to handle INIT command failure. */ -static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, +static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands, struct sctp_association *asoc, unsigned int error) { @@ -601,7 +601,7 @@ static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, } /* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */ -static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, +static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands, struct sctp_association *asoc, enum sctp_event event_type, union sctp_subtype subtype, @@ -645,7 +645,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, * since all other cases use "temporary" associations and can do all * their work in statefuns directly. 
*/ -static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, +static int sctp_cmd_process_init(struct sctp_cmd_seq *commands, struct sctp_association *asoc, struct sctp_chunk *chunk, struct sctp_init_chunk *peer_init, @@ -667,7 +667,7 @@ static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, } /* Helper function to break out starting up of heartbeat timers. */ -static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, +static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds, struct sctp_association *asoc) { struct sctp_transport *t; @@ -680,7 +680,7 @@ static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, sctp_transport_reset_hb_timer(t); } -static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, +static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds, struct sctp_association *asoc) { struct sctp_transport *t; @@ -695,7 +695,7 @@ static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, } /* Helper function to stop any pending T3-RTX timers */ -static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds, +static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds, struct sctp_association *asoc) { struct sctp_transport *t; @@ -709,7 +709,7 @@ static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds, /* Helper function to handle the reception of an HEARTBEAT ACK. */ -static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, +static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds, struct sctp_association *asoc, struct sctp_transport *t, struct sctp_chunk *chunk) @@ -780,7 +780,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, /* Helper function to process the process SACK command. */ -static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, +static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds, struct sctp_association *asoc, struct sctp_chunk *chunk) { @@ -802,7 +802,7 @@ static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, /* Helper function to set the timeout value for T2-SHUTDOWN timer and to set * the transport for a shutdown chunk. */ -static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, +static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds, struct sctp_association *asoc, struct sctp_chunk *chunk) { @@ -819,7 +819,7 @@ static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; } -static void sctp_cmd_assoc_update(sctp_cmd_seq_t *cmds, +static void sctp_cmd_assoc_update(struct sctp_cmd_seq *cmds, struct sctp_association *asoc, struct sctp_association *new) { @@ -842,7 +842,7 @@ static void sctp_cmd_assoc_update(sctp_cmd_seq_t *cmds, } /* Helper function to change the state of an association. */ -static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, +static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds, struct sctp_association *asoc, enum sctp_state state) { @@ -902,7 +902,7 @@ static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, } /* Helper function to delete an association. */ -static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, +static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds, struct sctp_association *asoc) { struct sock *sk = asoc->base.sk; @@ -924,9 +924,9 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, * destination address (we use active path instead of primary path just * because primary path may be inactive. 
*/ -static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, - struct sctp_association *asoc, - struct sctp_chunk *chunk) +static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds, + struct sctp_association *asoc, + struct sctp_chunk *chunk) { struct sctp_transport *t; @@ -936,7 +936,7 @@ static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, } /* Process an incoming Operation Error Chunk. */ -static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds, +static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds, struct sctp_association *asoc, struct sctp_chunk *chunk) { @@ -1025,9 +1025,9 @@ static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error) } /* Helper function to generate an association change event */ -static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands, - struct sctp_association *asoc, - u8 state) +static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands, + struct sctp_association *asoc, + u8 state) { struct sctp_ulpevent *ev; @@ -1040,7 +1040,7 @@ static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands, } /* Helper function to generate an adaptation indication event */ -static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands, +static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands, struct sctp_association *asoc) { struct sctp_ulpevent *ev; @@ -1145,7 +1145,7 @@ int sctp_do_sm(struct net *net, enum sctp_event event_type, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, gfp_t gfp) { - sctp_cmd_seq_t commands; + struct sctp_cmd_seq commands; const sctp_sm_table_entry_t *state_fn; sctp_disposition_t status; int error = 0; @@ -1184,7 +1184,7 @@ static int sctp_side_effects(enum sctp_event event_type, struct sctp_association **asoc, void *event_arg, sctp_disposition_t status, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, gfp_t gfp) { int error; @@ -1270,7 +1270,7 @@ static int sctp_cmd_interpreter(enum sctp_event event_type, struct sctp_association *asoc, void *event_arg, sctp_disposition_t status, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, gfp_t gfp) { struct sock *sk = ep->base.sk; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index af93419209df..93b6f42a9252 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -67,7 +67,7 @@ static struct sctp_packet *sctp_abort_pkt_new(struct net *net, size_t paylen); static int sctp_eat_data(const struct sctp_association *asoc, struct sctp_chunk *chunk, - sctp_cmd_seq_t *commands); + struct sctp_cmd_seq *commands); static struct sctp_packet *sctp_ootb_pkt_new(struct net *net, const struct sctp_association *asoc, const struct sctp_chunk *chunk); @@ -75,30 +75,30 @@ static void sctp_send_stale_cookie_err(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *chunk, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, struct sctp_chunk *err_chunk); static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands); + struct sctp_cmd_seq *commands); static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands); + struct sctp_cmd_seq *commands); static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union 
sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands); + struct sctp_cmd_seq *commands); static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, __be16 error, int sk_err, const struct sctp_association *asoc, struct sctp_transport *transport); @@ -108,7 +108,7 @@ static sctp_disposition_t sctp_sf_abort_violation( const struct sctp_endpoint *ep, const struct sctp_association *asoc, void *arg, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, const __u8 *payload, const size_t paylen); @@ -118,7 +118,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands); + struct sctp_cmd_seq *commands); static sctp_disposition_t sctp_sf_violation_paramlen( struct net *net, @@ -126,7 +126,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, void *ext, - sctp_cmd_seq_t *commands); + struct sctp_cmd_seq *commands); static sctp_disposition_t sctp_sf_violation_ctsn( struct net *net, @@ -134,7 +134,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands); + struct sctp_cmd_seq *commands); static sctp_disposition_t sctp_sf_violation_chunk( struct net *net, @@ -142,7 +142,7 @@ static sctp_disposition_t sctp_sf_violation_chunk( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands); + struct sctp_cmd_seq *commands); static enum sctp_ierror sctp_sf_authenticate( struct net *net, @@ -156,7 +156,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands); + struct sctp_cmd_seq *commands); /* Small helper function that checks if the chunk length * is of the appropriate length. 
The 'required_length' argument @@ -219,7 +219,7 @@ sctp_disposition_t sctp_sf_do_4_C(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_ulpevent *ev; @@ -305,7 +305,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg, *repl, *err_chunk; struct sctp_unrecognized_param *unk_param; @@ -499,7 +499,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_init_chunk *initchunk; @@ -648,7 +648,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_association *new_asoc; @@ -875,7 +875,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_ulpevent *ev; @@ -953,7 +953,7 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_transport *transport = (struct sctp_transport *) arg; struct sctp_chunk *reply; @@ -979,7 +979,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_transport *transport = (struct sctp_transport *) arg; @@ -1026,7 +1026,7 @@ sctp_disposition_t sctp_sf_send_reconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_transport *transport = arg; @@ -1078,7 +1078,7 @@ sctp_disposition_t sctp_sf_beat_8_3(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_paramhdr *param_hdr; struct sctp_chunk *chunk = arg; @@ -1153,7 +1153,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_sender_hb_info *hbinfo; struct sctp_chunk *chunk = arg; @@ -1225,7 +1225,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net, */ static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa, struct sctp_chunk *init, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { int len; struct sctp_packet *pkt; @@ -1290,7 +1290,7 @@ static bool list_has_sctp_addr(const struct list_head *list, static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, const struct sctp_association *asoc, struct sctp_chunk *init, - sctp_cmd_seq_t *commands) + struct 
sctp_cmd_seq *commands) { struct net *net = sock_net(new_asoc->base.sk); struct sctp_transport *new_addr; @@ -1415,7 +1415,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init( const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, - void *arg, sctp_cmd_seq_t *commands) + void *arg, struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg, *repl, *err_chunk; struct sctp_unrecognized_param *unk_param; @@ -1627,7 +1627,7 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { /* Call helper to do the real work for both simulataneous and * duplicate INIT chunk handling. @@ -1681,7 +1681,7 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { /* Call helper to do the real work for both simulataneous and * duplicate INIT chunk handling. @@ -1703,7 +1703,8 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, - void *arg, sctp_cmd_seq_t *commands) + void *arg, + struct sctp_cmd_seq *commands) { /* Per the above section, we'll discard the chunk if we have an * endpoint. If this is an OOTB INIT-ACK, treat it as such. @@ -1723,7 +1724,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, struct sctp_association *new_asoc) { struct sctp_init_chunk *peer_init; @@ -1838,7 +1839,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, struct sctp_association *new_asoc) { struct sctp_init_chunk *peer_init; @@ -1909,7 +1910,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, struct sctp_association *new_asoc) { /* The cookie should be silently discarded. 
@@ -1931,7 +1932,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, struct sctp_association *new_asoc) { struct sctp_ulpevent *ev = NULL, *ai_ev = NULL; @@ -2027,7 +2028,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { sctp_disposition_t retval; struct sctp_chunk *chunk = arg; @@ -2146,7 +2147,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -2188,7 +2189,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -2239,7 +2240,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { /* The same T2 timer, so we should be able to use * common function with the SHUTDOWN-SENT state. @@ -2266,7 +2267,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_errhdr *err; @@ -2330,7 +2331,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { int attempts = asoc->init_err_counter + 1; struct sctp_chunk *chunk = arg, *reply; @@ -2452,7 +2453,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -2489,7 +2490,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; unsigned int len; @@ -2527,7 +2528,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; unsigned int len; @@ -2566,7 +2567,7 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR, ENOPROTOOPT, asoc, @@ -2581,7 +2582,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { /* There is a single T1 timer, so we should be able to use * common function with the COOKIE-WAIT state. 
@@ -2595,7 +2596,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net, * This is common code called by several sctp_sf_*_abort() functions above. */ static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, __be16 error, int sk_err, const struct sctp_association *asoc, struct sctp_transport *transport) @@ -2653,7 +2654,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; sctp_disposition_t disposition; @@ -2742,7 +2743,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_shutdownhdr *sdh; @@ -2795,7 +2796,7 @@ sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = (struct sctp_chunk *) arg; struct sctp_chunk *reply; @@ -2859,7 +2860,7 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_cwrhdr *cwr; @@ -2915,7 +2916,7 @@ sctp_disposition_t sctp_sf_do_ecne(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_ecnehdr *ecne; @@ -2972,7 +2973,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; sctp_arg_t force = SCTP_NOFORCE(); @@ -3092,7 +3093,7 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; int error; @@ -3183,7 +3184,7 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_sackhdr *sackh; @@ -3257,7 +3258,7 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_packet *packet = NULL; struct sctp_chunk *chunk = arg; @@ -3307,7 +3308,7 @@ sctp_disposition_t sctp_sf_operr_notify(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_errhdr *err; @@ -3345,7 +3346,7 @@ sctp_disposition_t sctp_sf_do_9_2_final(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_chunk *reply; @@ -3428,7 +3429,7 @@ sctp_disposition_t 
sctp_sf_ootb(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sk_buff *skb = chunk->skb; @@ -3521,7 +3522,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_packet *packet = NULL; struct sctp_chunk *chunk = arg; @@ -3583,7 +3584,7 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -3607,7 +3608,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_chunk *asconf_ack = NULL; @@ -3725,7 +3726,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *asconf_ack = arg; struct sctp_chunk *last_asconf = asoc->addip_last_asconf; @@ -3843,7 +3844,7 @@ sctp_disposition_t sctp_sf_do_reconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_paramhdr *err_param = NULL; struct sctp_chunk *chunk = arg; @@ -3920,7 +3921,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_fwdtsn_hdr *fwdtsn_hdr; @@ -3991,7 +3992,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_fwdtsn_hdr *fwdtsn_hdr; @@ -4158,7 +4159,7 @@ sctp_disposition_t sctp_sf_eat_auth(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_authhdr *auth_hdr; @@ -4255,7 +4256,7 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *unk_chunk = arg; struct sctp_chunk *err_chunk; @@ -4335,7 +4336,7 @@ sctp_disposition_t sctp_sf_discard_chunk(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -4375,7 +4376,7 @@ sctp_disposition_t sctp_sf_pdiscard(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS); sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); @@ -4403,7 +4404,7 @@ sctp_disposition_t sctp_sf_violation(struct net *net, const struct 
sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -4423,7 +4424,7 @@ static sctp_disposition_t sctp_sf_abort_violation( const struct sctp_endpoint *ep, const struct sctp_association *asoc, void *arg, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, const __u8 *payload, const size_t paylen) { @@ -4541,7 +4542,7 @@ static sctp_disposition_t sctp_sf_violation_chunklen( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { static const char err_str[] = "The following chunk had invalid length:"; @@ -4561,7 +4562,7 @@ static sctp_disposition_t sctp_sf_violation_paramlen( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, void *ext, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_paramhdr *param = ext; @@ -4604,7 +4605,7 @@ static sctp_disposition_t sctp_sf_violation_ctsn( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:"; @@ -4624,7 +4625,7 @@ static sctp_disposition_t sctp_sf_violation_chunk( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { static const char err_str[] = "The following chunk violates protocol:"; @@ -4699,7 +4700,7 @@ sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *repl; struct sctp_association *my_asoc; @@ -4811,7 +4812,7 @@ sctp_disposition_t sctp_sf_do_prm_send(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_datamsg *msg = arg; @@ -4851,7 +4852,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { int disposition; @@ -4907,7 +4908,7 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { /* From 9.1 Abort of an Association * Upon receipt of the ABORT primitive from its upper @@ -4944,7 +4945,7 @@ sctp_disposition_t sctp_sf_error_closed(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL)); return SCTP_DISPOSITION_CONSUME; @@ -4958,7 +4959,7 @@ sctp_disposition_t sctp_sf_error_shutdown(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-ESHUTDOWN)); @@ -4985,7 +4986,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, 
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); @@ -5019,7 +5020,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown( const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, - void *arg, sctp_cmd_seq_t *commands) + void *arg, struct sctp_cmd_seq *commands) { /* There is a single T1 timer, so we should be able to use * common function with the COOKIE-WAIT state. @@ -5047,7 +5048,7 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *abort = arg; @@ -5096,7 +5097,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { /* There is a single T1 timer, so we should be able to use * common function with the COOKIE-WAIT state. @@ -5122,7 +5123,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { /* Stop the T5-shutdown guard timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, @@ -5149,7 +5150,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { /* Stop the T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, @@ -5180,7 +5181,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { /* The same T2 timer, so we should be able to use * common function with the SHUTDOWN-SENT state. 
@@ -5216,7 +5217,7 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type, (struct sctp_transport *)arg, commands)) @@ -5248,7 +5249,7 @@ sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -5264,7 +5265,8 @@ sctp_disposition_t sctp_sf_do_prm_reconf(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, - void *arg, sctp_cmd_seq_t *commands) + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -5283,7 +5285,7 @@ sctp_disposition_t sctp_sf_ignore_primitive( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { pr_debug("%s: primitive type:%d is ignored\n", __func__, type.primitive); @@ -5307,7 +5309,7 @@ sctp_disposition_t sctp_sf_do_no_pending_tsn( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_ulpevent *event; @@ -5339,7 +5341,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *reply; @@ -5409,7 +5411,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = (struct sctp_chunk *) arg; struct sctp_chunk *reply; @@ -5482,7 +5484,7 @@ sctp_disposition_t sctp_sf_ignore_other(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { pr_debug("%s: the event other type:%d is ignored\n", __func__, type.other); @@ -5510,7 +5512,7 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_transport *transport = arg; @@ -5598,7 +5600,7 @@ sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS); sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); @@ -5629,7 +5631,7 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *repl = NULL; struct sctp_bind_addr *bp; @@ -5693,7 +5695,7 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *repl = NULL; int attempts = asoc->init_err_counter + 1; @@ -5743,7 +5745,7 @@ sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net, const struct sctp_association *asoc, const union sctp_subtype 
type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *reply = NULL; @@ -5814,7 +5816,7 @@ sctp_disposition_t sctp_sf_t4_timer_expire( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = asoc->addip_last_asconf; struct sctp_transport *transport = chunk->transport; @@ -5885,7 +5887,7 @@ sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_chunk *reply = NULL; @@ -5922,7 +5924,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire( const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { int disposition; @@ -5964,7 +5966,7 @@ sctp_disposition_t sctp_sf_not_impl(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { return SCTP_DISPOSITION_NOT_IMPL; } @@ -5982,7 +5984,7 @@ sctp_disposition_t sctp_sf_bug(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { return SCTP_DISPOSITION_BUG; } @@ -6003,7 +6005,7 @@ sctp_disposition_t sctp_sf_timer_ignore(struct net *net, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { pr_debug("%s: timer %d ignored\n", __func__, type.chunk); @@ -6169,7 +6171,7 @@ static void sctp_send_stale_cookie_err(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *chunk, - sctp_cmd_seq_t *commands, + struct sctp_cmd_seq *commands, struct sctp_chunk *err_chunk) { struct sctp_packet *packet; @@ -6198,7 +6200,7 @@ static void sctp_send_stale_cookie_err(struct net *net, /* Process a data chunk */ static int sctp_eat_data(const struct sctp_association *asoc, struct sctp_chunk *chunk, - sctp_cmd_seq_t *commands) + struct sctp_cmd_seq *commands) { struct sctp_datahdr *data_hdr; struct sctp_chunk *err; -- cgit v1.2.3-55-g7522 From c488b7704ed0eed18e11f9b685931558735f2a68 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:53 +0800 Subject: sctp: remove the typedef sctp_arg_t This patch is to remove the typedef sctp_arg_t, and replace with union sctp_arg in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/command.h | 26 +++++++++++++------------- net/sctp/sm_statefuns.c | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index cbf6798866ef..f5fc425b5a4f 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -117,7 +117,7 @@ typedef enum { */ #define SCTP_MAX_NUM_COMMANDS 20 -typedef union { +union sctp_arg { void *zero_all; /* Set to NULL to clear the entire union */ __s32 i32; __u32 u32; @@ -137,24 +137,24 @@ typedef union { struct sctp_packet *packet; struct sctp_sackhdr *sackh; struct sctp_datamsg *msg; -} sctp_arg_t; +}; /* We are simulating ML type constructors here. * * SCTP_ARG_CONSTRUCTOR(NAME, TYPE, ELT) builds a function called * SCTP_NAME() which takes an argument of type TYPE and returns an - * sctp_arg_t. 
It does this by inserting the sole argument into the - * ELT union element of a local sctp_arg_t. + * union sctp_arg. It does this by inserting the sole argument into + * the ELT union element of a local union sctp_arg. * * E.g., SCTP_ARG_CONSTRUCTOR(I32, __s32, i32) builds SCTP_I32(arg), - * which takes an __s32 and returns a sctp_arg_t containing the + * which takes an __s32 and returns a union sctp_arg containing the * __s32. So, after foo = SCTP_I32(arg), foo.i32 == arg. */ #define SCTP_ARG_CONSTRUCTOR(name, type, elt) \ -static inline sctp_arg_t \ +static inline union sctp_arg \ SCTP_## name (type arg) \ -{ sctp_arg_t retval;\ +{ union sctp_arg retval;\ retval.zero_all = NULL;\ retval.elt = arg;\ return retval;\ @@ -179,25 +179,25 @@ SCTP_ARG_CONSTRUCTOR(PACKET, struct sctp_packet *, packet) SCTP_ARG_CONSTRUCTOR(SACKH, struct sctp_sackhdr *, sackh) SCTP_ARG_CONSTRUCTOR(DATAMSG, struct sctp_datamsg *, msg) -static inline sctp_arg_t SCTP_FORCE(void) +static inline union sctp_arg SCTP_FORCE(void) { return SCTP_I32(1); } -static inline sctp_arg_t SCTP_NOFORCE(void) +static inline union sctp_arg SCTP_NOFORCE(void) { return SCTP_I32(0); } -static inline sctp_arg_t SCTP_NULL(void) +static inline union sctp_arg SCTP_NULL(void) { - sctp_arg_t retval; + union sctp_arg retval; retval.zero_all = NULL; return retval; } struct sctp_cmd { - sctp_arg_t obj; + union sctp_arg obj; sctp_verb_t verb; }; @@ -226,7 +226,7 @@ static inline int sctp_init_cmd_seq(struct sctp_cmd_seq *seq) * to wrap data which goes in the obj argument. */ static inline void sctp_add_cmd_sf(struct sctp_cmd_seq *seq, sctp_verb_t verb, - sctp_arg_t obj) + union sctp_arg obj) { struct sctp_cmd *cmd = seq->last_used_slot - 1; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 93b6f42a9252..3394c4d34ea4 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2975,8 +2975,8 @@ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net, void *arg, struct sctp_cmd_seq *commands) { + union sctp_arg force = SCTP_NOFORCE(); struct sctp_chunk *chunk = arg; - sctp_arg_t force = SCTP_NOFORCE(); int error; if (!sctp_vtag_verify(chunk, asoc)) { -- cgit v1.2.3-55-g7522 From e08af95df1130883762b388a19bb150ae5d16c09 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:54 +0800 Subject: sctp: remove the typedef sctp_verb_t This patch is to remove the typedef sctp_verb_t, and replace with enum sctp_verb in the places where it's using this typedef. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/command.h | 10 +++++----- net/sctp/sm_statefuns.c | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index f5fc425b5a4f..b55c6a48a206 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h @@ -40,7 +40,7 @@ #include -typedef enum { +enum sctp_verb { SCTP_CMD_NOP = 0, /* Do nothing. */ SCTP_CMD_NEW_ASOC, /* Register a new association. */ SCTP_CMD_DELETE_TCB, /* Delete the current association. */ @@ -108,7 +108,7 @@ typedef enum { SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/ SCTP_CMD_SET_ASOC, /* Restore association context */ SCTP_CMD_LAST -} sctp_verb_t; +}; /* How many commands can you put in an struct sctp_cmd_seq? 
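/* [Editor's note: illustrative expansion, not part of the patch. Given the
 * SCTP_ARG_CONSTRUCTOR() definition shown above, the I32 instance described
 * in its comment expands to roughly the following. ] */
static inline union sctp_arg SCTP_I32(__s32 arg)
{
	union sctp_arg retval;

	retval.zero_all = NULL;	/* clear the union via its zero_all member */
	retval.i32 = arg;	/* store the argument in the matching slot */
	return retval;
}
/* ...so that after foo = SCTP_I32(arg), foo.i32 == arg. */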
* This is a rather arbitrary number, ideally derived from a careful @@ -198,7 +198,7 @@ static inline union sctp_arg SCTP_NULL(void) struct sctp_cmd { union sctp_arg obj; - sctp_verb_t verb; + enum sctp_verb verb; }; struct sctp_cmd_seq { @@ -225,8 +225,8 @@ static inline int sctp_init_cmd_seq(struct sctp_cmd_seq *seq) * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above * to wrap data which goes in the obj argument. */ -static inline void sctp_add_cmd_sf(struct sctp_cmd_seq *seq, sctp_verb_t verb, - union sctp_arg obj) +static inline void sctp_add_cmd_sf(struct sctp_cmd_seq *seq, + enum sctp_verb verb, union sctp_arg obj) { struct sctp_cmd *cmd = seq->last_used_slot - 1; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 3394c4d34ea4..adc1dde34bfe 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -6205,7 +6205,7 @@ static int sctp_eat_data(const struct sctp_association *asoc, struct sctp_datahdr *data_hdr; struct sctp_chunk *err; size_t datalen; - sctp_verb_t deliver; + enum sctp_verb deliver; int tmp; __u32 tsn; struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; -- cgit v1.2.3-55-g7522 From eb662a6a9b2a4ee3c76bbfc4c23f4dc56859b0f3 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:55 +0800 Subject: sctp: remove the unused typedef sctp_sm_command_t Remove this typedef including the struct, there is even no places using it. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/sm.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 9af64b98d96b..3ca75a6c4b5e 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -65,11 +65,6 @@ typedef enum { SCTP_DISPOSITION_BUG, /* This is a bug. */ } sctp_disposition_t; -typedef struct { - int name; - int action; -} sctp_sm_command_t; - typedef sctp_disposition_t (sctp_state_fn_t) (struct net *, const struct sctp_endpoint *, const struct sctp_association *, -- cgit v1.2.3-55-g7522 From 8ee821aea39c6bf4142c9319adecea6d3e1af4a2 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:56 +0800 Subject: sctp: remove the typedef sctp_sm_table_entry_t This patch is to remove the typedef sctp_sm_table_entry_t, and replace with struct sctp_sm_table_entry in the places where it's using this typedef. It is also to fix some indents. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/sm.h | 6 +++--- net/sctp/sm_sideeffect.c | 2 +- net/sctp/sm_statetable.c | 42 +++++++++++++++++++++++++----------------- 3 files changed, 29 insertions(+), 21 deletions(-) diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 3ca75a6c4b5e..7ad240228a0f 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -72,10 +72,10 @@ typedef sctp_disposition_t (sctp_state_fn_t) (struct net *, void *arg, struct sctp_cmd_seq *); typedef void (sctp_timer_event_t) (unsigned long); -typedef struct { +struct sctp_sm_table_entry { sctp_state_fn_t *fn; const char *name; -} sctp_sm_table_entry_t; +}; /* A naming convention of "sctp_sf_xxx" applies to all the state functions * currently in use. @@ -170,7 +170,7 @@ sctp_state_fn_t sctp_sf_autoclose_timer_expire; /* Prototypes for utility support functions. 
*/ __u8 sctp_get_chunk_type(struct sctp_chunk *chunk); -const sctp_sm_table_entry_t *sctp_sm_lookup_event( +const struct sctp_sm_table_entry *sctp_sm_lookup_event( struct net *net, enum sctp_event event_type, enum sctp_state state, diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 6dd5934cbda6..2bc1204becbd 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1146,7 +1146,7 @@ int sctp_do_sm(struct net *net, enum sctp_event event_type, void *event_arg, gfp_t gfp) { struct sctp_cmd_seq commands; - const sctp_sm_table_entry_t *state_fn; + const struct sctp_sm_table_entry *state_fn; sctp_disposition_t status; int error = 0; typedef const char *(printfn_t)(union sctp_subtype); diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index d437f3801399..79b6bee5b768 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -45,27 +45,27 @@ #include #include -static const sctp_sm_table_entry_t +static const struct sctp_sm_table_entry primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES]; -static const sctp_sm_table_entry_t +static const struct sctp_sm_table_entry other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES]; -static const sctp_sm_table_entry_t +static const struct sctp_sm_table_entry timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES]; -static const sctp_sm_table_entry_t *sctp_chunk_event_lookup( +static const struct sctp_sm_table_entry *sctp_chunk_event_lookup( struct net *net, enum sctp_cid cid, enum sctp_state state); -static const sctp_sm_table_entry_t bug = { +static const struct sctp_sm_table_entry bug = { .fn = sctp_sf_bug, .name = "sctp_sf_bug" }; #define DO_LOOKUP(_max, _type, _table) \ ({ \ - const sctp_sm_table_entry_t *rtn; \ + const struct sctp_sm_table_entry *rtn; \ \ if ((event_subtype._type > (_max))) { \ pr_warn("table %p possible attack: event %d exceeds max %d\n", \ @@ -77,7 +77,7 @@ static const sctp_sm_table_entry_t bug = { rtn; \ }) -const sctp_sm_table_entry_t *sctp_sm_lookup_event( +const struct sctp_sm_table_entry *sctp_sm_lookup_event( struct net *net, enum sctp_event event_type, enum sctp_state state, @@ -394,7 +394,8 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event( * * For base protocol (RFC 2960). */ -static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { +static const struct sctp_sm_table_entry +chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_DATA, TYPE_SCTP_INIT, TYPE_SCTP_INIT_ACK, @@ -453,7 +454,8 @@ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][ /* The primary index for this table is the chunk type. * The secondary index for this table is the state. */ -static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { +static const struct sctp_sm_table_entry +addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_ASCONF, TYPE_SCTP_ASCONF_ACK, }; /*state_fn_t addip_chunk_event_table[][] */ @@ -480,7 +482,8 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_ /* The primary index for this table is the chunk type. * The secondary index for this table is the state. 
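/* [Editor's note: simplified sketch of how the state tables above are
 * consumed. The real bounds checking lives in the DO_LOOKUP() macro and
 * sctp_chunk_event_lookup(); the function name here is hypothetical and the
 * logic is condensed. ] */
static const struct sctp_sm_table_entry *
example_lookup(enum sctp_cid cid, enum sctp_state state)
{
	/* Primary index: chunk type. Secondary index: association state. */
	if (cid >= SCTP_NUM_BASE_CHUNK_TYPES)
		return &bug;	/* out-of-range events fall back to sctp_sf_bug */

	return &chunk_event_table[cid][state];
}
/* sctp_do_sm() then invokes the chosen entry's ->fn() and feeds the returned
 * disposition to sctp_side_effects(). */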
*/ -static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { +static const struct sctp_sm_table_entry +prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_FWD_TSN, }; /*state_fn_t prsctp_chunk_event_table[][] */ @@ -506,7 +509,8 @@ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUN /* The primary index for this table is the chunk type. * The secondary index for this table is the state. */ -static const sctp_sm_table_entry_t reconf_chunk_event_table[SCTP_NUM_RECONF_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { +static const struct sctp_sm_table_entry +reconf_chunk_event_table[SCTP_NUM_RECONF_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_RECONF, }; /*state_fn_t reconf_chunk_event_table[][] */ @@ -532,11 +536,12 @@ static const sctp_sm_table_entry_t reconf_chunk_event_table[SCTP_NUM_RECONF_CHUN /* The primary index for this table is the chunk type. * The secondary index for this table is the state. */ -static const sctp_sm_table_entry_t auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { +static const struct sctp_sm_table_entry +auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_AUTH, }; /*state_fn_t auth_chunk_event_table[][] */ -static const sctp_sm_table_entry_t +static const struct sctp_sm_table_entry chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { /* SCTP_STATE_CLOSED */ TYPE_SCTP_FUNC(sctp_sf_ootb), @@ -693,7 +698,8 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { /* The primary index for this table is the primitive type. * The secondary index for this table is the state. */ -static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES] = { +static const struct sctp_sm_table_entry +primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_PRIMITIVE_ASSOCIATE, TYPE_SCTP_PRIMITIVE_SHUTDOWN, TYPE_SCTP_PRIMITIVE_ABORT, @@ -741,7 +747,8 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ } -static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = { +static const struct sctp_sm_table_entry +other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_OTHER_NO_PENDING_TSN, TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH, }; @@ -955,7 +962,8 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } -static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = { +static const struct sctp_sm_table_entry +timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_EVENT_TIMEOUT_NONE, TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE, TYPE_SCTP_EVENT_TIMEOUT_T1_INIT, @@ -969,7 +977,7 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE, }; -static const sctp_sm_table_entry_t *sctp_chunk_event_lookup( +static const struct sctp_sm_table_entry *sctp_chunk_event_lookup( struct net *net, enum sctp_cid cid, enum sctp_state state) -- cgit v1.2.3-55-g7522 From 172a1599ba88df7147f6503a75686fb89c8a1f3f Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:57 +0800 Subject: sctp: remove the typedef sctp_disposition_t This patch is to remove the typedef sctp_disposition_t, and replace with enum sctp_disposition in the places where it's 
using this typedef. It's also to fix the indent for many functions' defination. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/sm.h | 19 +- net/sctp/probe.c | 13 +- net/sctp/sm_sideeffect.c | 50 +- net/sctp/sm_statefuns.c | 1316 ++++++++++++++++++++++++---------------------- 4 files changed, 717 insertions(+), 681 deletions(-) diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 7ad240228a0f..33077f317995 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -53,7 +53,7 @@ /* * Possible values for the disposition are: */ -typedef enum { +enum sctp_disposition { SCTP_DISPOSITION_DISCARD, /* No further processing. */ SCTP_DISPOSITION_CONSUME, /* Process return values normally. */ SCTP_DISPOSITION_NOMEM, /* We ran out of memory--recover. */ @@ -63,14 +63,15 @@ typedef enum { SCTP_DISPOSITION_NOT_IMPL, /* This entry is not implemented. */ SCTP_DISPOSITION_ERROR, /* This is plain old user error. */ SCTP_DISPOSITION_BUG, /* This is a bug. */ -} sctp_disposition_t; - -typedef sctp_disposition_t (sctp_state_fn_t) (struct net *, - const struct sctp_endpoint *, - const struct sctp_association *, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *); +}; + +typedef enum sctp_disposition (sctp_state_fn_t) ( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands); typedef void (sctp_timer_event_t) (unsigned long); struct sctp_sm_table_entry { sctp_state_fn_t *fn; diff --git a/net/sctp/probe.c b/net/sctp/probe.c index 34097a167431..1280f85a598d 100644 --- a/net/sctp/probe.c +++ b/net/sctp/probe.c @@ -127,12 +127,13 @@ static const struct file_operations sctpprobe_fops = { .llseek = noop_llseek, }; -static sctp_disposition_t jsctp_sf_eat_sack(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +static enum sctp_disposition jsctp_sf_eat_sack( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sk_buff *skb = chunk->skb; diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 2bc1204becbd..e6a2974e020e 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -57,7 +57,7 @@ static int sctp_cmd_interpreter(enum sctp_event event_type, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, - sctp_disposition_t status, + enum sctp_disposition status, struct sctp_cmd_seq *commands, gfp_t gfp); static int sctp_side_effects(enum sctp_event event_type, @@ -66,7 +66,7 @@ static int sctp_side_effects(enum sctp_event event_type, struct sctp_endpoint *ep, struct sctp_association **asoc, void *event_arg, - sctp_disposition_t status, + enum sctp_disposition status, struct sctp_cmd_seq *commands, gfp_t gfp); @@ -97,8 +97,8 @@ static void sctp_do_ecn_ce_work(struct sctp_association *asoc, * that was originally marked with the CE bit. 
*/ static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc, - __u32 lowest_tsn, - struct sctp_chunk *chunk) + __u32 lowest_tsn, + struct sctp_chunk *chunk) { struct sctp_chunk *repl; @@ -152,9 +152,9 @@ static void sctp_do_ecn_cwr_work(struct sctp_association *asoc, static int sctp_gen_sack(struct sctp_association *asoc, int force, struct sctp_cmd_seq *commands) { + struct sctp_transport *trans = asoc->peer.last_data_from; __u32 ctsn, max_tsn_seen; struct sctp_chunk *sack; - struct sctp_transport *trans = asoc->peer.last_data_from; int error = 0; if (force || @@ -244,11 +244,11 @@ nomem: */ void sctp_generate_t3_rtx_event(unsigned long peer) { - int error; struct sctp_transport *transport = (struct sctp_transport *) peer; struct sctp_association *asoc = transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); + int error; /* Check whether a task is in the sock. */ @@ -361,12 +361,12 @@ static void sctp_generate_autoclose_event(unsigned long data) */ void sctp_generate_heartbeat_event(unsigned long data) { - int error = 0; struct sctp_transport *transport = (struct sctp_transport *) data; struct sctp_association *asoc = transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); u32 elapsed, timeout; + int error = 0; bh_lock_sock(sk); if (sock_owned_by_user(sk)) { @@ -406,7 +406,7 @@ out_unlock: */ void sctp_generate_proto_unreach_event(unsigned long data) { - struct sctp_transport *transport = (struct sctp_transport *) data; + struct sctp_transport *transport = (struct sctp_transport *)data; struct sctp_association *asoc = transport->asoc; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); @@ -472,7 +472,7 @@ out_unlock: /* Inject a SACK Timeout event into the state machine. */ static void sctp_generate_sack_event(unsigned long data) { - struct sctp_association *asoc = (struct sctp_association *) data; + struct sctp_association *asoc = (struct sctp_association *)data; sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); } @@ -610,6 +610,7 @@ static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands, { struct sctp_ulpevent *event; struct sctp_chunk *abort; + /* Cancel any partial delivery in progress. 
*/ sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); @@ -991,6 +992,7 @@ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk) { struct sctp_fwdtsn_skip *skip; + /* Walk through all the skipped SSNs */ sctp_walk_fwdtsn(skip, chunk) { sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); @@ -1003,8 +1005,8 @@ static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, static void sctp_cmd_del_non_primary(struct sctp_association *asoc) { struct sctp_transport *t; - struct list_head *pos; struct list_head *temp; + struct list_head *pos; list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { t = list_entry(pos, struct sctp_transport, transports); @@ -1145,15 +1147,15 @@ int sctp_do_sm(struct net *net, enum sctp_event event_type, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, gfp_t gfp) { - struct sctp_cmd_seq commands; - const struct sctp_sm_table_entry *state_fn; - sctp_disposition_t status; - int error = 0; typedef const char *(printfn_t)(union sctp_subtype); static printfn_t *table[] = { NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname, }; printfn_t *debug_fn __attribute__ ((unused)) = table[event_type]; + const struct sctp_sm_table_entry *state_fn; + struct sctp_cmd_seq commands; + enum sctp_disposition status; + int error = 0; /* Look up the state function, run it, and then process the * side effects. These three steps are the heart of lksctp. @@ -1183,7 +1185,7 @@ static int sctp_side_effects(enum sctp_event event_type, struct sctp_endpoint *ep, struct sctp_association **asoc, void *event_arg, - sctp_disposition_t status, + enum sctp_disposition status, struct sctp_cmd_seq *commands, gfp_t gfp) { @@ -1269,23 +1271,21 @@ static int sctp_cmd_interpreter(enum sctp_event event_type, struct sctp_endpoint *ep, struct sctp_association *asoc, void *event_arg, - sctp_disposition_t status, + enum sctp_disposition status, struct sctp_cmd_seq *commands, gfp_t gfp) { - struct sock *sk = ep->base.sk; - struct sctp_sock *sp = sctp_sk(sk); - int error = 0; - int force; - struct sctp_cmd *cmd; - struct sctp_chunk *new_obj; - struct sctp_chunk *chunk = NULL; + struct sctp_sock *sp = sctp_sk(ep->base.sk); + struct sctp_chunk *chunk = NULL, *new_obj; struct sctp_packet *packet; + struct sctp_sackhdr sackh; struct timer_list *timer; - unsigned long timeout; struct sctp_transport *t; - struct sctp_sackhdr sackh; + unsigned long timeout; + struct sctp_cmd *cmd; int local_cork = 0; + int error = 0; + int force; if (SCTP_EVENT_T_TIMEOUT != event_type) chunk = event_arg; diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index adc1dde34bfe..8f8ccded13e4 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -59,37 +59,41 @@ #include #include -static struct sctp_packet *sctp_abort_pkt_new(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - struct sctp_chunk *chunk, - const void *payload, - size_t paylen); +static struct sctp_packet *sctp_abort_pkt_new( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk, + const void *payload, size_t paylen); static int sctp_eat_data(const struct sctp_association *asoc, struct sctp_chunk *chunk, struct sctp_cmd_seq *commands); -static struct sctp_packet *sctp_ootb_pkt_new(struct net *net, - const struct sctp_association *asoc, - const struct sctp_chunk *chunk); +static struct sctp_packet *sctp_ootb_pkt_new( + struct net *net, + const struct sctp_association *asoc, + const 
struct sctp_chunk *chunk); static void sctp_send_stale_cookie_err(struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *chunk, struct sctp_cmd_seq *commands, struct sctp_chunk *err_chunk); -static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands); -static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands); -static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, +static enum sctp_disposition sctp_sf_do_5_2_6_stale( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands); +static enum sctp_disposition sctp_sf_shut_8_4_5( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands); +static enum sctp_disposition sctp_sf_tabort_8_4_8( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, @@ -97,61 +101,63 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, struct sctp_cmd_seq *commands); static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); -static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, - struct sctp_cmd_seq *commands, - __be16 error, int sk_err, - const struct sctp_association *asoc, - struct sctp_transport *transport); +static enum sctp_disposition sctp_stop_t1_and_abort( + struct net *net, + struct sctp_cmd_seq *commands, + __be16 error, int sk_err, + const struct sctp_association *asoc, + struct sctp_transport *transport); -static sctp_disposition_t sctp_sf_abort_violation( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - void *arg, - struct sctp_cmd_seq *commands, - const __u8 *payload, - const size_t paylen); +static enum sctp_disposition sctp_sf_abort_violation( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + void *arg, + struct sctp_cmd_seq *commands, + const __u8 *payload, + const size_t paylen); -static sctp_disposition_t sctp_sf_violation_chunklen( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands); +static enum sctp_disposition sctp_sf_violation_chunklen( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands); -static sctp_disposition_t sctp_sf_violation_paramlen( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, void *ext, - struct sctp_cmd_seq *commands); +static enum sctp_disposition sctp_sf_violation_paramlen( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, void *ext, + struct sctp_cmd_seq *commands); -static sctp_disposition_t sctp_sf_violation_ctsn( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype 
type, - void *arg, - struct sctp_cmd_seq *commands); +static enum sctp_disposition sctp_sf_violation_ctsn( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands); -static sctp_disposition_t sctp_sf_violation_chunk( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands); +static enum sctp_disposition sctp_sf_violation_chunk( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands); static enum sctp_ierror sctp_sf_authenticate( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - struct sctp_chunk *chunk); + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + struct sctp_chunk *chunk); -static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, +static enum sctp_disposition __sctp_sf_do_9_1_abort( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, @@ -165,8 +171,8 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, * false = Invalid length * */ -static inline bool -sctp_chunk_length_valid(struct sctp_chunk *chunk, __u16 required_length) +static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk, + __u16 required_length) { __u16 chunk_length = ntohs(chunk->chunk_hdr->length); @@ -214,12 +220,11 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk, __u16 required_length) * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_do_4_C(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_4_C(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_ulpevent *ev; @@ -300,12 +305,12 @@ sctp_disposition_t sctp_sf_do_4_C(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_5_1B_init(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg, *repl, *err_chunk; struct sctp_unrecognized_param *unk_param; @@ -494,15 +499,15 @@ nomem: * * The return value is the disposition of the chunk. 
*/ -sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *chunk = arg; struct sctp_init_chunk *initchunk; + struct sctp_chunk *chunk = arg; struct sctp_chunk *err_chunk; struct sctp_packet *packet; @@ -644,20 +649,21 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *chunk = arg; + struct sctp_ulpevent *ev, *ai_ev = NULL; struct sctp_association *new_asoc; struct sctp_init_chunk *peer_init; - struct sctp_chunk *repl; - struct sctp_ulpevent *ev, *ai_ev = NULL; - int error = 0; + struct sctp_chunk *chunk = arg; struct sctp_chunk *err_chk_p; + struct sctp_chunk *repl; struct sock *sk; + int error = 0; /* If the packet is an OOTB packet which is temporarily on the * control endpoint, respond with an ABORT. @@ -871,11 +877,12 @@ nomem: * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_5_1E_ca(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_ulpevent *ev; @@ -949,11 +956,12 @@ nomem: } /* Generate and sendout a heartbeat packet. */ -static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +static enum sctp_disposition sctp_sf_heartbeat( + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_transport *transport = (struct sctp_transport *) arg; struct sctp_chunk *reply; @@ -974,12 +982,12 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep, } /* Generate a HEARTBEAT packet on the given transport. */ -sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_sendbeat_8_3(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_transport *transport = (struct sctp_transport *) arg; @@ -1022,11 +1030,12 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net, } /* resend asoc strreset_chunk. 
*/ -sctp_disposition_t sctp_sf_send_reconf(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_send_reconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_transport *transport = arg; @@ -1073,12 +1082,11 @@ sctp_disposition_t sctp_sf_send_reconf(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_beat_8_3(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_beat_8_3(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, struct sctp_cmd_seq *commands) { struct sctp_paramhdr *param_hdr; struct sctp_chunk *chunk = arg; @@ -1148,12 +1156,12 @@ nomem: * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_backbeat_8_3(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_sender_hb_info *hbinfo; struct sctp_chunk *chunk = arg; @@ -1227,13 +1235,13 @@ static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa, struct sctp_chunk *init, struct sctp_cmd_seq *commands) { - int len; - struct sctp_packet *pkt; + struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family); union sctp_addr_param *addrparm; struct sctp_errhdr *errhdr; - struct sctp_endpoint *ep; char buffer[sizeof(*errhdr) + sizeof(*addrparm)]; - struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family); + struct sctp_endpoint *ep; + struct sctp_packet *pkt; + int len; /* Build the error on the stack. We are way to malloc crazy * throughout the code today. @@ -1410,18 +1418,19 @@ static char sctp_tietags_compare(struct sctp_association *new_asoc, /* Common helper routine for both duplicate and simulataneous INIT * chunk handling. */ -static sctp_disposition_t sctp_sf_do_unexpected_init( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, struct sctp_cmd_seq *commands) +static enum sctp_disposition sctp_sf_do_unexpected_init( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg, *repl, *err_chunk; struct sctp_unrecognized_param *unk_param; struct sctp_association *new_asoc; + enum sctp_disposition retval; struct sctp_packet *packet; - sctp_disposition_t retval; int len; /* 6.10 Bundling @@ -1622,12 +1631,13 @@ cleanup: * * The return value is the disposition of the chunk. 
*/ -sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_5_2_1_siminit( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { /* Call helper to do the real work for both simulataneous and * duplicate INIT chunk handling. @@ -1676,7 +1686,8 @@ sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net, +enum sctp_disposition sctp_sf_do_5_2_2_dupinit( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, @@ -1699,12 +1710,13 @@ sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net, * An unexpected INIT ACK usually indicates the processing of an old or * duplicated INIT chunk. */ -sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_5_2_3_initack( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { /* Per the above section, we'll discard the chunk if we have an * endpoint. If this is an OOTB INIT-ACK, treat it as such. @@ -1720,7 +1732,8 @@ sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net, * Section 5.2.4 * A) In this case, the peer may have restarted. */ -static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, +static enum sctp_disposition sctp_sf_do_dupcook_a( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, @@ -1728,10 +1741,10 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net, struct sctp_association *new_asoc) { struct sctp_init_chunk *peer_init; + enum sctp_disposition disposition; struct sctp_ulpevent *ev; struct sctp_chunk *repl; struct sctp_chunk *err; - sctp_disposition_t disposition; /* new_asoc is a brand-new association, so these are not yet * side effects--it is safe to run them here. @@ -1835,7 +1848,8 @@ nomem: * after responding to the local endpoint's INIT */ /* This case represents an initialization collision. */ -static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net, +static enum sctp_disposition sctp_sf_do_dupcook_b( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, @@ -1906,7 +1920,8 @@ nomem: * but a new tag of its own. */ /* This case represents an initialization collision. */ -static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net, +static enum sctp_disposition sctp_sf_do_dupcook_c( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, @@ -1928,7 +1943,8 @@ static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net, * enter the ESTABLISHED state, if it has not already done so. */ /* This case represents an initialization collision. 
*/ -static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net, +static enum sctp_disposition sctp_sf_do_dupcook_d( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, struct sctp_chunk *chunk, @@ -2023,19 +2039,20 @@ nomem: * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net, +enum sctp_disposition sctp_sf_do_5_2_4_dupcook( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, struct sctp_cmd_seq *commands) { - sctp_disposition_t retval; - struct sctp_chunk *chunk = arg; struct sctp_association *new_asoc; + struct sctp_chunk *chunk = arg; + enum sctp_disposition retval; + struct sctp_chunk *err_chk_p; int error = 0; char action; - struct sctp_chunk *err_chk_p; /* Make sure that the chunk has a valid length from the protocol * perspective. In this case check to make sure we have at least @@ -2141,13 +2158,13 @@ nomem: * * See sctp_sf_do_9_1_abort(). */ -sctp_disposition_t sctp_sf_shutdown_pending_abort( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_shutdown_pending_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -2184,7 +2201,8 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort( * * See sctp_sf_do_9_1_abort(). */ -sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net, +enum sctp_disposition sctp_sf_shutdown_sent_abort( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, @@ -2234,13 +2252,13 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net, * * See sctp_sf_do_9_1_abort(). */ -sctp_disposition_t sctp_sf_shutdown_ack_sent_abort( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_shutdown_ack_sent_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { /* The same T2 timer, so we should be able to use * common function with the SHUTDOWN-SENT state. @@ -2262,7 +2280,8 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_abort( * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net, +enum sctp_disposition sctp_sf_cookie_echoed_err( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, @@ -2326,12 +2345,13 @@ sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net, * * The return value is the disposition of the chunk. 
*/ -static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +static enum sctp_disposition sctp_sf_do_5_2_6_stale( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { int attempts = asoc->init_err_counter + 1; struct sctp_chunk *chunk = arg, *reply; @@ -2448,7 +2468,8 @@ nomem: * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net, +enum sctp_disposition sctp_sf_do_9_1_abort( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, @@ -2485,16 +2506,17 @@ sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net, return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); } -static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, +static enum sctp_disposition __sctp_sf_do_9_1_abort( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, void *arg, struct sctp_cmd_seq *commands) { + __be16 error = SCTP_ERROR_NO_ERROR; struct sctp_chunk *chunk = arg; unsigned int len; - __be16 error = SCTP_ERROR_NO_ERROR; /* See if we have an error cause code in the chunk. */ len = ntohs(chunk->chunk_hdr->length); @@ -2523,16 +2545,17 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, * * See sctp_sf_do_9_1_abort() above. */ -sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_cookie_wait_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { + __be16 error = SCTP_ERROR_NO_ERROR; struct sctp_chunk *chunk = arg; unsigned int len; - __be16 error = SCTP_ERROR_NO_ERROR; if (!sctp_vtag_verify_either(chunk, asoc)) return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); @@ -2562,7 +2585,8 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net, /* * Process an incoming ICMP as an ABORT. (COOKIE-WAIT state) */ -sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net, +enum sctp_disposition sctp_sf_cookie_wait_icmp_abort( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, @@ -2577,12 +2601,13 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net, /* * Process an ABORT. (COOKIE-ECHOED state) */ -sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_cookie_echoed_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { /* There is a single T1 timer, so we should be able to use * common function with the COOKIE-WAIT state. @@ -2595,11 +2620,12 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net, * * This is common code called by several sctp_sf_*_abort() functions above. 
*/ -static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, - struct sctp_cmd_seq *commands, - __be16 error, int sk_err, - const struct sctp_association *asoc, - struct sctp_transport *transport) +static enum sctp_disposition sctp_stop_t1_and_abort( + struct net *net, + struct sctp_cmd_seq *commands, + __be16 error, int sk_err, + const struct sctp_association *asoc, + struct sctp_transport *transport) { pr_debug("%s: ABORT received (INIT)\n", __func__); @@ -2649,15 +2675,16 @@ static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) -{ +enum sctp_disposition sctp_sf_do_9_2_shutdown( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) +{ + enum sctp_disposition disposition; struct sctp_chunk *chunk = arg; - sctp_disposition_t disposition; struct sctp_shutdownhdr *sdh; struct sctp_ulpevent *ev; __u32 ctsn; @@ -2738,12 +2765,13 @@ out: * The Cumulative TSN Ack of the received SHUTDOWN chunk * MUST be processed. */ -sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_9_2_shut_ctsn( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_shutdownhdr *sdh; @@ -2791,14 +2819,15 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net, * that belong to this association, it should discard the INIT chunk and * retransmit the SHUTDOWN ACK chunk. */ -sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_9_2_reshutack( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *chunk = (struct sctp_chunk *) arg; + struct sctp_chunk *chunk = arg; struct sctp_chunk *reply; /* Make sure that the chunk has a valid length */ @@ -2855,12 +2884,12 @@ nomem: * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_ecn_cwr(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_cwrhdr *cwr; @@ -2911,12 +2940,11 @@ sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net, * * The return value is the disposition of the chunk. 
*/ -sctp_disposition_t sctp_sf_do_ecne(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_ecne(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_ecnehdr *ecne; @@ -2968,12 +2996,12 @@ sctp_disposition_t sctp_sf_do_ecne(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_eat_data_6_2(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { union sctp_arg force = SCTP_NOFORCE(); struct sctp_chunk *chunk = arg; @@ -3088,12 +3116,13 @@ discard_noforce: * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_eat_data_fast_4_4( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; int error; @@ -3179,12 +3208,12 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_eat_sack_6_2(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_sackhdr *sackh; @@ -3253,7 +3282,8 @@ sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, * * The return value is the disposition of the chunk. */ -static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, +static enum sctp_disposition sctp_sf_tabort_8_4_8( + struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, const union sctp_subtype type, @@ -3303,12 +3333,12 @@ static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_operr_notify(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_operr_notify(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_errhdr *err; @@ -3341,12 +3371,12 @@ sctp_disposition_t sctp_sf_operr_notify(struct net *net, * * The return value is the disposition. 
*/ -sctp_disposition_t sctp_sf_do_9_2_final(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_9_2_final(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_chunk *reply; @@ -3424,20 +3454,19 @@ nomem: * receiver of the OOTB packet shall discard the OOTB packet and take * no further action. */ -sctp_disposition_t sctp_sf_ootb(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_ootb(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sk_buff *skb = chunk->skb; struct sctp_chunkhdr *ch; struct sctp_errhdr *err; - __u8 *ch_end; - int ootb_shut_ack = 0; int ootb_cookie_ack = 0; + int ootb_shut_ack = 0; + __u8 *ch_end; SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); @@ -3513,16 +3542,17 @@ sctp_disposition_t sctp_sf_ootb(struct net *net, * (endpoint, asoc, type, arg, commands) * * Outputs - * (sctp_disposition_t) + * (enum sctp_disposition) * * The return value is the disposition of the chunk. */ -static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +static enum sctp_disposition sctp_sf_shut_8_4_5( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_packet *packet = NULL; struct sctp_chunk *chunk = arg; @@ -3579,12 +3609,12 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, * chunks. --piggy ] * */ -sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_8_5_1_E_sa(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -3604,17 +3634,18 @@ sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net, } /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. 
*/ -sctp_disposition_t sctp_sf_do_asconf(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_asconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *chunk = arg; - struct sctp_chunk *asconf_ack = NULL; - struct sctp_paramhdr *err_param = NULL; - struct sctp_addiphdr *hdr; - __u32 serial; + struct sctp_paramhdr *err_param = NULL; + struct sctp_chunk *asconf_ack = NULL; + struct sctp_chunk *chunk = arg; + struct sctp_addiphdr *hdr; + __u32 serial; if (!sctp_vtag_verify(chunk, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, @@ -3721,19 +3752,19 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, * When building TLV parameters for the ASCONF Chunk that will add or * delete IP addresses the D0 to D13 rules should be applied: */ -sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *asconf_ack = arg; - struct sctp_chunk *last_asconf = asoc->addip_last_asconf; - struct sctp_chunk *abort; - struct sctp_paramhdr *err_param = NULL; - struct sctp_addiphdr *addip_hdr; - __u32 sent_serial, rcvd_serial; + struct sctp_chunk *last_asconf = asoc->addip_last_asconf; + struct sctp_paramhdr *err_param = NULL; + struct sctp_chunk *asconf_ack = arg; + struct sctp_addiphdr *addip_hdr; + __u32 sent_serial, rcvd_serial; + struct sctp_chunk *abort; if (!sctp_vtag_verify(asconf_ack, asoc)) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, @@ -3840,11 +3871,12 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, } /* RE-CONFIG Section 5.2 Upon reception of an RECONF Chunk. */ -sctp_disposition_t sctp_sf_do_reconf(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_reconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_paramhdr *err_param = NULL; struct sctp_chunk *chunk = arg; @@ -3916,15 +3948,15 @@ sctp_disposition_t sctp_sf_do_reconf(struct net *net, * * The return value is the disposition of the chunk. 
*/ -sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_eat_fwd_tsn(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *chunk = arg; struct sctp_fwdtsn_hdr *fwdtsn_hdr; + struct sctp_chunk *chunk = arg; struct sctp_fwdtsn_skip *skip; __u16 len; __u32 tsn; @@ -3986,16 +4018,16 @@ discard_noforce: return SCTP_DISPOSITION_DISCARD; } -sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_eat_fwd_tsn_fast( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *chunk = arg; struct sctp_fwdtsn_hdr *fwdtsn_hdr; + struct sctp_chunk *chunk = arg; struct sctp_fwdtsn_skip *skip; __u16 len; __u32 tsn; @@ -4079,18 +4111,17 @@ gen_shutdown: * The return value is the disposition of the chunk. */ static enum sctp_ierror sctp_sf_authenticate( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - struct sctp_chunk *chunk) + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + struct sctp_chunk *chunk) { struct sctp_authhdr *auth_hdr; + __u8 *save_digest, *digest; struct sctp_hmac *hmac; unsigned int sig_len; __u16 key_id; - __u8 *save_digest; - __u8 *digest; /* Pull in the auth header, so we can do some more verification */ auth_hdr = (struct sctp_authhdr *)chunk->skb->data; @@ -4154,12 +4185,11 @@ nomem: return SCTP_IERROR_NOMEM; } -sctp_disposition_t sctp_sf_eat_auth(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_eat_auth(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; struct sctp_authhdr *auth_hdr; @@ -4251,12 +4281,12 @@ sctp_disposition_t sctp_sf_eat_auth(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_unk_chunk(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_unk_chunk(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *unk_chunk = arg; struct sctp_chunk *err_chunk; @@ -4331,12 +4361,12 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net, * * The return value is the disposition of the chunk. 
*/ -sctp_disposition_t sctp_sf_discard_chunk(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_discard_chunk(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -4371,12 +4401,11 @@ sctp_disposition_t sctp_sf_discard_chunk(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_pdiscard(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_pdiscard(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, struct sctp_cmd_seq *commands) { SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS); sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); @@ -4399,12 +4428,12 @@ sctp_disposition_t sctp_sf_pdiscard(struct net *net, * We simply tag the chunk as a violation. The state machine will log * the violation and continue. */ -sctp_disposition_t sctp_sf_violation(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_violation(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -4419,14 +4448,14 @@ sctp_disposition_t sctp_sf_violation(struct net *net, /* * Common function to handle a protocol violation. */ -static sctp_disposition_t sctp_sf_abort_violation( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - void *arg, - struct sctp_cmd_seq *commands, - const __u8 *payload, - const size_t paylen) +static enum sctp_disposition sctp_sf_abort_violation( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + void *arg, + struct sctp_cmd_seq *commands, + const __u8 *payload, + const size_t paylen) { struct sctp_packet *packet = NULL; struct sctp_chunk *chunk = arg; @@ -4536,18 +4565,18 @@ nomem: * * Generate an ABORT chunk and terminate the association. */ -static sctp_disposition_t sctp_sf_violation_chunklen( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +static enum sctp_disposition sctp_sf_violation_chunklen( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { static const char err_str[] = "The following chunk had invalid length:"; return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, - sizeof(err_str)); + sizeof(err_str)); } /* @@ -4556,17 +4585,17 @@ static sctp_disposition_t sctp_sf_violation_chunklen( * or accumulated length in multi parameters exceeds the end of the chunk, * the length is considered as invalid. 
*/ -static sctp_disposition_t sctp_sf_violation_paramlen( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, void *ext, - struct sctp_cmd_seq *commands) +static enum sctp_disposition sctp_sf_violation_paramlen( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, void *ext, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *chunk = arg; struct sctp_paramhdr *param = ext; struct sctp_chunk *abort = NULL; + struct sctp_chunk *chunk = arg; if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) goto discard; @@ -4599,18 +4628,18 @@ nomem: * We inform the other end by sending an ABORT with a Protocol Violation * error code. */ -static sctp_disposition_t sctp_sf_violation_ctsn( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +static enum sctp_disposition sctp_sf_violation_ctsn( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:"; return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, - sizeof(err_str)); + sizeof(err_str)); } /* Handle protocol violation of an invalid chunk bundling. For example, @@ -4619,13 +4648,13 @@ static sctp_disposition_t sctp_sf_violation_ctsn( * statement from the specs. Additionally, there might be an attacker * on the path and we may not want to continue this communication. */ -static sctp_disposition_t sctp_sf_violation_chunk( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +static enum sctp_disposition sctp_sf_violation_chunk( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { static const char err_str[] = "The following chunk violates protocol:"; @@ -4633,7 +4662,7 @@ static sctp_disposition_t sctp_sf_violation_chunk( return sctp_sf_violation(net, ep, asoc, type, arg, commands); return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, - sizeof(err_str)); + sizeof(err_str)); } /*************************************************************************** * These are the state functions for handling primitive (Section 10) events. @@ -4695,15 +4724,15 @@ static sctp_disposition_t sctp_sf_violation_chunk( * * The return value is a disposition. */ -sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_prm_asoc(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *repl; struct sctp_association *my_asoc; + struct sctp_chunk *repl; /* The comment below says that we enter COOKIE-WAIT AFTER * sending the INIT, but that doesn't actually work in our @@ -4807,12 +4836,12 @@ nomem: * * The return value is the disposition. 
*/ -sctp_disposition_t sctp_sf_do_prm_send(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_prm_send(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_datamsg *msg = arg; @@ -4846,15 +4875,15 @@ sctp_disposition_t sctp_sf_do_prm_send(struct net *net, * * The return value is the disposition. */ -sctp_disposition_t sctp_sf_do_9_2_prm_shutdown( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_9_2_prm_shutdown( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - int disposition; + enum sctp_disposition disposition; /* From 9.2 Shutdown of an Association * Upon receipt of the SHUTDOWN primitive from its upper @@ -4872,6 +4901,7 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown( disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, arg, commands); } + return disposition; } @@ -4902,13 +4932,13 @@ sctp_disposition_t sctp_sf_do_9_2_prm_shutdown( * * The return value is the disposition. */ -sctp_disposition_t sctp_sf_do_9_1_prm_abort( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_9_1_prm_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { /* From 9.1 Abort of an Association * Upon receipt of the ABORT primitive from its upper @@ -4940,12 +4970,12 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort( } /* We tried an illegal operation on an association which is closed. */ -sctp_disposition_t sctp_sf_error_closed(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_error_closed(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL)); return SCTP_DISPOSITION_CONSUME; @@ -4954,12 +4984,13 @@ sctp_disposition_t sctp_sf_error_closed(struct net *net, /* We tried an illegal operation on an association which is shutting * down. 
*/ -sctp_disposition_t sctp_sf_error_shutdown(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_error_shutdown( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-ESHUTDOWN)); @@ -4980,13 +5011,13 @@ sctp_disposition_t sctp_sf_error_shutdown(struct net *net, * Outputs * (timers) */ -sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_cookie_wait_prm_shutdown( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); @@ -5015,12 +5046,13 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown( * Outputs * (timers) */ -sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_cookie_echoed_prm_shutdown( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { /* There is a single T1 timer, so we should be able to use * common function with the COOKIE-WAIT state. @@ -5042,13 +5074,13 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown( * Outputs * (timers) */ -sctp_disposition_t sctp_sf_cookie_wait_prm_abort( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_cookie_wait_prm_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *abort = arg; @@ -5091,13 +5123,13 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( * Outputs * (timers) */ -sctp_disposition_t sctp_sf_cookie_echoed_prm_abort( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_cookie_echoed_prm_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { /* There is a single T1 timer, so we should be able to use * common function with the COOKIE-WAIT state. 
@@ -5117,13 +5149,13 @@ sctp_disposition_t sctp_sf_cookie_echoed_prm_abort( * Outputs * (timers) */ -sctp_disposition_t sctp_sf_shutdown_pending_prm_abort( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_shutdown_pending_prm_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { /* Stop the T5-shutdown guard timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, @@ -5144,13 +5176,13 @@ sctp_disposition_t sctp_sf_shutdown_pending_prm_abort( * Outputs * (timers) */ -sctp_disposition_t sctp_sf_shutdown_sent_prm_abort( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_shutdown_sent_prm_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { /* Stop the T2-shutdown timer. */ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, @@ -5175,13 +5207,13 @@ sctp_disposition_t sctp_sf_shutdown_sent_prm_abort( * Outputs * (timers) */ -sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_shutdown_ack_sent_prm_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { /* The same T2 timer, so we should be able to use * common function with the SHUTDOWN-SENT state. @@ -5211,7 +5243,7 @@ sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort( * o destination transport address - the transport address of the * association on which a heartbeat should be issued. 
*/ -sctp_disposition_t sctp_sf_do_prm_requestheartbeat( +enum sctp_disposition sctp_sf_do_prm_requestheartbeat( struct net *net, const struct sctp_endpoint *ep, const struct sctp_association *asoc, @@ -5244,12 +5276,12 @@ sctp_disposition_t sctp_sf_do_prm_requestheartbeat( * When an endpoint has an ASCONF signaled change to be sent to the * remote endpoint it should do A1 to A9 */ -sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_prm_asconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -5261,12 +5293,12 @@ sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net, } /* RE-CONFIG Section 5.1 RECONF Chunk Procedures */ -sctp_disposition_t sctp_sf_do_prm_reconf(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_prm_reconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = arg; @@ -5279,13 +5311,13 @@ sctp_disposition_t sctp_sf_do_prm_reconf(struct net *net, * * The return value is the disposition of the primitive. */ -sctp_disposition_t sctp_sf_ignore_primitive( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_ignore_primitive( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { pr_debug("%s: primitive type:%d is ignored\n", __func__, type.primitive); @@ -5303,13 +5335,13 @@ sctp_disposition_t sctp_sf_ignore_primitive( * subscribes to this event, if there is no data to be sent or * retransmit, the stack will immediately send up this notification. */ -sctp_disposition_t sctp_sf_do_no_pending_tsn( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_no_pending_tsn( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_ulpevent *event; @@ -5335,13 +5367,13 @@ sctp_disposition_t sctp_sf_do_no_pending_tsn( * * The return value is the disposition. */ -sctp_disposition_t sctp_sf_do_9_2_start_shutdown( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_9_2_start_shutdown( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *reply; @@ -5405,15 +5437,15 @@ nomem: * * The return value is the disposition. 
*/ -sctp_disposition_t sctp_sf_do_9_2_shutdown_ack( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_9_2_shutdown_ack( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *chunk = (struct sctp_chunk *) arg; + struct sctp_chunk *chunk = arg; struct sctp_chunk *reply; /* There are 2 ways of getting here: @@ -5479,12 +5511,12 @@ nomem: * * The return value is the disposition of the event. */ -sctp_disposition_t sctp_sf_ignore_other(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_ignore_other(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { pr_debug("%s: the event other type:%d is ignored\n", __func__, type.other); @@ -5507,12 +5539,12 @@ sctp_disposition_t sctp_sf_ignore_other(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_6_3_3_rtx(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_transport *transport = arg; @@ -5595,12 +5627,12 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net, * allow. However, an SCTP transmitter MUST NOT be more aggressive than * the following algorithms allow. 
*/ -sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_do_6_2_sack(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS); sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); @@ -5626,16 +5658,17 @@ sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net, * (timers, events) * */ -sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_t1_init_timer_expire( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { + int attempts = asoc->init_err_counter + 1; struct sctp_chunk *repl = NULL; struct sctp_bind_addr *bp; - int attempts = asoc->init_err_counter + 1; pr_debug("%s: timer T1 expired (INIT)\n", __func__); @@ -5690,15 +5723,16 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net, * (timers, events) * */ -sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_t1_cookie_timer_expire( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - struct sctp_chunk *repl = NULL; int attempts = asoc->init_err_counter + 1; + struct sctp_chunk *repl = NULL; pr_debug("%s: timer T1 expired (COOKIE-ECHO)\n", __func__); @@ -5740,12 +5774,13 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net, * the T2-Shutdown timer, giving its peer ample opportunity to transmit * all of its queued DATA chunks that have not yet been sent. 
*/ -sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_t2_timer_expire( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *reply = NULL; @@ -5810,13 +5845,13 @@ nomem: * ADDIP Section 4.1 ASCONF CHunk Procedures * If the T4 RTO timer expires the endpoint should do B1 to B5 */ -sctp_disposition_t sctp_sf_t4_timer_expire( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_t4_timer_expire( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *chunk = asoc->addip_last_asconf; struct sctp_transport *transport = chunk->transport; @@ -5882,12 +5917,13 @@ sctp_disposition_t sctp_sf_t4_timer_expire( * At the expiration of this timer the sender SHOULD abort the association * by sending an ABORT chunk. */ -sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_t5_timer_expire( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { struct sctp_chunk *reply = NULL; @@ -5918,15 +5954,15 @@ nomem: * The work that needs to be done is same as when SHUTDOWN is initiated by * the user. So this routine looks same as sctp_sf_do_9_2_prm_shutdown(). */ -sctp_disposition_t sctp_sf_autoclose_timer_expire( - struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_autoclose_timer_expire( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { - int disposition; + enum sctp_disposition disposition; SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS); @@ -5946,6 +5982,7 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire( disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, arg, commands); } + return disposition; } @@ -5961,12 +5998,11 @@ sctp_disposition_t sctp_sf_autoclose_timer_expire( * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_not_impl(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_not_impl(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, struct sctp_cmd_seq *commands) { return SCTP_DISPOSITION_NOT_IMPL; } @@ -5979,12 +6015,11 @@ sctp_disposition_t sctp_sf_not_impl(struct net *net, * * The return value is the disposition of the chunk. 
*/ -sctp_disposition_t sctp_sf_bug(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_bug(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, struct sctp_cmd_seq *commands) { return SCTP_DISPOSITION_BUG; } @@ -6000,12 +6035,12 @@ sctp_disposition_t sctp_sf_bug(struct net *net, * * The return value is the disposition of the chunk. */ -sctp_disposition_t sctp_sf_timer_ignore(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - const union sctp_subtype type, - void *arg, - struct sctp_cmd_seq *commands) +enum sctp_disposition sctp_sf_timer_ignore(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const union sctp_subtype type, + void *arg, + struct sctp_cmd_seq *commands) { pr_debug("%s: timer %d ignored\n", __func__, type.chunk); @@ -6020,9 +6055,9 @@ sctp_disposition_t sctp_sf_timer_ignore(struct net *net, static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk) { struct sctp_sackhdr *sack; + __u16 num_dup_tsns; unsigned int len; __u16 num_blocks; - __u16 num_dup_tsns; /* Protect ourselves from reading too far into * the skb from a bogus sender. @@ -6044,12 +6079,12 @@ static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk) /* Create an ABORT packet to be sent as a response, with the specified * error causes. */ -static struct sctp_packet *sctp_abort_pkt_new(struct net *net, - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - struct sctp_chunk *chunk, - const void *payload, - size_t paylen) +static struct sctp_packet *sctp_abort_pkt_new( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk, + const void *payload, size_t paylen) { struct sctp_packet *packet; struct sctp_chunk *abort; @@ -6086,14 +6121,14 @@ static struct sctp_packet *sctp_abort_pkt_new(struct net *net, } /* Allocate a packet for responding in the OOTB conditions. */ -static struct sctp_packet *sctp_ootb_pkt_new(struct net *net, - const struct sctp_association *asoc, - const struct sctp_chunk *chunk) +static struct sctp_packet *sctp_ootb_pkt_new( + struct net *net, + const struct sctp_association *asoc, + const struct sctp_chunk *chunk) { - struct sctp_packet *packet; struct sctp_transport *transport; - __u16 sport; - __u16 dport; + struct sctp_packet *packet; + __u16 sport, dport; __u32 vtag; /* Get the source and destination port from the inbound packet. 
*/ @@ -6202,18 +6237,17 @@ static int sctp_eat_data(const struct sctp_association *asoc, struct sctp_chunk *chunk, struct sctp_cmd_seq *commands) { - struct sctp_datahdr *data_hdr; - struct sctp_chunk *err; - size_t datalen; - enum sctp_verb deliver; - int tmp; - __u32 tsn; struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; struct sock *sk = asoc->base.sk; struct net *net = sock_net(sk); - u16 ssn; - u16 sid; + struct sctp_datahdr *data_hdr; + struct sctp_chunk *err; + enum sctp_verb deliver; + size_t datalen; u8 ordered = 0; + u16 ssn, sid; + __u32 tsn; + int tmp; data_hdr = (struct sctp_datahdr *)chunk->skb->data; chunk->subh.data_hdr = data_hdr; -- cgit v1.2.3-55-g7522 From 327c0dab8d1301cd866816de8c7f32eb872cabac Mon Sep 17 00:00:00 2001 From: Xin Long Date: Fri, 11 Aug 2017 10:23:58 +0800 Subject: sctp: fix some indents in sm_make_chunk.c There are some bad indents of functions' defination in sm_make_chunk.c. They have been there since beginning, it was probably caused by that the typedef sctp_chunk_t was replaced with struct sctp_chunk. So it's the best time to fix them in this patchset, it's also to fix some bad indents in other functions' defination in sm_make_chunk.c. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/net/sctp/sm.h | 158 ++++++++++----------- net/sctp/sm_make_chunk.c | 348 +++++++++++++++++++++++------------------------ 2 files changed, 249 insertions(+), 257 deletions(-) diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 33077f317995..2db3d3a9ce1d 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h @@ -184,68 +184,69 @@ __u32 sctp_generate_verification_tag(void); void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag); /* Prototypes for chunk-building functions. 
*/ -struct sctp_chunk *sctp_make_init(const struct sctp_association *, - const struct sctp_bind_addr *, - gfp_t gfp, int vparam_len); -struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *, - const struct sctp_chunk *, - const gfp_t gfp, - const int unkparam_len); -struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *, - const struct sctp_chunk *); -struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *, - const struct sctp_chunk *); -struct sctp_chunk *sctp_make_cwr(const struct sctp_association *, +struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, + const struct sctp_bind_addr *bp, + gfp_t gfp, int vparam_len); +struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + const gfp_t gfp, const int unkparam_len); +struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc, + const struct sctp_chunk *chunk); +struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, + const struct sctp_chunk *chunk); +struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, const __u32 lowest_tsn, - const struct sctp_chunk *); -struct sctp_chunk * sctp_make_datafrag_empty(struct sctp_association *, - const struct sctp_sndrcvinfo *sinfo, - int len, const __u8 flags, - __u16 ssn, gfp_t gfp); -struct sctp_chunk *sctp_make_ecne(const struct sctp_association *, - const __u32); -struct sctp_chunk *sctp_make_sack(const struct sctp_association *); + const struct sctp_chunk *chunk); +struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, + const struct sctp_sndrcvinfo *sinfo, + int len, const __u8 flags, + __u16 ssn, gfp_t gfp); +struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, + const __u32 lowest_tsn); +struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc); struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, const struct sctp_chunk *chunk); struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, - const struct sctp_chunk *); -struct sctp_chunk *sctp_make_shutdown_complete(const struct sctp_association *, - const struct sctp_chunk *); -void sctp_init_cause(struct sctp_chunk *, __be16 cause, size_t); -struct sctp_chunk *sctp_make_abort(const struct sctp_association *, - const struct sctp_chunk *, - const size_t hint); -struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *, - const struct sctp_chunk *, - __u32 tsn); -struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *, - struct msghdr *, size_t msg_len); -struct sctp_chunk *sctp_make_abort_violation(const struct sctp_association *, - const struct sctp_chunk *, - const __u8 *, - const size_t ); -struct sctp_chunk *sctp_make_violation_paramlen(const struct sctp_association *, - const struct sctp_chunk *, - struct sctp_paramhdr *); -struct sctp_chunk *sctp_make_violation_max_retrans(const struct sctp_association *, - const struct sctp_chunk *); -struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *, - const struct sctp_transport *); -struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *, - const struct sctp_chunk *, - const void *payload, - const size_t paylen); -struct sctp_chunk *sctp_make_op_error(const struct sctp_association *, - const struct sctp_chunk *chunk, - __be16 cause_code, - const void *payload, - size_t paylen, - size_t reserve_tail); - -struct sctp_chunk *sctp_make_asconf_update_ip(struct 
sctp_association *, - union sctp_addr *, - struct sockaddr *, - int, __be16); + const struct sctp_chunk *chunk); +struct sctp_chunk *sctp_make_shutdown_complete( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk); +void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause, size_t paylen); +struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + const size_t hint); +struct sctp_chunk *sctp_make_abort_no_data(const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + __u32 tsn); +struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, + struct msghdr *msg, size_t msg_len); +struct sctp_chunk *sctp_make_abort_violation( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + const __u8 *payload, + const size_t paylen); +struct sctp_chunk *sctp_make_violation_paramlen( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + struct sctp_paramhdr *param); +struct sctp_chunk *sctp_make_violation_max_retrans( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk); +struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, + const struct sctp_transport *transport); +struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + const void *payload, + const size_t paylen); +struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + __be16 cause_code, const void *payload, + size_t paylen, size_t reserve_tail); + +struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, + union sctp_addr *laddr, + struct sockaddr *addrs, + int addrcnt, __be16 flags); struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, union sctp_addr *addr); bool sctp_verify_asconf(const struct sctp_association *asoc, @@ -259,27 +260,25 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, __u32 new_cum_tsn, size_t nstreams, struct sctp_fwdtsn_skip *skiplist); struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc); -struct sctp_chunk *sctp_make_strreset_req( - const struct sctp_association *asoc, - __u16 stream_num, __u16 *stream_list, - bool out, bool in); +struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc, + __u16 stream_num, __u16 *stream_list, + bool out, bool in); struct sctp_chunk *sctp_make_strreset_tsnreq( - const struct sctp_association *asoc); + const struct sctp_association *asoc); struct sctp_chunk *sctp_make_strreset_addstrm( - const struct sctp_association *asoc, - __u16 out, __u16 in); -struct sctp_chunk *sctp_make_strreset_resp( - const struct sctp_association *asoc, - __u32 result, __u32 sn); -struct sctp_chunk *sctp_make_strreset_tsnresp( - struct sctp_association *asoc, - __u32 result, __u32 sn, - __u32 sender_tsn, __u32 receiver_tsn); + const struct sctp_association *asoc, + __u16 out, __u16 in); +struct sctp_chunk *sctp_make_strreset_resp(const struct sctp_association *asoc, + __u32 result, __u32 sn); +struct sctp_chunk *sctp_make_strreset_tsnresp(struct sctp_association *asoc, + __u32 result, __u32 sn, + __u32 sender_tsn, + __u32 receiver_tsn); bool sctp_verify_reconf(const struct sctp_association *asoc, struct sctp_chunk *chunk, struct sctp_paramhdr **errp); -void sctp_chunk_assign_tsn(struct sctp_chunk *); -void sctp_chunk_assign_ssn(struct sctp_chunk *); +void sctp_chunk_assign_tsn(struct sctp_chunk *chunk); +void 
sctp_chunk_assign_ssn(struct sctp_chunk *chunk); /* Prototypes for stream-processing functions. */ struct sctp_chunk *sctp_process_strreset_outreq( @@ -322,11 +321,12 @@ void sctp_generate_proto_unreach_event(unsigned long peer); void sctp_ootb_pkt_free(struct sctp_packet *packet); -struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - struct sctp_chunk *chunk, - gfp_t gfp, int *err, - struct sctp_chunk **err_chk_p); +struct sctp_association *sctp_unpack_cookie( + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk, + gfp_t gfp, int *err, + struct sctp_chunk **err_chk_p); /* 3rd level prototypes */ __u32 sctp_generate_tag(const struct sctp_endpoint *ep); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 51de638a88b2..ca8f196b6c6c 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -132,8 +132,8 @@ static const struct sctp_paramhdr prsctp_param = { * provided chunk, as most cause codes will be embedded inside an * abort chunk. */ -void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, - size_t paylen) +void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, + size_t paylen) { struct sctp_errhdr err; __u16 len; @@ -151,7 +151,7 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code, * if there isn't enough space in the op error chunk */ static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, - size_t paylen) + size_t paylen) { struct sctp_errhdr err; __u16 len; @@ -213,32 +213,31 @@ static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, * Supported Address Types (Note 4) Optional 12 */ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, - const struct sctp_bind_addr *bp, - gfp_t gfp, int vparam_len) + const struct sctp_bind_addr *bp, + gfp_t gfp, int vparam_len) { struct net *net = sock_net(asoc->base.sk); + struct sctp_supported_ext_param ext_param; + struct sctp_adaptation_ind_param aiparam; + struct sctp_paramhdr *auth_chunks = NULL; + struct sctp_paramhdr *auth_hmacs = NULL; + struct sctp_supported_addrs_param sat; struct sctp_endpoint *ep = asoc->ep; - struct sctp_inithdr init; - union sctp_params addrs; - size_t chunksize; struct sctp_chunk *retval = NULL; int num_types, addrs_len = 0; + struct sctp_inithdr init; + union sctp_params addrs; struct sctp_sock *sp; - struct sctp_supported_addrs_param sat; + __u8 extensions[4]; + size_t chunksize; __be16 types[2]; - struct sctp_adaptation_ind_param aiparam; - struct sctp_supported_ext_param ext_param; int num_ext = 0; - __u8 extensions[4]; - struct sctp_paramhdr *auth_chunks = NULL, - *auth_hmacs = NULL; /* RFC 2960 3.3.2 Initiation (INIT) (1) * * Note 1: The INIT chunks can contain multiple addresses that * can be IPv4 and/or IPv6 in any combination. */ - retval = NULL; /* Convert the provided bind address list to raw format. 
*/ addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp); @@ -380,26 +379,24 @@ nodata: } struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, - const struct sctp_chunk *chunk, - gfp_t gfp, int unkparam_len) + const struct sctp_chunk *chunk, + gfp_t gfp, int unkparam_len) { + struct sctp_supported_ext_param ext_param; + struct sctp_adaptation_ind_param aiparam; + struct sctp_paramhdr *auth_chunks = NULL; + struct sctp_paramhdr *auth_random = NULL; + struct sctp_paramhdr *auth_hmacs = NULL; + struct sctp_chunk *retval = NULL; + struct sctp_cookie_param *cookie; struct sctp_inithdr initack; - struct sctp_chunk *retval; union sctp_params addrs; struct sctp_sock *sp; - int addrs_len; - struct sctp_cookie_param *cookie; - int cookie_len; + __u8 extensions[4]; size_t chunksize; - struct sctp_adaptation_ind_param aiparam; - struct sctp_supported_ext_param ext_param; int num_ext = 0; - __u8 extensions[4]; - struct sctp_paramhdr *auth_chunks = NULL, - *auth_hmacs = NULL, - *auth_random = NULL; - - retval = NULL; + int cookie_len; + int addrs_len; /* Note: there may be no addresses to embed. */ addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp); @@ -562,11 +559,11 @@ nomem_cookie: * to insure interoperability. */ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc, - const struct sctp_chunk *chunk) + const struct sctp_chunk *chunk) { struct sctp_chunk *retval; - void *cookie; int cookie_len; + void *cookie; cookie = asoc->peer.cookie; cookie_len = asoc->peer.cookie_len; @@ -614,7 +611,7 @@ nodata: * Set to zero on transmit and ignored on receipt. */ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, - const struct sctp_chunk *chunk) + const struct sctp_chunk *chunk) { struct sctp_chunk *retval; @@ -659,8 +656,8 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, * Note: The CWR is considered a Control chunk. */ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, - const __u32 lowest_tsn, - const struct sctp_chunk *chunk) + const __u32 lowest_tsn, + const struct sctp_chunk *chunk) { struct sctp_chunk *retval; struct sctp_cwrhdr cwr; @@ -694,7 +691,7 @@ nodata: /* Make an ECNE chunk. This is a congestion experienced report. */ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, - const __u32 lowest_tsn) + const __u32 lowest_tsn) { struct sctp_chunk *retval; struct sctp_ecnehdr ecne; @@ -715,9 +712,9 @@ nodata: * parameters. However, do not populate the data payload. 
*/ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, - const struct sctp_sndrcvinfo *sinfo, - int data_len, __u8 flags, __u16 ssn, - gfp_t gfp) + const struct sctp_sndrcvinfo *sinfo, + int data_len, __u8 flags, __u16 ssn, + gfp_t gfp) { struct sctp_chunk *retval; struct sctp_datahdr dp; @@ -755,15 +752,15 @@ nodata: */ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc) { - struct sctp_chunk *retval; - struct sctp_sackhdr sack; - int len; - __u32 ctsn; - __u16 num_gabs, num_dup_tsns; - struct sctp_association *aptr = (struct sctp_association *)asoc; struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; + struct sctp_association *aptr = (struct sctp_association *)asoc; struct sctp_gap_ack_block gabs[SCTP_MAX_GABS]; + __u16 num_gabs, num_dup_tsns; struct sctp_transport *trans; + struct sctp_chunk *retval; + struct sctp_sackhdr sack; + __u32 ctsn; + int len; memset(gabs, 0, sizeof(gabs)); ctsn = sctp_tsnmap_get_ctsn(map); @@ -879,7 +876,7 @@ nodata: } struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, - const struct sctp_chunk *chunk) + const struct sctp_chunk *chunk) { struct sctp_chunk *retval; @@ -902,8 +899,8 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, } struct sctp_chunk *sctp_make_shutdown_complete( - const struct sctp_association *asoc, - const struct sctp_chunk *chunk) + const struct sctp_association *asoc, + const struct sctp_chunk *chunk) { struct sctp_chunk *retval; __u8 flags = 0; @@ -936,8 +933,8 @@ struct sctp_chunk *sctp_make_shutdown_complete( * association, except when responding to an INIT (sctpimpguide 2.41). */ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, - const struct sctp_chunk *chunk, - const size_t hint) + const struct sctp_chunk *chunk, + const size_t hint) { struct sctp_chunk *retval; __u8 flags = 0; @@ -973,8 +970,9 @@ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, /* Helper to create ABORT with a NO_USER_DATA error. */ struct sctp_chunk *sctp_make_abort_no_data( - const struct sctp_association *asoc, - const struct sctp_chunk *chunk, __u32 tsn) + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + __u32 tsn) { struct sctp_chunk *retval; __be32 payload; @@ -1054,8 +1052,8 @@ err_chunk: static void *sctp_addto_param(struct sctp_chunk *chunk, int len, const void *data) { - void *target; int chunklen = ntohs(chunk->chunk_hdr->length); + void *target; target = skb_put(chunk->skb, len); @@ -1073,10 +1071,10 @@ static void *sctp_addto_param(struct sctp_chunk *chunk, int len, /* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. 
*/ struct sctp_chunk *sctp_make_abort_violation( - const struct sctp_association *asoc, - const struct sctp_chunk *chunk, - const __u8 *payload, - const size_t paylen) + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + const __u8 *payload, + const size_t paylen) { struct sctp_chunk *retval; struct sctp_paramhdr phdr; @@ -1099,14 +1097,14 @@ end: } struct sctp_chunk *sctp_make_violation_paramlen( - const struct sctp_association *asoc, - const struct sctp_chunk *chunk, - struct sctp_paramhdr *param) + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + struct sctp_paramhdr *param) { - struct sctp_chunk *retval; static const char error[] = "The following parameter had invalid length:"; size_t payload_len = sizeof(error) + sizeof(struct sctp_errhdr) + sizeof(*param); + struct sctp_chunk *retval; retval = sctp_make_abort(asoc, chunk, payload_len); if (!retval) @@ -1122,12 +1120,12 @@ nodata: } struct sctp_chunk *sctp_make_violation_max_retrans( - const struct sctp_association *asoc, - const struct sctp_chunk *chunk) + const struct sctp_association *asoc, + const struct sctp_chunk *chunk) { - struct sctp_chunk *retval; static const char error[] = "Association exceeded its max_retans count"; size_t payload_len = sizeof(error) + sizeof(struct sctp_errhdr); + struct sctp_chunk *retval; retval = sctp_make_abort(asoc, chunk, payload_len); if (!retval) @@ -1171,8 +1169,9 @@ nodata: } struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc, - const struct sctp_chunk *chunk, - const void *payload, const size_t paylen) + const struct sctp_chunk *chunk, + const void *payload, + const size_t paylen) { struct sctp_chunk *retval; @@ -1203,9 +1202,9 @@ nodata: * This routine can be used for containing multiple causes in the chunk. */ static struct sctp_chunk *sctp_make_op_error_space( - const struct sctp_association *asoc, - const struct sctp_chunk *chunk, - size_t size) + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + size_t size) { struct sctp_chunk *retval; @@ -1237,8 +1236,8 @@ nodata: * to report all the errors, if the incoming chunk is large */ static inline struct sctp_chunk *sctp_make_op_error_fixed( - const struct sctp_association *asoc, - const struct sctp_chunk *chunk) + const struct sctp_association *asoc, + const struct sctp_chunk *chunk) { size_t size = asoc ? asoc->pathmtu : 0; @@ -1250,9 +1249,9 @@ static inline struct sctp_chunk *sctp_make_op_error_fixed( /* Create an Operation Error chunk. 
*/ struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, - const struct sctp_chunk *chunk, - __be16 cause_code, const void *payload, - size_t paylen, size_t reserve_tail) + const struct sctp_chunk *chunk, + __be16 cause_code, const void *payload, + size_t paylen, size_t reserve_tail) { struct sctp_chunk *retval; @@ -1271,9 +1270,9 @@ nodata: struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc) { - struct sctp_chunk *retval; - struct sctp_hmac *hmac_desc; struct sctp_authhdr auth_hdr; + struct sctp_hmac *hmac_desc; + struct sctp_chunk *retval; __u8 *hmac; /* Get the first hmac that the peer told us to use */ @@ -1319,8 +1318,8 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc) * */ struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, - const struct sctp_association *asoc, - struct sock *sk, gfp_t gfp) + const struct sctp_association *asoc, + struct sock *sk, gfp_t gfp) { struct sctp_chunk *retval; @@ -1372,11 +1371,11 @@ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk) * arguments, reserving enough space for a 'paylen' byte payload. */ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, - __u8 type, __u8 flags, int paylen, - gfp_t gfp) + __u8 type, __u8 flags, int paylen, + gfp_t gfp) { - struct sctp_chunk *retval; struct sctp_chunkhdr *chunk_hdr; + struct sctp_chunk *retval; struct sk_buff *skb; struct sock *sk; @@ -1470,9 +1469,9 @@ void sctp_chunk_put(struct sctp_chunk *ch) */ void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) { - void *target; int chunklen = ntohs(chunk->chunk_hdr->length); int padlen = SCTP_PAD4(chunklen) - chunklen; + void *target; skb_put_zero(chunk->skb, padlen); target = skb_put_data(chunk->skb, data, len); @@ -1525,11 +1524,10 @@ int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len, */ void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) { - struct sctp_datamsg *msg; - struct sctp_chunk *lchunk; struct sctp_stream *stream; - __u16 ssn; - __u16 sid; + struct sctp_chunk *lchunk; + struct sctp_datamsg *msg; + __u16 ssn, sid; if (chunk->has_ssn) return; @@ -1574,8 +1572,8 @@ void sctp_chunk_assign_tsn(struct sctp_chunk *chunk) /* Create a CLOSED association to use with an incoming packet. */ struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, - struct sctp_chunk *chunk, - gfp_t gfp) + struct sctp_chunk *chunk, + gfp_t gfp) { struct sctp_association *asoc; enum sctp_scope scope; @@ -1602,8 +1600,8 @@ static struct sctp_cookie_param *sctp_pack_cookie( const struct sctp_endpoint *ep, const struct sctp_association *asoc, const struct sctp_chunk *init_chunk, - int *cookie_len, - const __u8 *raw_addrs, int addrs_len) + int *cookie_len, const __u8 *raw_addrs, + int addrs_len) { struct sctp_signed_cookie *cookie; struct sctp_cookie_param *retval; @@ -1690,19 +1688,19 @@ nodata: /* Unpack the cookie from COOKIE ECHO chunk, recreating the association. 
*/ struct sctp_association *sctp_unpack_cookie( - const struct sctp_endpoint *ep, - const struct sctp_association *asoc, - struct sctp_chunk *chunk, gfp_t gfp, - int *error, struct sctp_chunk **errp) + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk, gfp_t gfp, + int *error, struct sctp_chunk **errp) { struct sctp_association *retval = NULL; + int headersize, bodysize, fixed_size; struct sctp_signed_cookie *cookie; + struct sk_buff *skb = chunk->skb; struct sctp_cookie *bear_cookie; - int headersize, bodysize, fixed_size; __u8 *digest = ep->digest; - unsigned int len; enum sctp_scope scope; - struct sk_buff *skb = chunk->skb; + unsigned int len; ktime_t kt; /* Header size is static data prior to the actual cookie, including @@ -1974,8 +1972,8 @@ static int sctp_process_hn_param(const struct sctp_association *asoc, static int sctp_verify_ext_param(struct net *net, union sctp_params param) { __u16 num_ext = ntohs(param.p->length) - sizeof(struct sctp_paramhdr); - int have_auth = 0; int have_asconf = 0; + int have_auth = 0; int i; for (i = 0; i < num_ext; i++) { @@ -2005,10 +2003,10 @@ static int sctp_verify_ext_param(struct net *net, union sctp_params param) } static void sctp_process_ext_param(struct sctp_association *asoc, - union sctp_params param) + union sctp_params param) { - struct net *net = sock_net(asoc->base.sk); __u16 num_ext = ntohs(param.p->length) - sizeof(struct sctp_paramhdr); + struct net *net = sock_net(asoc->base.sk); int i; for (i = 0; i < num_ext; i++) { @@ -2309,13 +2307,13 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk, struct sctp_init_chunk *peer_init, gfp_t gfp) { struct net *net = sock_net(asoc->base.sk); - union sctp_params param; struct sctp_transport *transport; struct list_head *pos, *temp; - struct sctp_af *af; + union sctp_params param; union sctp_addr addr; - char *cookie; + struct sctp_af *af; int src_match = 0; + char *cookie; /* We must include the address that the INIT packet came from. * This is the only address that matters for an INIT packet. @@ -2499,16 +2497,15 @@ static int sctp_process_param(struct sctp_association *asoc, gfp_t gfp) { struct net *net = sock_net(asoc->base.sk); - union sctp_addr addr; - int i; - __u16 sat; - int retval = 1; - enum sctp_scope scope; - u32 stale; - struct sctp_af *af; + struct sctp_endpoint *ep = asoc->ep; union sctp_addr_param *addr_param; struct sctp_transport *t; - struct sctp_endpoint *ep = asoc->ep; + enum sctp_scope scope; + union sctp_addr addr; + struct sctp_af *af; + int retval = 1, i; + u32 stale; + __u16 sat; /* We maintain all INIT parameters in network byte order all the * time. 
This allows us to not worry about whether the parameters @@ -2806,22 +2803,20 @@ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc, * */ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, - union sctp_addr *laddr, - struct sockaddr *addrs, - int addrcnt, - __be16 flags) + union sctp_addr *laddr, + struct sockaddr *addrs, + int addrcnt, __be16 flags) { + union sctp_addr_param addr_param; struct sctp_addip_param param; - struct sctp_chunk *retval; - union sctp_addr_param addr_param; - union sctp_addr *addr; - void *addr_buf; - struct sctp_af *af; - int paramlen = sizeof(param); - int addr_param_len = 0; - int totallen = 0; - int i; - int del_pickup = 0; + int paramlen = sizeof(param); + struct sctp_chunk *retval; + int addr_param_len = 0; + union sctp_addr *addr; + int totallen = 0, i; + int del_pickup = 0; + struct sctp_af *af; + void *addr_buf; /* Get total length of all the address parameters. */ addr_buf = addrs; @@ -2897,12 +2892,12 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, union sctp_addr *addr) { + struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); + union sctp_addr_param addrparam; struct sctp_addip_param param; - struct sctp_chunk *retval; - int len = sizeof(param); - union sctp_addr_param addrparam; - int addrlen; - struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); + struct sctp_chunk *retval; + int len = sizeof(param); + int addrlen; addrlen = af->to_addr_param(addr, &addrparam); if (!addrlen) @@ -2946,9 +2941,9 @@ struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc, __u32 serial, int vparam_len) { - struct sctp_addiphdr asconf; - struct sctp_chunk *retval; - int length = sizeof(asconf) + vparam_len; + struct sctp_addiphdr asconf; + struct sctp_chunk *retval; + int length = sizeof(asconf) + vparam_len; /* Create the chunk. 
*/ retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length, @@ -2970,10 +2965,10 @@ static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id, struct sctp_addip_param *asconf_param) { struct sctp_addip_param ack_param; - struct sctp_errhdr err_param; - int asconf_param_len = 0; - int err_param_len = 0; - __be16 response_type; + struct sctp_errhdr err_param; + int asconf_param_len = 0; + int err_param_len = 0; + __be16 response_type; if (SCTP_ERROR_NO_ERROR == err_code) { response_type = SCTP_PARAM_SUCCESS_REPORT; @@ -3011,10 +3006,10 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, struct sctp_chunk *asconf, struct sctp_addip_param *asconf_param) { + union sctp_addr_param *addr_param; struct sctp_transport *peer; - struct sctp_af *af; union sctp_addr addr; - union sctp_addr_param *addr_param; + struct sctp_af *af; addr_param = (void *)asconf_param + sizeof(*asconf_param); @@ -3142,8 +3137,8 @@ bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_paramhdr **errp) { struct sctp_addip_chunk *addip; - union sctp_params param; bool addr_param_seen = false; + union sctp_params param; addip = (struct sctp_addip_chunk *)chunk->chunk_hdr; sctp_walk_params(param, addip, addip_hdr.params) { @@ -3209,16 +3204,15 @@ bool sctp_verify_asconf(const struct sctp_association *asoc, struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, struct sctp_chunk *asconf) { + union sctp_addr_param *addr_param; struct sctp_addip_chunk *addip; + struct sctp_chunk *asconf_ack; bool all_param_pass = true; + struct sctp_addiphdr *hdr; + int length = 0, chunk_len; union sctp_params param; - struct sctp_addiphdr *hdr; - union sctp_addr_param *addr_param; - struct sctp_chunk *asconf_ack; - __be16 err_code; - int length = 0; - int chunk_len; - __u32 serial; + __be16 err_code; + __u32 serial; addip = (struct sctp_addip_chunk *)asconf->chunk_hdr; chunk_len = ntohs(asconf->chunk_hdr->length) - @@ -3295,12 +3289,12 @@ done: static void sctp_asconf_param_success(struct sctp_association *asoc, struct sctp_addip_param *asconf_param) { - struct sctp_af *af; - union sctp_addr addr; struct sctp_bind_addr *bp = &asoc->base.bind_addr; union sctp_addr_param *addr_param; - struct sctp_transport *transport; struct sctp_sockaddr_entry *saddr; + struct sctp_transport *transport; + union sctp_addr addr; + struct sctp_af *af; addr_param = (void *)asconf_param + sizeof(*asconf_param); @@ -3357,10 +3351,10 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, int no_err) { struct sctp_addip_param *asconf_ack_param; - struct sctp_errhdr *err_param; - int length; - int asconf_ack_len; - __be16 err_code; + struct sctp_errhdr *err_param; + int asconf_ack_len; + __be16 err_code; + int length; if (no_err) err_code = SCTP_ERROR_NO_ERROR; @@ -3409,15 +3403,15 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, int sctp_process_asconf_ack(struct sctp_association *asoc, struct sctp_chunk *asconf_ack) { - struct sctp_chunk *asconf = asoc->addip_last_asconf; - union sctp_addr_param *addr_param; - struct sctp_addip_param *asconf_param; - int length = 0; - int asconf_len = asconf->skb->len; - int all_param_pass = 0; - int no_err = 1; - int retval = 0; - __be16 err_code = SCTP_ERROR_NO_ERROR; + struct sctp_chunk *asconf = asoc->addip_last_asconf; + struct sctp_addip_param *asconf_param; + __be16 err_code = SCTP_ERROR_NO_ERROR; + union sctp_addr_param *addr_param; + int asconf_len = asconf->skb->len; + int all_param_pass = 0; + int length 
= 0; + int no_err = 1; + int retval = 0; /* Skip the chunkhdr and addiphdr from the last asconf sent and store * a pointer to address parameter. @@ -3544,9 +3538,8 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, * \ \ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ -static struct sctp_chunk *sctp_make_reconf( - const struct sctp_association *asoc, - int length) +static struct sctp_chunk *sctp_make_reconf(const struct sctp_association *asoc, + int length) { struct sctp_reconf_chunk *reconf; struct sctp_chunk *retval; @@ -3597,9 +3590,9 @@ static struct sctp_chunk *sctp_make_reconf( * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct sctp_chunk *sctp_make_strreset_req( - const struct sctp_association *asoc, - __u16 stream_num, __u16 *stream_list, - bool out, bool in) + const struct sctp_association *asoc, + __u16 stream_num, __u16 *stream_list, + bool out, bool in) { struct sctp_strreset_outreq outreq; __u16 stream_len = stream_num * 2; @@ -3651,7 +3644,7 @@ struct sctp_chunk *sctp_make_strreset_req( * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct sctp_chunk *sctp_make_strreset_tsnreq( - const struct sctp_association *asoc) + const struct sctp_association *asoc) { struct sctp_strreset_tsnreq tsnreq; __u16 length = sizeof(tsnreq); @@ -3682,8 +3675,8 @@ struct sctp_chunk *sctp_make_strreset_tsnreq( * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ struct sctp_chunk *sctp_make_strreset_addstrm( - const struct sctp_association *asoc, - __u16 out, __u16 in) + const struct sctp_association *asoc, + __u16 out, __u16 in) { struct sctp_strreset_addstrm addstrm; __u16 size = sizeof(addstrm); @@ -3727,9 +3720,8 @@ struct sctp_chunk *sctp_make_strreset_addstrm( * | Result | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ -struct sctp_chunk *sctp_make_strreset_resp( - const struct sctp_association *asoc, - __u32 result, __u32 sn) +struct sctp_chunk *sctp_make_strreset_resp(const struct sctp_association *asoc, + __u32 result, __u32 sn) { struct sctp_strreset_resp resp; __u16 length = sizeof(resp); @@ -3764,10 +3756,10 @@ struct sctp_chunk *sctp_make_strreset_resp( * | Receiver's Next TSN (optional) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ -struct sctp_chunk *sctp_make_strreset_tsnresp( - struct sctp_association *asoc, - __u32 result, __u32 sn, - __u32 sender_tsn, __u32 receiver_tsn) +struct sctp_chunk *sctp_make_strreset_tsnresp(struct sctp_association *asoc, + __u32 result, __u32 sn, + __u32 sender_tsn, + __u32 receiver_tsn) { struct sctp_strreset_resptsn tsnresp; __u16 length = sizeof(tsnresp); -- cgit v1.2.3-55-g7522 From 3dfe55d9cae7413c1bd12cbc0f9145384f753014 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 11 Aug 2017 19:33:04 +0100 Subject: Bluetooth: kfree tmp rather than an alias to it While the kfree of dhkey_a is of the same address of tmp, it probably is clearer and more human readable if tmp is kfree'd rather than dhkey_a. 
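For readers unfamiliar with the pattern being cleaned up here, the sketch below illustrates it with hypothetical buffer names (it is a minimal sketch, not the selftest.c code itself): one kmalloc()'d buffer is carved into aliases, and the cleanup path passes the pointer returned by the allocator to kfree() rather than an alias derived from it, which keeps the alloc/free pairing obvious to readers and to static analyzers. Freeing the alias would be equally legal when it points at the start of the allocation; the change is purely about readability.

    #include <linux/slab.h>
    #include <linux/types.h>

    /* Minimal sketch, assuming a 64-byte scratch buffer split into two
     * 32-byte halves; all names here are illustrative only.
     */
    static int example_compute(void)
    {
            u8 *tmp, *half_a, *half_b;
            int ret = 0;

            tmp = kmalloc(64, GFP_KERNEL);
            if (!tmp)
                    return -ENOMEM;

            half_a = &tmp[0];       /* alias into tmp */
            half_b = &tmp[32];      /* alias into tmp */

            /* ... use half_a and half_b ... */

            /* Free what was allocated: tmp, not one of its aliases. */
            kfree(tmp);
            return ret;
    }
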
Detected by CoverityScan, CID#1448650 ("Free of address-of expression") Signed-off-by: Colin Ian King Signed-off-by: Marcel Holtmann --- net/bluetooth/selftest.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c index ee92c925ecc5..34a1227f4391 100644 --- a/net/bluetooth/selftest.c +++ b/net/bluetooth/selftest.c @@ -164,7 +164,7 @@ static int __init test_ecdh_sample(const u8 priv_a[32], const u8 priv_b[32], ret = -EINVAL; out: - kfree(dhkey_a); + kfree(tmp); return ret; } -- cgit v1.2.3-55-g7522 From c5ebc4409f2bb2c0b053c204ba197c6b71527eed Mon Sep 17 00:00:00 2001 From: Girish Moodalbail Date: Wed, 9 Aug 2017 01:09:28 -0700 Subject: geneve: use netlink_ext_ack for error reporting in rtnl operations Add extack error messages for failure paths while creating/modifying geneve devices. Once extack support is added to iproute2, more meaningful and helpful error messages will be displayed making it easy for users to discern what went wrong. Before: ======= $ ip link add gen1 address 0:1:2:3:4:5:6 type geneve id 200 \ remote 192.168.13.2 RTNETLINK answers: Invalid argument After: ====== $ ip link add gen1 address 0:1:2:3:4:5:6 type geneve id 200 \ remote 192.168.13.2 Error: Provided link layer address is not Ethernet Also, netdev_dbg() calls used to log errors associated with Netlink request have been removed. Signed-off-by: Girish Moodalbail Signed-off-by: David S. Miller --- drivers/net/geneve.c | 128 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 92 insertions(+), 36 deletions(-) diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 8b8565dd2afb..f6404074b7b0 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1086,21 +1086,33 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { if (tb[IFLA_ADDRESS]) { - if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) + if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], + "Provided link layer address is not Ethernet"); return -EINVAL; + } - if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) + if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], + "Provided Ethernet address is not unicast"); return -EADDRNOTAVAIL; + } } - if (!data) + if (!data) { + NL_SET_ERR_MSG(extack, + "Not enough attributes provided to perform the operation"); return -EINVAL; + } if (data[IFLA_GENEVE_ID]) { __u32 vni = nla_get_u32(data[IFLA_GENEVE_ID]); - if (vni >= GENEVE_N_VID) + if (vni >= GENEVE_N_VID) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_ID], + "Geneve ID must be lower than 16777216"); return -ERANGE; + } } return 0; @@ -1158,6 +1170,7 @@ static bool geneve_dst_addr_equal(struct ip_tunnel_info *a, } static int geneve_configure(struct net *net, struct net_device *dev, + struct netlink_ext_ack *extack, const struct ip_tunnel_info *info, bool metadata, bool ipv6_rx_csum) { @@ -1166,8 +1179,11 @@ static int geneve_configure(struct net *net, struct net_device *dev, bool tun_collect_md, tun_on_same_port; int err, encap_len; - if (metadata && !is_tnl_info_zero(info)) + if (metadata && !is_tnl_info_zero(info)) { + NL_SET_ERR_MSG(extack, + "Device is externally controlled, so attributes (VNI, Port, and so on) must not be specified"); return -EINVAL; + } geneve->net = net; geneve->dev = dev; @@ -1188,11 +1204,17 @@ static int geneve_configure(struct net *net, struct net_device *dev, dev->needed_headroom = encap_len + ETH_HLEN; 
if (metadata) { - if (tun_on_same_port) + if (tun_on_same_port) { + NL_SET_ERR_MSG(extack, + "There can be only one externally controlled device on a destination port"); return -EPERM; + } } else { - if (tun_collect_md) + if (tun_collect_md) { + NL_SET_ERR_MSG(extack, + "There already exists an externally controlled device on this destination port"); return -EPERM; + } } dst_cache_reset(&geneve->info.dst_cache); @@ -1214,31 +1236,41 @@ static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port) info->key.tp_dst = htons(dst_port); } -static int geneve_nl2info(struct net_device *dev, struct nlattr *tb[], - struct nlattr *data[], struct ip_tunnel_info *info, - bool *metadata, bool *use_udp6_rx_checksums, - bool changelink) +static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack, + struct ip_tunnel_info *info, bool *metadata, + bool *use_udp6_rx_checksums, bool changelink) { - if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) + int attrtype; + + if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) { + NL_SET_ERR_MSG(extack, + "Cannot specify both IPv4 and IPv6 Remote addresses"); return -EINVAL; + } if (data[IFLA_GENEVE_REMOTE]) { - if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) - return -EOPNOTSUPP; + if (changelink && (ip_tunnel_info_af(info) == AF_INET6)) { + attrtype = IFLA_GENEVE_REMOTE; + goto change_notsup; + } info->key.u.ipv4.dst = nla_get_in_addr(data[IFLA_GENEVE_REMOTE]); if (IN_MULTICAST(ntohl(info->key.u.ipv4.dst))) { - netdev_dbg(dev, "multicast remote is unsupported\n"); + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE], + "Remote IPv4 address cannot be Multicast"); return -EINVAL; } } if (data[IFLA_GENEVE_REMOTE6]) { #if IS_ENABLED(CONFIG_IPV6) - if (changelink && (ip_tunnel_info_af(info) == AF_INET)) - return -EOPNOTSUPP; + if (changelink && (ip_tunnel_info_af(info) == AF_INET)) { + attrtype = IFLA_GENEVE_REMOTE6; + goto change_notsup; + } info->mode = IP_TUNNEL_INFO_IPV6; info->key.u.ipv6.dst = @@ -1246,16 +1278,20 @@ static int geneve_nl2info(struct net_device *dev, struct nlattr *tb[], if (ipv6_addr_type(&info->key.u.ipv6.dst) & IPV6_ADDR_LINKLOCAL) { - netdev_dbg(dev, "link-local remote is unsupported\n"); + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], + "Remote IPv6 address cannot be link-local"); return -EINVAL; } if (ipv6_addr_is_multicast(&info->key.u.ipv6.dst)) { - netdev_dbg(dev, "multicast remote is unsupported\n"); + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], + "Remote IPv6 address cannot be Multicast"); return -EINVAL; } info->key.tun_flags |= TUNNEL_CSUM; *use_udp6_rx_checksums = true; #else + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6], + "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; #endif } @@ -1271,8 +1307,10 @@ static int geneve_nl2info(struct net_device *dev, struct nlattr *tb[], tvni[2] = vni & 0x000000ff; tunid = vni_to_tunnel_id(tvni); - if (changelink && (tunid != info->key.tun_id)) - return -EOPNOTSUPP; + if (changelink && (tunid != info->key.tun_id)) { + attrtype = IFLA_GENEVE_ID; + goto change_notsup; + } info->key.tun_id = tunid; } @@ -1285,44 +1323,61 @@ static int geneve_nl2info(struct net_device *dev, struct nlattr *tb[], if (data[IFLA_GENEVE_LABEL]) { info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & IPV6_FLOWLABEL_MASK; - if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) + if (info->key.label && (!(info->mode & IP_TUNNEL_INFO_IPV6))) { + NL_SET_ERR_MSG_ATTR(extack, 
data[IFLA_GENEVE_LABEL], + "Label attribute only applies for IPv6 Geneve devices"); return -EINVAL; + } } if (data[IFLA_GENEVE_PORT]) { - if (changelink) - return -EOPNOTSUPP; + if (changelink) { + attrtype = IFLA_GENEVE_PORT; + goto change_notsup; + } info->key.tp_dst = nla_get_be16(data[IFLA_GENEVE_PORT]); } if (data[IFLA_GENEVE_COLLECT_METADATA]) { - if (changelink) - return -EOPNOTSUPP; + if (changelink) { + attrtype = IFLA_GENEVE_COLLECT_METADATA; + goto change_notsup; + } *metadata = true; } if (data[IFLA_GENEVE_UDP_CSUM]) { - if (changelink) - return -EOPNOTSUPP; + if (changelink) { + attrtype = IFLA_GENEVE_UDP_CSUM; + goto change_notsup; + } if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) info->key.tun_flags |= TUNNEL_CSUM; } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { - if (changelink) - return -EOPNOTSUPP; + if (changelink) { + attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX; + goto change_notsup; + } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) info->key.tun_flags &= ~TUNNEL_CSUM; } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) { - if (changelink) - return -EOPNOTSUPP; + if (changelink) { + attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX; + goto change_notsup; + } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) *use_udp6_rx_checksums = false; } return 0; +change_notsup: + NL_SET_ERR_MSG_ATTR(extack, data[attrtype], + "Changing VNI, Port, endpoint IP address family, external, and UDP checksum attributes are not supported"); + return -EOPNOTSUPP; } static int geneve_newlink(struct net *net, struct net_device *dev, @@ -1335,12 +1390,13 @@ static int geneve_newlink(struct net *net, struct net_device *dev, int err; init_tnl_info(&info, GENEVE_UDP_PORT); - err = geneve_nl2info(dev, tb, data, &info, &metadata, + err = geneve_nl2info(tb, data, extack, &info, &metadata, &use_udp6_rx_checksums, false); if (err) return err; - return geneve_configure(net, dev, &info, metadata, use_udp6_rx_checksums); + return geneve_configure(net, dev, extack, &info, metadata, + use_udp6_rx_checksums); } /* Quiesces the geneve device data path for both TX and RX. @@ -1409,7 +1465,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], memcpy(&info, &geneve->info, sizeof(info)); metadata = geneve->collect_md; use_udp6_rx_checksums = geneve->use_udp6_rx_checksums; - err = geneve_nl2info(dev, tb, data, &info, &metadata, + err = geneve_nl2info(tb, data, extack, &info, &metadata, &use_udp6_rx_checksums, true); if (err) return err; @@ -1536,7 +1592,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, return dev; init_tnl_info(&info, dst_port); - err = geneve_configure(net, dev, &info, true, true); + err = geneve_configure(net, dev, NULL, &info, true, true); if (err) { free_netdev(dev); return ERR_PTR(err); -- cgit v1.2.3-55-g7522 From 861932ecc36063196c80ca3e240502b0f6d0e977 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 9 Aug 2017 14:30:31 +0200 Subject: net: sched: Add helpers to identify classids Offloading drivers need to understand what qdisc class a filter is added to. Currently they only need to identify ingress, clsact->ingress and clsact->egress. So provide these helpers. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/pkt_sched.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 2579c209ea51..259bc191ba59 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -5,6 +5,7 @@ #include #include #include +#include #define DEFAULT_TX_QUEUE_LEN 1000 @@ -132,4 +133,17 @@ static inline unsigned int psched_mtu(const struct net_device *dev) return dev->mtu + dev->hard_header_len; } +static inline bool is_classid_clsact_ingress(u32 classid) +{ + /* This also returns true for ingress qdisc */ + return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) && + TC_H_MIN(classid) != TC_H_MIN(TC_H_MIN_EGRESS); +} + +static inline bool is_classid_clsact_egress(u32 classid) +{ + return TC_H_MAJ(classid) == TC_H_MAJ(TC_H_CLSACT) && + TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_EGRESS); +} + #endif -- cgit v1.2.3-55-g7522 From 7690f2a51d8afe51ac97e7fae66b081f192a7158 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 9 Aug 2017 14:30:32 +0200 Subject: net: sched: propagate classid down to offload drivers Drivers need classid to decide they support this specific qdisc+class or not. So propagate it down via the tc_cls_common_offload struct. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 0f78e6560b2d..1f1de20e584f 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -410,6 +410,7 @@ struct tc_cls_common_offload { u32 chain_index; __be16 protocol; u32 prio; + u32 classid; }; static inline void @@ -420,6 +421,7 @@ tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common, cls_common->chain_index = tp->chain->index; cls_common->protocol = tp->protocol; cls_common->prio = tp->prio; + cls_common->classid = tp->classid; } struct tc_cls_u32_knode { -- cgit v1.2.3-55-g7522 From a2e8da9378cc09e2e922a0b3d481bd9d07c3d245 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 9 Aug 2017 14:30:33 +0200 Subject: net: sched: use newly added classid identity helpers Instead of checking handle, which does not have the inner class information and drivers wrongly assume clsact->egress as ingress, use the newly introduced classid identification helpers. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 18 ++++++++++++++++-- drivers/net/ethernet/netronome/nfp/bpf/main.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/offload.c | 2 +- net/dsa/slave.c | 9 ++++++++- 8 files changed, 30 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index d80b20d695e0..afa6fd688fac 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2892,7 +2892,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) static int cxgb_setup_tc_cls_u32(struct net_device *dev, struct tc_cls_u32_offload *cls_u32) { - if (TC_H_MAJ(cls_u32->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + if (is_classid_clsact_ingress(cls_u32->common.classid) || cls_u32->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c6b132476de4..f9fd8d8f1bef 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9230,7 +9230,7 @@ static int ixgbe_setup_tc_cls_u32(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); - if (TC_H_MAJ(cls_u32->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + if (is_classid_clsact_ingress(cls_u32->common.classid) || cls_u32->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 981f8415b546..8633ca5af6ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3031,7 +3031,7 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); - if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + if (is_classid_clsact_ingress(cls_flower->common.classid) || cls_flower->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index f3c494a4ecdf..f34c00fbf78c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -657,7 +657,7 @@ mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); - if (TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + if (is_classid_clsact_ingress(cls_flower->common.classid) || cls_flower->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index eb7c4549f464..a99600333a49 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1696,7 +1696,14 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_cls_matchall_offload *f) { - bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS); + bool ingress; + + if (is_classid_clsact_ingress(f->common.classid)) + ingress = true; + else if (is_classid_clsact_egress(f->common.classid)) + ingress = false; + else + 
return -EOPNOTSUPP; if (f->common.chain_index) return -EOPNOTSUPP; @@ -1717,7 +1724,14 @@ static int mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_cls_flower_offload *f) { - bool ingress = TC_H_MAJ(f->common.handle) == TC_H_MAJ(TC_H_INGRESS); + bool ingress; + + if (is_classid_clsact_ingress(f->common.classid)) + ingress = true; + else if (is_classid_clsact_egress(f->common.classid)) + ingress = false; + else + return -EOPNOTSUPP; if (f->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index f981f60ec306..0e6864922d5c 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -127,7 +127,7 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) || - TC_H_MAJ(cls_bpf->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + is_classid_clsact_ingress(cls_bpf->common.classid) || cls_bpf->common.protocol != htons(ETH_P_ALL) || cls_bpf->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 01767c7376d5..3ad5aaa210a4 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -390,7 +390,7 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *cls_flower = type_data; if (type != TC_SETUP_CLSFLOWER || - TC_H_MAJ(cls_flower->common.handle) != TC_H_MAJ(TC_H_INGRESS) || + is_classid_clsact_ingress(cls_flower->common.classid) || !eth_proto_is_802_3(cls_flower->common.protocol) || cls_flower->common.chain_index) return -EOPNOTSUPP; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 8c79011c5a83..78e78a6e6833 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -914,7 +914,14 @@ static void dsa_slave_del_cls_matchall(struct net_device *dev, static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev, struct tc_cls_matchall_offload *cls) { - bool ingress = TC_H_MAJ(cls->common.handle) == TC_H_MAJ(TC_H_INGRESS); + bool ingress; + + if (is_classid_clsact_ingress(cls->common.classid)) + ingress = true; + else if (is_classid_clsact_egress(cls->common.classid)) + ingress = false; + else + return -EOPNOTSUPP; if (cls->common.chain_index) return -EOPNOTSUPP; -- cgit v1.2.3-55-g7522 From 237f79d24ebe1eb9b5651b7342ba5cc9d9b8f222 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 9 Aug 2017 14:30:34 +0200 Subject: net: sched: remove handle propagation down to the drivers There is no longer need to use handle in drivers, so remove it from tc_cls_common_offload struct. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/pkt_cls.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 1f1de20e584f..bd9dd79357fe 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -406,7 +406,6 @@ tcf_match_indev(struct sk_buff *skb, int ifindex) #endif /* CONFIG_NET_CLS_IND */ struct tc_cls_common_offload { - u32 handle; u32 chain_index; __be16 protocol; u32 prio; @@ -417,7 +416,6 @@ static inline void tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common, const struct tcf_proto *tp) { - cls_common->handle = tp->q->handle; cls_common->chain_index = tp->chain->index; cls_common->protocol = tp->protocol; cls_common->prio = tp->prio; -- cgit v1.2.3-55-g7522 From 7b06e8aed283081010596c98a67f06c595affe51 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 9 Aug 2017 14:30:35 +0200 Subject: net: sched: remove cops->tcf_cl_offload cops->tcf_cl_offload is no longer needed, as the drivers check what they can and cannot offload using the classid identify helpers. So remove this. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/pkt_cls.h | 14 +++----------- include/net/sch_generic.h | 1 - net/sched/cls_bpf.c | 4 ++-- net/sched/cls_flower.c | 8 ++++---- net/sched/cls_matchall.c | 4 ++-- net/sched/cls_u32.c | 8 ++++---- net/sched/sch_ingress.c | 12 ------------ 7 files changed, 15 insertions(+), 36 deletions(-) diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index bd9dd79357fe..e80edd8879ef 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -457,19 +457,12 @@ struct tc_cls_u32_offload { }; }; -static inline bool tc_can_offload(const struct net_device *dev, - const struct tcf_proto *tp) +static inline bool tc_can_offload(const struct net_device *dev) { - const struct Qdisc *sch = tp->q; - const struct Qdisc_class_ops *cops = sch->ops->cl_ops; - if (!(dev->features & NETIF_F_HW_TC)) return false; if (!dev->netdev_ops->ndo_setup_tc) return false; - if (cops && cops->tcf_cl_offload) - return cops->tcf_cl_offload(tp->classid); - return true; } @@ -478,12 +471,11 @@ static inline bool tc_skip_hw(u32 flags) return (flags & TCA_CLS_FLAGS_SKIP_HW) ? 
true : false; } -static inline bool tc_should_offload(const struct net_device *dev, - const struct tcf_proto *tp, u32 flags) +static inline bool tc_should_offload(const struct net_device *dev, u32 flags) { if (tc_skip_hw(flags)) return false; - return tc_can_offload(dev, tp); + return tc_can_offload(dev); } static inline bool tc_skip_sw(u32 flags) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index e79f5ad1c5f3..5865db91976b 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -156,7 +156,6 @@ struct Qdisc_class_ops { /* Filter manipulation */ struct tcf_block * (*tcf_block)(struct Qdisc *, unsigned long); - bool (*tcf_cl_offload)(u32 classid); unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid); void (*unbind_tcf)(struct Qdisc *, unsigned long); diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index db17b68df94e..6f2dffe30f25 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -178,7 +178,7 @@ static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog, (oldprog && tc_skip_sw(oldprog->gen_flags)); if (oldprog && oldprog->offloaded) { - if (tc_should_offload(dev, tp, prog->gen_flags)) { + if (tc_should_offload(dev, prog->gen_flags)) { cmd = TC_CLSBPF_REPLACE; } else if (!tc_skip_sw(prog->gen_flags)) { obj = oldprog; @@ -187,7 +187,7 @@ static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog, return -EINVAL; } } else { - if (!tc_should_offload(dev, tp, prog->gen_flags)) + if (!tc_should_offload(dev, prog->gen_flags)) return skip_sw ? -EINVAL : 0; cmd = TC_CLSBPF_ADD; } diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index d2551a03c542..052e902dc71c 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -227,7 +227,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) struct tc_cls_flower_offload cls_flower = {}; struct net_device *dev = f->hw_dev; - if (!tc_can_offload(dev, tp)) + if (!tc_can_offload(dev)) return; tc_cls_common_offload_init(&cls_flower.common, tp); @@ -246,9 +246,9 @@ static int fl_hw_replace_filter(struct tcf_proto *tp, struct tc_cls_flower_offload cls_flower = {}; int err; - if (!tc_can_offload(dev, tp)) { + if (!tc_can_offload(dev)) { if (tcf_exts_get_dev(dev, &f->exts, &f->hw_dev) || - (f->hw_dev && !tc_can_offload(f->hw_dev, tp))) { + (f->hw_dev && !tc_can_offload(f->hw_dev))) { f->hw_dev = dev; return tc_skip_sw(f->flags) ? 
-EINVAL : 0; } @@ -281,7 +281,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) struct tc_cls_flower_offload cls_flower = {}; struct net_device *dev = f->hw_dev; - if (!tc_can_offload(dev, tp)) + if (!tc_can_offload(dev)) return; tc_cls_common_offload_init(&cls_flower.common, tp); diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index d44e26fdae84..d4dc387f7a56 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c @@ -92,7 +92,7 @@ static void mall_destroy(struct tcf_proto *tp) if (!head) return; - if (tc_should_offload(dev, tp, head->flags)) + if (tc_should_offload(dev, head->flags)) mall_destroy_hw_filter(tp, head, (unsigned long) head); call_rcu(&head->rcu, mall_destroy_rcu); @@ -172,7 +172,7 @@ static int mall_change(struct net *net, struct sk_buff *in_skb, if (err) goto err_set_parms; - if (tc_should_offload(dev, tp, flags)) { + if (tc_should_offload(dev, flags)) { err = mall_replace_hw_filter(tp, new, (unsigned long) new); if (err) { if (tc_skip_sw(flags)) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 5a3f78181526..af22742d2847 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -433,7 +433,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) struct net_device *dev = tp->q->dev_queue->dev; struct tc_cls_u32_offload cls_u32 = {}; - if (!tc_should_offload(dev, tp, 0)) + if (!tc_should_offload(dev, 0)) return; tc_cls_common_offload_init(&cls_u32.common, tp); @@ -450,7 +450,7 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h, struct tc_cls_u32_offload cls_u32 = {}; int err; - if (!tc_should_offload(dev, tp, flags)) + if (!tc_should_offload(dev, flags)) return tc_skip_sw(flags) ? -EINVAL : 0; tc_cls_common_offload_init(&cls_u32.common, tp); @@ -471,7 +471,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) struct net_device *dev = tp->q->dev_queue->dev; struct tc_cls_u32_offload cls_u32 = {}; - if (!tc_should_offload(dev, tp, 0)) + if (!tc_should_offload(dev, 0)) return; tc_cls_common_offload_init(&cls_u32.common, tp); @@ -490,7 +490,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n, struct tc_cls_u32_offload cls_u32 = {}; int err; - if (!tc_should_offload(dev, tp, flags)) + if (!tc_should_offload(dev, flags)) return tc_skip_sw(flags) ? 
-EINVAL : 0; tc_cls_common_offload_init(&cls_u32.common, tp); diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index d8a9bebcab90..a15c543c3569 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c @@ -32,11 +32,6 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid) return TC_H_MIN(classid) + 1; } -static bool ingress_cl_offload(u32 classid) -{ - return true; -} - static unsigned long ingress_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { @@ -103,7 +98,6 @@ static const struct Qdisc_class_ops ingress_class_ops = { .put = ingress_put, .walk = ingress_walk, .tcf_block = ingress_tcf_block, - .tcf_cl_offload = ingress_cl_offload, .bind_tcf = ingress_bind_filter, .unbind_tcf = ingress_put, }; @@ -134,11 +128,6 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid) } } -static bool clsact_cl_offload(u32 classid) -{ - return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS); -} - static unsigned long clsact_bind_filter(struct Qdisc *sch, unsigned long parent, u32 classid) { @@ -198,7 +187,6 @@ static const struct Qdisc_class_ops clsact_class_ops = { .put = ingress_put, .walk = ingress_walk, .tcf_block = clsact_tcf_block, - .tcf_cl_offload = clsact_cl_offload, .bind_tcf = clsact_bind_filter, .unbind_tcf = ingress_put, }; -- cgit v1.2.3-55-g7522 From c1550fde51482feebeb8e27742672a2614cb1f12 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Wed, 9 Aug 2017 12:07:08 -0700 Subject: liquidio: rx/tx queue cleanup When deleting a queue, clear its corresponding bit in the qmask, vfree its memory, clear out the pointer that's pointing to it, and decrement the queue count. Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 4 ++++ drivers/net/ethernet/cavium/liquidio/request_manager.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index f7b5d68eb4cf..9372d4ce9954 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -209,6 +209,10 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no) droq->desc_ring, droq->desc_ring_dma); memset(droq, 0, OCT_DROQ_SIZE); + oct->io_qmask.oq &= ~(1ULL << q_no); + vfree(oct->droq[q_no]); + oct->droq[q_no] = NULL; + oct->num_oqs--; return 0; } diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 7b297f1f6dbe..20a96bab3d0d 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -190,6 +190,10 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no) q_size = iq->max_count * desc_size; lio_dma_free(oct, (u32)q_size, iq->base_addr, iq->base_addr_dma); + oct->io_qmask.iq &= ~(1ULL << iq_no); + vfree(oct->instr_queue[iq_no]); + oct->instr_queue[iq_no] = NULL; + oct->num_iqs--; return 0; } return 1; -- cgit v1.2.3-55-g7522 From 35c7ad3567b06593adbddb59674737fd29d388ad Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Wed, 9 Aug 2017 13:28:04 -0700 Subject: liquidio: removed check for queue size alignment There is no restriction on queue size alignment. Hence removing check for valid queue size. Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/request_manager.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 20a96bab3d0d..1e0fbce86d60 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -77,13 +77,6 @@ int octeon_init_instr_queue(struct octeon_device *oct, return 1; } - if (num_descs & (num_descs - 1)) { - dev_err(&oct->pci_dev->dev, - "Number of descriptors for instr queue %d not in power of 2.\n", - iq_no); - return 1; - } - q_size = (u32)conf->instr_type * num_descs; iq = oct->instr_queue[iq_no]; -- cgit v1.2.3-55-g7522 From d18c2a1b159d107e341206500747dceabe8501fb Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 10 Aug 2017 00:35:50 +0300 Subject: phylink: Fix an uninitialized variable bug "ret" isn't necessarily initialized here. Fixes: 9525ae83959b ("phylink: add phylink infrastructure") Signed-off-by: Dan Carpenter Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/phylink.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 32917bdd1432..bcb4755bcd95 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -958,7 +958,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, } mutex_unlock(&pl->state_mutex); - return ret; + return 0; } EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_set); -- cgit v1.2.3-55-g7522 From 6123c66854c174e4982f98195100c1d990f9e5e6 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 9 Aug 2017 17:46:03 -0700 Subject: netvsc: delay setup of VF device When VF device is discovered, delay bring it automatically up in order to allow userspace to some simple changes (like renaming). Reported-by: Vitaly Kuznetsov Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 2 +- drivers/net/hyperv/netvsc_drv.c | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 98b25f6900c8..a57e37641dc5 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -723,7 +723,7 @@ struct net_device_context { /* State to manage the associated VF interface. */ struct net_device __rcu *vf_netdev; struct netvsc_vf_pcpu_stats __percpu *vf_stats; - struct work_struct vf_takeover; + struct delayed_work vf_takeover; /* 1: allocated, serial number is valid. 
0: not allocated */ u32 vf_alloc; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index eb0023f55fe1..e059375a6d8c 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -47,6 +47,7 @@ #define RING_SIZE_MIN 64 #define LINKCHANGE_INT (2 * HZ) +#define VF_TAKEOVER_INT (HZ / 10) static int ring_size = 128; module_param(ring_size, int, S_IRUGO); @@ -1559,7 +1560,9 @@ static int netvsc_vf_join(struct net_device *vf_netdev, /* set slave flag before open to prevent IPv6 addrconf */ vf_netdev->flags |= IFF_SLAVE; - schedule_work(&ndev_ctx->vf_takeover); + schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); + + call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); netdev_info(vf_netdev, "joined to %s\n", ndev->name); return 0; @@ -1575,8 +1578,6 @@ static void __netvsc_vf_setup(struct net_device *ndev, { int ret; - call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); - /* Align MTU of VF with master */ ret = dev_set_mtu(vf_netdev, ndev->mtu); if (ret) @@ -1597,12 +1598,12 @@ static void __netvsc_vf_setup(struct net_device *ndev, static void netvsc_vf_setup(struct work_struct *w) { struct net_device_context *ndev_ctx - = container_of(w, struct net_device_context, vf_takeover); + = container_of(w, struct net_device_context, vf_takeover.work); struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx); struct net_device *vf_netdev; if (!rtnl_trylock()) { - schedule_work(w); + schedule_delayed_work(&ndev_ctx->vf_takeover, 0); return; } @@ -1706,7 +1707,7 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) return NOTIFY_DONE; net_device_ctx = netdev_priv(ndev); - cancel_work_sync(&net_device_ctx->vf_takeover); + cancel_delayed_work_sync(&net_device_ctx->vf_takeover); netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name); @@ -1748,7 +1749,7 @@ static int netvsc_probe(struct hv_device *dev, spin_lock_init(&net_device_ctx->lock); INIT_LIST_HEAD(&net_device_ctx->reconfig_events); - INIT_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); + INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup); net_device_ctx->vf_stats = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats); -- cgit v1.2.3-55-g7522 From 5e20d55a23a76a876396ba1235bdf019e74d0c6f Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 9 Aug 2017 17:46:04 -0700 Subject: netvsc: don't signal host twice if empty When hv_pkt_iter_next() returns NULL, it has already called hv_pkt_iter_close(). Calling it twice can lead to extra host signal. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index bffaf93d3cb0..b5fc8fb56b88 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1191,10 +1191,6 @@ int netvsc_poll(struct napi_struct *napi, int budget) nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); } - /* if ring is empty, signal host */ - if (!nvchan->desc) - hv_pkt_iter_close(channel); - /* If send of pending receive completions suceeded * and did not exhaust NAPI budget this time * and not doing busy poll -- cgit v1.2.3-55-g7522 From 16ba3266006be10dc7ec25dd1442f74cef89cb95 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 9 Aug 2017 17:46:05 -0700 Subject: netvsc: propagate MAC address change to VF slave If VF is slaved to synthetic device, then any change to netvsc MAC address should be propagated to the slave device. 
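Read linearly, the reworked netvsc_set_mac_addr() in the diff that follows becomes a prepare / propagate / commit sequence, rolling the VF back if the synthetic (RNDIS) side rejects the new address. The condensed sketch below shows that ordering only; it is not the literal driver code (the sketch_netvsc_set_mac() name is made up, the rtnl locking and the nvdev NULL check are omitted, and it assumes the netvsc driver's internal header for rndis_filter_set_device_mac() and struct netvsc_device):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include "hyperv_net.h"		/* netvsc-internal declarations */

static int sketch_netvsc_set_mac(struct net_device *ndev,
				 struct net_device *vf_netdev,
				 struct netvsc_device *nvdev,
				 struct sockaddr *addr)
{
	int err;

	err = eth_prepare_mac_addr_change(ndev, addr);	/* validate before touching anything */
	if (err)
		return err;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr);	/* propagate to the VF slave first */
		if (err)
			return err;	/* VF refused: synthetic MAC left untouched */
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, addr);	/* update ndev->dev_addr only on success */
	} else if (vf_netdev) {
		/* synthetic side refused: restore the old MAC on the VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr);
	}

	return err;
}
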
If slave device doesn't support MAC address change then it should also be an error to attempt to change synthetic NIC MAC address. It also fixes the error unwind in the original code. If give a bad address, the old code would change the device MAC address anyway. Reviewed-by: Haiyang Zhang Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index e059375a6d8c..07015b1c42c6 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1053,27 +1053,31 @@ static void netvsc_get_stats64(struct net_device *net, static int netvsc_set_mac_addr(struct net_device *ndev, void *p) { struct net_device_context *ndc = netdev_priv(ndev); + struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev); struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev); struct sockaddr *addr = p; - char save_adr[ETH_ALEN]; - unsigned char save_aatype; int err; - memcpy(save_adr, ndev->dev_addr, ETH_ALEN); - save_aatype = ndev->addr_assign_type; - - err = eth_mac_addr(ndev, p); - if (err != 0) + err = eth_prepare_mac_addr_change(ndev, p); + if (err) return err; if (!nvdev) return -ENODEV; + if (vf_netdev) { + err = dev_set_mac_address(vf_netdev, addr); + if (err) + return err; + } + err = rndis_filter_set_device_mac(nvdev, addr->sa_data); - if (err != 0) { - /* roll back to saved MAC */ - memcpy(ndev->dev_addr, save_adr, ETH_ALEN); - ndev->addr_assign_type = save_aatype; + if (!err) { + eth_commit_mac_addr_change(ndev, p); + } else if (vf_netdev) { + /* rollback change on VF */ + memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN); + dev_set_mac_address(vf_netdev, addr); } return err; -- cgit v1.2.3-55-g7522 From 68d715f68541d58033199eea80991394a6886eb7 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 9 Aug 2017 17:46:06 -0700 Subject: netvsc: check error return when restoring channels and mtu If setting new values fails, and the attempt to restore original settings fails. Then log an error and leave device down. This should never happen, but if it does don't go down in flames. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 07015b1c42c6..c7391889938b 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -845,7 +845,13 @@ static int netvsc_set_channels(struct net_device *net, } else { ret = PTR_ERR(nvdev); device_info.num_chn = orig; - rndis_filter_device_add(dev, &device_info); + nvdev = rndis_filter_device_add(dev, &device_info); + + if (IS_ERR(nvdev)) { + netdev_err(net, "restoring channel setting failed: %ld\n", + PTR_ERR(nvdev)); + return ret; + } } if (was_opened) @@ -953,10 +959,16 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) /* Attempt rollback to original MTU */ ndev->mtu = orig_mtu; - rndis_filter_device_add(hdev, &device_info); + nvdev = rndis_filter_device_add(hdev, &device_info); if (vf_netdev) dev_set_mtu(vf_netdev, orig_mtu); + + if (IS_ERR(nvdev)) { + netdev_err(ndev, "restoring mtu failed: %ld\n", + PTR_ERR(nvdev)); + return ret; + } } if (was_opened) -- cgit v1.2.3-55-g7522 From 958333708f2877d3855e3bc31dad428e2f2c8096 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 9 Aug 2017 17:46:07 -0700 Subject: netvsc: no need to allocate send/receive on numa node The send and receive buffers are both per-device (not per-channel). The associated NUMA node is a property of the CPU which is per-channel therefore it makes no sense to force the receive/send buffer to be allocated on a particular node (since it is a shared resource). Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc.c | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index b5fc8fb56b88..7407006f4e22 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -246,20 +246,13 @@ int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) static int netvsc_init_buf(struct hv_device *device, struct netvsc_device *net_device) { - int ret = 0; - struct nvsp_message *init_packet; struct nvsp_1_message_send_receive_buffer_complete *resp; - struct net_device *ndev; + struct net_device *ndev = hv_get_drvdata(device); + struct nvsp_message *init_packet; size_t map_words; - int node; - - ndev = hv_get_drvdata(device); - - node = cpu_to_node(device->channel->target_cpu); - net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node); - if (!net_device->recv_buf) - net_device->recv_buf = vzalloc(net_device->recv_buf_size); + int ret = 0; + net_device->recv_buf = vzalloc(net_device->recv_buf_size); if (!net_device->recv_buf) { netdev_err(ndev, "unable to allocate receive " "buffer of size %d\n", net_device->recv_buf_size); @@ -340,9 +333,7 @@ static int netvsc_init_buf(struct hv_device *device, goto cleanup; /* Now setup the send buffer. */ - net_device->send_buf = vzalloc_node(net_device->send_buf_size, node); - if (!net_device->send_buf) - net_device->send_buf = vzalloc(net_device->send_buf_size); + net_device->send_buf = vzalloc(net_device->send_buf_size); if (!net_device->send_buf) { netdev_err(ndev, "unable to allocate send " "buffer of size %d\n", net_device->send_buf_size); -- cgit v1.2.3-55-g7522 From 89bb42b11370c2daf19d8820398f7255f8499ab7 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 9 Aug 2017 17:46:08 -0700 Subject: netvsc: whitespace cleanup Fix some minor indentation issues. 
Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c7391889938b..3219d2e8918f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -165,7 +165,7 @@ static int netvsc_close(struct net_device *net) } static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, - int pkt_type) + int pkt_type) { struct rndis_packet *rndis_pkt; struct rndis_per_packet_info *ppi; @@ -286,7 +286,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, } static u32 fill_pg_buf(struct page *page, u32 offset, u32 len, - struct hv_page_buffer *pb) + struct hv_page_buffer *pb) { int j = 0; @@ -626,6 +626,7 @@ no_memory: ++net_device_ctx->eth_stats.tx_no_memory; goto drop; } + /* * netvsc_linkstatus_callback - Link up/down notification */ @@ -649,8 +650,8 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj, if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { u32 speed; - speed = *(u32 *)((void *)indicate + indicate-> - status_buf_offset) / 10000; + speed = *(u32 *)((void *)indicate + + indicate->status_buf_offset) / 10000; ndev_ctx->speed = speed; return; } @@ -1018,7 +1019,7 @@ static void netvsc_get_stats64(struct net_device *net, struct net_device_context *ndev_ctx = netdev_priv(net); struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev); struct netvsc_vf_pcpu_stats vf_tot; - int i; + int i; if (!nvdev) return; -- cgit v1.2.3-55-g7522 From 00f5024e821e60c0d1d7df44f2577a2c277b5cc9 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 9 Aug 2017 17:46:09 -0700 Subject: netvsc: remove unnecessary cast of void pointer Assignment to a typed pointer is sufficient in C. No cast is needed. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3219d2e8918f..9f89de17b5fa 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -523,9 +523,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) rndis_msg_size += NDIS_VLAN_PPI_SIZE; ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE, - IEEE_8021Q_INFO); - vlan = (struct ndis_pkt_8021q_info *)((void *)ppi + - ppi->ppi_offset); + IEEE_8021Q_INFO); + + vlan = (void *)ppi + ppi->ppi_offset; vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK; vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; @@ -538,8 +538,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE, TCP_LARGESEND_PKTINFO); - lso_info = (struct ndis_tcp_lso_info *)((void *)ppi + - ppi->ppi_offset); + lso_info = (void *)ppi + ppi->ppi_offset; lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE; if (skb->protocol == htons(ETH_P_IP)) { -- cgit v1.2.3-55-g7522 From ea5a32c00bcacce1d8ac834a70a82f95a1c79425 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 9 Aug 2017 17:46:10 -0700 Subject: netvsc: remove unnecessary check for NULL hdr The function init_page_array is always called with a valid pointer to RNDIS header. No check for NULL is needed. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 9f89de17b5fa..7b465e40869b 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -333,10 +333,9 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, * 2. skb linear data * 3. skb fragment data */ - if (hdr != NULL) - slots_used += fill_pg_buf(virt_to_page(hdr), - offset_in_page(hdr), - len, &pb[slots_used]); + slots_used += fill_pg_buf(virt_to_page(hdr), + offset_in_page(hdr), + len, &pb[slots_used]); packet->rmsg_size = len; packet->rmsg_pgcnt = slots_used; -- cgit v1.2.3-55-g7522 From 8b5327975ae171ca54dfd93e6c042d1292945867 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 9 Aug 2017 17:46:11 -0700 Subject: netvsc: allow controlling send/recv buffer size Control the size of the buffer areas via ethtool ring settings. They aren't really traditional hardware rings, but host API breaks receive and send buffer into chunks. The final size of the chunks are controlled by the host. The default value of send and receive buffer area for host DMA is much larger than it needs to be. Experimentation shows that 4M receive and 1M send is sufficient. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 9 ++-- drivers/net/hyperv/netvsc.c | 70 +++++++++++++----------- drivers/net/hyperv/netvsc_drv.c | 117 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 157 insertions(+), 39 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index a57e37641dc5..30326373e46f 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -148,6 +148,8 @@ struct netvsc_device_info { unsigned char mac_adr[ETH_ALEN]; int ring_size; u32 num_chn; + u32 send_sections; + u32 recv_sections; }; enum rndis_device_state { @@ -634,12 +636,12 @@ struct nvsp_message { #define NETVSC_SEND_BUFFER_SIZE (1024 * 1024 * 15) /* 15MB */ #define NETVSC_INVALID_INDEX -1 +#define NETVSC_SEND_SECTION_SIZE 6144 +#define NETVSC_RECV_SECTION_SIZE 1728 #define NETVSC_RECEIVE_BUFFER_ID 0xcafe #define NETVSC_SEND_BUFFER_ID 0 -#define NETVSC_PACKET_SIZE 4096 - #define VRSS_SEND_TAB_SIZE 16 /* must be power of 2 */ #define VRSS_CHANNEL_MAX 64 #define VRSS_CHANNEL_DEFAULT 8 @@ -754,14 +756,13 @@ struct netvsc_device { /* Receive buffer allocated by us but manages by NetVSP */ void *recv_buf; - u32 recv_buf_size; u32 recv_buf_gpadl_handle; u32 recv_section_cnt; + u32 recv_section_size; u32 recv_completion_cnt; /* Send buffer allocated by us */ void *send_buf; - u32 send_buf_size; u32 send_buf_gpadl_handle; u32 send_section_cnt; u32 send_section_size; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 7407006f4e22..d9d7555148eb 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -75,6 +75,10 @@ static struct netvsc_device *alloc_net_device(void) atomic_set(&net_device->open_cnt, 0); net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; + + net_device->recv_section_size = NETVSC_RECV_SECTION_SIZE; + net_device->send_section_size = NETVSC_SEND_SECTION_SIZE; + init_completion(&net_device->channel_init_wait); init_waitqueue_head(&net_device->subchan_open); @@ -143,6 +147,7 @@ static void netvsc_destroy_buf(struct hv_device *device) "revoke receive buffer to netvsp\n"); return; } + net_device->recv_section_cnt 
= 0; } /* Teardown the gpadl on the vsp end */ @@ -173,7 +178,7 @@ static void netvsc_destroy_buf(struct hv_device *device) * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need * to send a revoke msg here */ - if (net_device->send_section_size) { + if (net_device->send_section_cnt) { /* Send the revoke receive buffer */ revoke_packet = &net_device->revoke_packet; memset(revoke_packet, 0, sizeof(struct nvsp_message)); @@ -205,6 +210,7 @@ static void netvsc_destroy_buf(struct hv_device *device) "revoke send buffer to netvsp\n"); return; } + net_device->send_section_cnt = 0; } /* Teardown the gpadl on the vsp end */ if (net_device->send_buf_gpadl_handle) { @@ -244,18 +250,25 @@ int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) } static int netvsc_init_buf(struct hv_device *device, - struct netvsc_device *net_device) + struct netvsc_device *net_device, + const struct netvsc_device_info *device_info) { struct nvsp_1_message_send_receive_buffer_complete *resp; struct net_device *ndev = hv_get_drvdata(device); struct nvsp_message *init_packet; + unsigned int buf_size; size_t map_words; int ret = 0; - net_device->recv_buf = vzalloc(net_device->recv_buf_size); + /* Get receive buffer area. */ + buf_size = device_info->recv_sections * net_device->recv_section_size; + buf_size = roundup(buf_size, PAGE_SIZE); + + net_device->recv_buf = vzalloc(buf_size); if (!net_device->recv_buf) { - netdev_err(ndev, "unable to allocate receive " - "buffer of size %d\n", net_device->recv_buf_size); + netdev_err(ndev, + "unable to allocate receive buffer of size %u\n", + buf_size); ret = -ENOMEM; goto cleanup; } @@ -266,7 +279,7 @@ static int netvsc_init_buf(struct hv_device *device, * than the channel to establish the gpadl handle. */ ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf, - net_device->recv_buf_size, + buf_size, &net_device->recv_buf_gpadl_handle); if (ret != 0) { netdev_err(ndev, @@ -312,31 +325,31 @@ static int netvsc_init_buf(struct hv_device *device, resp->num_sections, resp->sections[0].sub_alloc_size, resp->sections[0].num_sub_allocs); - net_device->recv_section_cnt = resp->num_sections; - - /* - * For 1st release, there should only be 1 section that represents the - * entire receive buffer - */ - if (net_device->recv_section_cnt != 1 || - resp->sections[0].offset != 0) { + /* There should only be one section for the entire receive buffer */ + if (resp->num_sections != 1 || resp->sections[0].offset != 0) { ret = -EINVAL; goto cleanup; } + net_device->recv_section_size = resp->sections[0].sub_alloc_size; + net_device->recv_section_cnt = resp->sections[0].num_sub_allocs; + /* Setup receive completion ring */ net_device->recv_completion_cnt - = round_up(resp->sections[0].num_sub_allocs + 1, + = round_up(net_device->recv_section_cnt + 1, PAGE_SIZE / sizeof(u64)); ret = netvsc_alloc_recv_comp_ring(net_device, 0); if (ret) goto cleanup; /* Now setup the send buffer. */ - net_device->send_buf = vzalloc(net_device->send_buf_size); + buf_size = device_info->send_sections * net_device->send_section_size; + buf_size = round_up(buf_size, PAGE_SIZE); + + net_device->send_buf = vzalloc(buf_size); if (!net_device->send_buf) { - netdev_err(ndev, "unable to allocate send " - "buffer of size %d\n", net_device->send_buf_size); + netdev_err(ndev, "unable to allocate send buffer of size %u\n", + buf_size); ret = -ENOMEM; goto cleanup; } @@ -346,7 +359,7 @@ static int netvsc_init_buf(struct hv_device *device, * than the channel to establish the gpadl handle. 
*/ ret = vmbus_establish_gpadl(device->channel, net_device->send_buf, - net_device->send_buf_size, + buf_size, &net_device->send_buf_gpadl_handle); if (ret != 0) { netdev_err(ndev, @@ -391,10 +404,8 @@ static int netvsc_init_buf(struct hv_device *device, net_device->send_section_size = init_packet->msg. v1_msg.send_send_buf_complete.section_size; - /* Section count is simply the size divided by the section size. - */ - net_device->send_section_cnt = - net_device->send_buf_size / net_device->send_section_size; + /* Section count is simply the size divided by the section size. */ + net_device->send_section_cnt = buf_size / net_device->send_section_size; netdev_dbg(ndev, "Send section size: %d, Section count:%d\n", net_device->send_section_size, net_device->send_section_cnt); @@ -472,7 +483,8 @@ static int negotiate_nvsp_ver(struct hv_device *device, } static int netvsc_connect_vsp(struct hv_device *device, - struct netvsc_device *net_device) + struct netvsc_device *net_device, + const struct netvsc_device_info *device_info) { const u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2, @@ -522,14 +534,8 @@ static int netvsc_connect_vsp(struct hv_device *device, if (ret != 0) goto cleanup; - /* Post the big receive buffer to NetVSP */ - if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2) - net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; - else - net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; - net_device->send_buf_size = NETVSC_SEND_BUFFER_SIZE; - ret = netvsc_init_buf(device, net_device); + ret = netvsc_init_buf(device, net_device, device_info); cleanup: return ret; @@ -1287,7 +1293,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device, rcu_assign_pointer(net_device_ctx->nvdev, net_device); /* Connect with the NetVsp */ - ret = netvsc_connect_vsp(device, net_device); + ret = netvsc_connect_vsp(device, net_device, device_info); if (ret != 0) { netdev_err(ndev, "unable to connect to NetVSP - %d\n", ret); diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 7b465e40869b..873c83a66cc2 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -45,7 +45,12 @@ #include "hyperv_net.h" -#define RING_SIZE_MIN 64 +#define RING_SIZE_MIN 64 +#define NETVSC_MIN_TX_SECTIONS 10 +#define NETVSC_DEFAULT_TX 192 /* ~1M */ +#define NETVSC_MIN_RX_SECTIONS 10 /* ~64K */ +#define NETVSC_DEFAULT_RX 2048 /* ~4M */ + #define LINKCHANGE_INT (2 * HZ) #define VF_TAKEOVER_INT (HZ / 10) @@ -831,11 +836,13 @@ static int netvsc_set_channels(struct net_device *net, if (was_opened) rndis_filter_close(nvdev); - rndis_filter_device_remove(dev, nvdev); - memset(&device_info, 0, sizeof(device_info)); device_info.num_chn = count; device_info.ring_size = ring_size; + device_info.send_sections = nvdev->send_section_cnt; + device_info.recv_sections = nvdev->recv_section_cnt; + + rndis_filter_device_remove(dev, nvdev); nvdev = rndis_filter_device_add(dev, &device_info); if (!IS_ERR(nvdev)) { @@ -947,6 +954,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = nvdev->num_chn; + device_info.send_sections = nvdev->send_section_cnt; + device_info.recv_sections = nvdev->recv_section_cnt; rndis_filter_device_remove(hdev, nvdev); @@ -1351,6 +1360,104 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir, return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn); } +/* Hyper-V RNDIS 
protocol does not have ring in the HW sense. + * It does have pre-allocated receive area which is divided into sections. + */ +static void __netvsc_get_ringparam(struct netvsc_device *nvdev, + struct ethtool_ringparam *ring) +{ + u32 max_buf_size; + + ring->rx_pending = nvdev->recv_section_cnt; + ring->tx_pending = nvdev->send_section_cnt; + + if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2) + max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY; + else + max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE; + + ring->rx_max_pending = max_buf_size / nvdev->recv_section_size; + ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE + / nvdev->send_section_size; +} + +static void netvsc_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct net_device_context *ndevctx = netdev_priv(ndev); + struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); + + if (!nvdev) + return; + + __netvsc_get_ringparam(nvdev, ring); +} + +static int netvsc_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct net_device_context *ndevctx = netdev_priv(ndev); + struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); + struct hv_device *hdev = ndevctx->device_ctx; + struct netvsc_device_info device_info; + struct ethtool_ringparam orig; + u32 new_tx, new_rx; + bool was_opened; + int ret = 0; + + if (!nvdev || nvdev->destroy) + return -ENODEV; + + memset(&orig, 0, sizeof(orig)); + __netvsc_get_ringparam(nvdev, &orig); + + new_tx = clamp_t(u32, ring->tx_pending, + NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending); + new_rx = clamp_t(u32, ring->rx_pending, + NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending); + + if (new_tx == orig.tx_pending && + new_rx == orig.rx_pending) + return 0; /* no change */ + + memset(&device_info, 0, sizeof(device_info)); + device_info.num_chn = nvdev->num_chn; + device_info.ring_size = ring_size; + device_info.send_sections = new_tx; + device_info.recv_sections = new_rx; + + netif_device_detach(ndev); + was_opened = rndis_filter_opened(nvdev); + if (was_opened) + rndis_filter_close(nvdev); + + rndis_filter_device_remove(hdev, nvdev); + + nvdev = rndis_filter_device_add(hdev, &device_info); + if (IS_ERR(nvdev)) { + ret = PTR_ERR(nvdev); + + device_info.send_sections = orig.tx_pending; + device_info.recv_sections = orig.rx_pending; + nvdev = rndis_filter_device_add(hdev, &device_info); + if (IS_ERR(nvdev)) { + netdev_err(ndev, "restoring ringparam failed: %ld\n", + PTR_ERR(nvdev)); + return ret; + } + } + + if (was_opened) + rndis_filter_open(nvdev); + netif_device_attach(ndev); + + /* We may have missed link change notifications */ + ndevctx->last_reconfig = 0; + schedule_delayed_work(&ndevctx->dwork, 0); + + return ret; +} + static const struct ethtool_ops ethtool_ops = { .get_drvinfo = netvsc_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1367,6 +1474,8 @@ static const struct ethtool_ops ethtool_ops = { .set_rxfh = netvsc_set_rxfh, .get_link_ksettings = netvsc_get_link_ksettings, .set_link_ksettings = netvsc_set_link_ksettings, + .get_ringparam = netvsc_get_ringparam, + .set_ringparam = netvsc_set_ringparam, }; static const struct net_device_ops device_ops = { @@ -1782,6 +1891,8 @@ static int netvsc_probe(struct hv_device *dev, memset(&device_info, 0, sizeof(device_info)); device_info.ring_size = ring_size; device_info.num_chn = VRSS_CHANNEL_DEFAULT; + device_info.send_sections = NETVSC_DEFAULT_TX; + device_info.recv_sections = NETVSC_DEFAULT_RX; nvdev = rndis_filter_device_add(dev, &device_info); if (IS_ERR(nvdev)) { -- cgit 
v1.2.3-55-g7522 From cad5c197704d82faf33ffdbef414f15db08d9ef9 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 9 Aug 2017 17:46:12 -0700 Subject: netvsc: keep track of some non-fatal overload conditions Add ethtool statistics for case where send chimmeny buffer is exhausted and driver has to fall back to doing scatter/gather send. Also, add statistic for case where ring buffer is full and receive completions are delayed. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 2 ++ drivers/net/hyperv/netvsc.c | 19 +++++++++++++------ drivers/net/hyperv/netvsc_drv.c | 2 ++ 3 files changed, 17 insertions(+), 6 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 30326373e46f..9198dd1240ed 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -680,6 +680,8 @@ struct netvsc_ethtool_stats { unsigned long tx_no_space; unsigned long tx_too_big; unsigned long tx_busy; + unsigned long tx_send_full; + unsigned long rx_comp_busy; }; struct netvsc_vf_pcpu_stats { diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index d9d7555148eb..0530e7d729e1 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -883,7 +883,9 @@ int netvsc_send(struct net_device_context *ndev_ctx, } else if (pktlen + net_device->pkt_align < net_device->send_section_size) { section_index = netvsc_get_next_send_section(net_device); - if (section_index != NETVSC_INVALID_INDEX) { + if (unlikely(section_index == NETVSC_INVALID_INDEX)) { + ++ndev_ctx->eth_stats.tx_send_full; + } else { move_pkt_msd(&msd_send, &msd_skb, msdp); msd_len = 0; } @@ -949,9 +951,10 @@ send_now: } /* Send pending recv completions */ -static int send_recv_completions(struct netvsc_channel *nvchan) +static int send_recv_completions(struct net_device *ndev, + struct netvsc_device *nvdev, + struct netvsc_channel *nvchan) { - struct netvsc_device *nvdev = nvchan->net_device; struct multi_recv_comp *mrc = &nvchan->mrc; struct recv_comp_msg { struct nvsp_message_header hdr; @@ -969,8 +972,12 @@ static int send_recv_completions(struct netvsc_channel *nvchan) msg.status = rcd->status; ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg), rcd->tid, VM_PKT_COMP, 0); - if (unlikely(ret)) + if (unlikely(ret)) { + struct net_device_context *ndev_ctx = netdev_priv(ndev); + + ++ndev_ctx->eth_stats.rx_comp_busy; return ret; + } if (++mrc->first == nvdev->recv_completion_cnt) mrc->first = 0; @@ -1011,7 +1018,7 @@ static void enq_receive_complete(struct net_device *ndev, recv_comp_slot_avail(nvdev, mrc, &filled, &avail); if (unlikely(filled > NAPI_POLL_WEIGHT)) { - send_recv_completions(nvchan); + send_recv_completions(ndev, nvdev, nvchan); recv_comp_slot_avail(nvdev, mrc, &filled, &avail); } @@ -1194,7 +1201,7 @@ int netvsc_poll(struct napi_struct *napi, int budget) * then re-enable host interrupts * and reschedule if ring is not empty. 
*/ - if (send_recv_completions(nvchan) == 0 && + if (send_recv_completions(ndev, net_device, nvchan) == 0 && work_done < budget && napi_complete_done(napi, work_done) && hv_end_read(&channel->inbound)) { diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 873c83a66cc2..b33f0507c373 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1112,6 +1112,8 @@ static const struct { { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) }, { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) }, { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) }, + { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) }, + { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) }, }, vf_stats[] = { { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) }, { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) }, -- cgit v1.2.3-55-g7522 From 2e1c80847b9983a8c93a678f6de2dc3c42f19661 Mon Sep 17 00:00:00 2001 From: Maxim Uvarov Date: Thu, 10 Aug 2017 10:47:46 +0300 Subject: drivers: net: davinci_mdio: remove busy loop on wait user access Polling 14 mdio devices on single mdio bus eats 30% of 1Ghz cpu time due to busy loop in wait(). Add small delay to relax cpu. Signed-off-by: Max Uvarov Reviewed-by: Andrew Lunn Reviewed-by: Grygorii Strashko Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_mdio.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 33df340db1f1..611e7cc5f07b 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -198,8 +198,10 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) return 0; reg = __raw_readl(®s->control); - if ((reg & CONTROL_IDLE) == 0) + if ((reg & CONTROL_IDLE) == 0) { + usleep_range(100, 200); continue; + } /* * An emac soft_reset may have clobbered the mdio controller's -- cgit v1.2.3-55-g7522 From cc147a0dc312e5c85fe083b1712913e4496c11e5 Mon Sep 17 00:00:00 2001 From: Maxim Uvarov Date: Thu, 10 Aug 2017 10:47:47 +0300 Subject: drivers: net: davinci_mdio: print bus frequency Frequency can be adjusted in DT it make sense to print current used value on driver init. Signed-off-by: Max Uvarov Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/davinci_mdio.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 611e7cc5f07b..3c33f4504d8e 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -159,8 +159,10 @@ static int davinci_mdio_reset(struct mii_bus *bus) /* dump hardware version info */ ver = __raw_readl(&data->regs->version); - dev_info(data->dev, "davinci mdio revision %d.%d\n", - (ver >> 8) & 0xff, ver & 0xff); + dev_info(data->dev, + "davinci mdio revision %d.%d, bus freq %ld\n", + (ver >> 8) & 0xff, ver & 0xff, + data->pdata.bus_freq); if (data->skip_scan) goto done; -- cgit v1.2.3-55-g7522 From d24d39d906fa8944024f154e0ea3f031252c06c1 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Thu, 10 Aug 2017 04:13:12 -0400 Subject: forcedeth: replace init_timer_deferrable with setup_deferrable_timer Replace init_timer_deferrable with setup_deferrable_timer to simplify the source code. Signed-off-by: Zhu Yanjun Signed-off-by: David S. 
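A deferrable timer may be serviced late when the CPU is idle, which is exactly what low-priority work such as statistics polling wants, since it avoids needless wakeups. A sketch of the one-call initialisation this patch switches to, using the timer API of that era (callback takes an unsigned long cookie); the foo_* names are illustrative and not taken from forcedeth:

#include <linux/timer.h>
#include <linux/netdevice.h>

struct foo_priv {                       /* hypothetical driver private data */
        struct timer_list stats_poll;
};

static void foo_do_stats_poll(unsigned long data)
{
        struct net_device *dev = (struct net_device *)data;
        struct foo_priv *np = netdev_priv(dev);

        /* ... read hardware counters ... */
        mod_timer(&np->stats_poll, jiffies + 10 * HZ);  /* re-arm */
}

static void foo_init_stats_timer(struct net_device *dev)
{
        struct foo_priv *np = netdev_priv(dev);

        /* one call replaces init_timer_deferrable() plus the manual
         * .data/.function assignments removed in the hunk below */
        setup_deferrable_timer(&np->stats_poll, foo_do_stats_poll,
                               (unsigned long)dev);
}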
Miller --- drivers/net/ethernet/nvidia/forcedeth.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index aa912f43e15f..994a83a1f0a5 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5629,9 +5629,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) setup_timer(&np->oom_kick, nv_do_rx_refill, (unsigned long)dev); setup_timer(&np->nic_poll, nv_do_nic_poll, (unsigned long)dev); - init_timer_deferrable(&np->stats_poll); - np->stats_poll.data = (unsigned long) dev; - np->stats_poll.function = nv_do_stats_poll; /* timer handler */ + setup_deferrable_timer(&np->stats_poll, nv_do_stats_poll, + (unsigned long)dev); err = pci_enable_device(pci_dev); if (err) -- cgit v1.2.3-55-g7522 From baf6ee81406ab806c7db34bd1e9a0a824cb84c71 Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 21:56:40 +0800 Subject: net: phy: Add rockchip PHY driver support Support integrated ethernet PHY currently. Signed-off-by: David Wu Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 5 + drivers/net/phy/Makefile | 1 + drivers/net/phy/rockchip.c | 233 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 239 insertions(+) create mode 100644 drivers/net/phy/rockchip.c diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index bf73969a9d2b..8c919203291a 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -366,6 +366,11 @@ config REALTEK_PHY ---help--- Supports the Realtek 821x PHY. +config ROCKCHIP_PHY + tristate "Driver for Rockchip Ethernet PHYs" + ---help--- + Currently supports the integrated Ethernet PHY. + config SMSC_PHY tristate "SMSC PHYs" ---help--- diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 7237255bad68..416df92fbf4f 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -72,6 +72,7 @@ obj-$(CONFIG_MICROSEMI_PHY) += mscc.o obj-$(CONFIG_NATIONAL_PHY) += national.o obj-$(CONFIG_QSEMI_PHY) += qsemi.o obj-$(CONFIG_REALTEK_PHY) += realtek.o +obj-$(CONFIG_ROCKCHIP_PHY) += rockchip.o obj-$(CONFIG_SMSC_PHY) += smsc.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_TERANETICS_PHY) += teranetics.o diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c new file mode 100644 index 000000000000..c092af137056 --- /dev/null +++ b/drivers/net/phy/rockchip.c @@ -0,0 +1,233 @@ +/** + * drivers/net/phy/rockchip.c + * + * Driver for ROCKCHIP Ethernet PHYs + * + * Copyright (c) 2017, Fuzhou Rockchip Electronics Co., Ltd + * + * David Wu + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#define INTERNAL_EPHY_ID 0x1234d400 + +#define MII_INTERNAL_CTRL_STATUS 17 +#define SMI_ADDR_TSTCNTL 20 +#define SMI_ADDR_TSTREAD1 21 +#define SMI_ADDR_TSTREAD2 22 +#define SMI_ADDR_TSTWRITE 23 +#define MII_SPECIAL_CONTROL_STATUS 31 + +#define MII_AUTO_MDIX_EN BIT(7) +#define MII_MDIX_EN BIT(6) + +#define MII_SPEED_10 BIT(2) +#define MII_SPEED_100 BIT(3) + +#define TSTCNTL_RD (BIT(15) | BIT(10)) +#define TSTCNTL_WR (BIT(14) | BIT(10)) + +#define TSTMODE_ENABLE 0x400 +#define TSTMODE_DISABLE 0x0 + +#define WR_ADDR_A7CFG 0x18 + +static int rockchip_init_tstmode(struct phy_device *phydev) +{ + int ret; + + /* Enable access to Analog and DSP register banks */ + ret = phy_write(phydev, SMI_ADDR_TSTCNTL, TSTMODE_ENABLE); + if (ret) + return ret; + + ret = phy_write(phydev, SMI_ADDR_TSTCNTL, TSTMODE_DISABLE); + if (ret) + return ret; + + return phy_write(phydev, SMI_ADDR_TSTCNTL, TSTMODE_ENABLE); +} + +static int rockchip_close_tstmode(struct phy_device *phydev) +{ + /* Back to basic register bank */ + return phy_write(phydev, SMI_ADDR_TSTCNTL, TSTMODE_DISABLE); +} + +static int rockchip_integrated_phy_analog_init(struct phy_device *phydev) +{ + int ret; + + ret = rockchip_init_tstmode(phydev); + if (ret) + return ret; + + /* + * Adjust tx amplitude to make sginal better, + * the default value is 0x8. + */ + ret = phy_write(phydev, SMI_ADDR_TSTWRITE, 0xB); + if (ret) + return ret; + ret = phy_write(phydev, SMI_ADDR_TSTCNTL, TSTCNTL_WR | WR_ADDR_A7CFG); + if (ret) + return ret; + + return rockchip_close_tstmode(phydev); +} + +static int rockchip_integrated_phy_config_init(struct phy_device *phydev) +{ + int val, ret; + + /* + * The auto MIDX has linked problem on some board, + * workround to disable auto MDIX. + */ + val = phy_read(phydev, MII_INTERNAL_CTRL_STATUS); + if (val < 0) + return val; + val &= ~MII_AUTO_MDIX_EN; + ret = phy_write(phydev, MII_INTERNAL_CTRL_STATUS, val); + if (ret) + return ret; + + return rockchip_integrated_phy_analog_init(phydev); +} + +static void rockchip_link_change_notify(struct phy_device *phydev) +{ + int speed = SPEED_10; + + if (phydev->autoneg == AUTONEG_ENABLE) { + int reg = phy_read(phydev, MII_SPECIAL_CONTROL_STATUS); + + if (reg < 0) { + phydev_err(phydev, "phy_read err: %d.\n", reg); + return; + } + + if (reg & MII_SPEED_100) + speed = SPEED_100; + else if (reg & MII_SPEED_10) + speed = SPEED_10; + } else { + int bmcr = phy_read(phydev, MII_BMCR); + + if (bmcr < 0) { + phydev_err(phydev, "phy_read err: %d.\n", bmcr); + return; + } + + if (bmcr & BMCR_SPEED100) + speed = SPEED_100; + else + speed = SPEED_10; + } + + /* + * If mode switch happens from 10BT to 100BT, all DSP/AFE + * registers are set to default values. So any AFE/DSP + * registers have to be re-initialized in this case. 
+ */ + if ((phydev->speed == SPEED_10) && (speed == SPEED_100)) { + int ret = rockchip_integrated_phy_analog_init(phydev); + if (ret) + phydev_err(phydev, "rockchip_integrated_phy_analog_init err: %d.\n", + ret); + } +} + +static int rockchip_set_polarity(struct phy_device *phydev, int polarity) +{ + int reg, err, val; + + /* get the current settings */ + reg = phy_read(phydev, MII_INTERNAL_CTRL_STATUS); + if (reg < 0) + return reg; + + reg &= ~MII_AUTO_MDIX_EN; + val = reg; + switch (polarity) { + case ETH_TP_MDI: + val &= ~MII_MDIX_EN; + break; + case ETH_TP_MDI_X: + val |= MII_MDIX_EN; + break; + case ETH_TP_MDI_AUTO: + case ETH_TP_MDI_INVALID: + default: + return 0; + } + + if (val != reg) { + /* Set the new polarity value in the register */ + err = phy_write(phydev, MII_INTERNAL_CTRL_STATUS, val); + if (err) + return err; + } + + return 0; +} + +static int rockchip_config_aneg(struct phy_device *phydev) +{ + int err; + + err = rockchip_set_polarity(phydev, phydev->mdix); + if (err < 0) + return err; + + return genphy_config_aneg(phydev); +} + +static int rockchip_phy_resume(struct phy_device *phydev) +{ + genphy_resume(phydev); + + return rockchip_integrated_phy_config_init(phydev); +} + +static struct phy_driver rockchip_phy_driver[] = { +{ + .phy_id = INTERNAL_EPHY_ID, + .phy_id_mask = 0xfffffff0, + .name = "Rockchip integrated EPHY", + .features = PHY_BASIC_FEATURES, + .flags = 0, + .link_change_notify = rockchip_link_change_notify, + .soft_reset = genphy_soft_reset, + .config_init = rockchip_integrated_phy_config_init, + .config_aneg = rockchip_config_aneg, + .read_status = genphy_read_status, + .suspend = genphy_suspend, + .resume = rockchip_phy_resume, +}, +}; + +module_phy_driver(rockchip_phy_driver); + +static struct mdio_device_id __maybe_unused rockchip_phy_tbl[] = { + { INTERNAL_EPHY_ID, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, rockchip_phy_tbl); + +MODULE_AUTHOR("David Wu "); +MODULE_DESCRIPTION("Rockchip Ethernet PHY driver"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-55-g7522 From 0d33f82efc8a381887b0d4b6d165f1805e7af536 Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 21:56:41 +0800 Subject: multi_v7_defconfig: Make rockchip PHY built-in Enable the rockchip PHY driver for multi_v7_defconfig builds. Signed-off-by: David Wu Signed-off-by: David S. Miller --- arch/arm/configs/multi_v7_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 4d19c1b4b8e7..94d7e71c69c4 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -270,6 +270,7 @@ CONFIG_ICPLUS_PHY=y CONFIG_REALTEK_PHY=y CONFIG_MICREL_PHY=y CONFIG_FIXED_PHY=y +CONFIG_ROCKCHIP_PHY=y CONFIG_USB_PEGASUS=y CONFIG_USB_RTL8152=m CONFIG_USB_USBNET=y -- cgit v1.2.3-55-g7522 From 6b49668e17250879119fc400b68b667c703e1775 Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 21:56:42 +0800 Subject: arm64: defconfig: Enable CONFIG_ROCKCHIP_PHY Make the rockchip PHY driver built into the kernel. Signed-off-by: David Wu Signed-off-by: David S. 
Miller --- arch/arm64/configs/defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index b4ca115b3be1..cdde4f56a281 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -203,6 +203,7 @@ CONFIG_MARVELL_PHY=m CONFIG_MESON_GXL_PHY=m CONFIG_MICREL_PHY=y CONFIG_REALTEK_PHY=m +CONFIG_ROCKCHIP_PHY=y CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m -- cgit v1.2.3-55-g7522 From e7aa4c07462eb5dc165d7dd625c982006c27bfcc Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 21:56:43 +0800 Subject: net: stmmac: dwmac-rk: Remove unwanted code for rk3328_set_to_rmii() This is wrong setting for rk3328_set_to_rmii(), so remove it. Signed-off-by: David Wu Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index f0df5193f047..a8e8fd579d07 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -365,9 +365,6 @@ static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv) regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, RK3328_GMAC_PHY_INTF_SEL_RMII | RK3328_GMAC_RMII_MODE); - - /* set MAC to RMII mode */ - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, GRF_BIT(11)); } static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) -- cgit v1.2.3-55-g7522 From 2398506b4ec121bc86d97e13f9f9c76085baf2ad Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 21:59:18 +0800 Subject: Documentation: net: phy: Add phy-is-integrated binding Add the documentation for integrated PHY. A boolean property indicates the PHY is integrated into the same physical package as the Ethernet MAC. If needed, muxers should be configured to ensure the integrated PHY is used. The absence of this property indicates the muxers should be configured so that the external PHY is used. Signed-off-by: David Wu Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/phy.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index b55857696fc3..d3c24d5ffa9a 100644 --- a/Documentation/devicetree/bindings/net/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt @@ -52,6 +52,11 @@ Optional Properties: Mark the corresponding energy efficient ethernet mode as broken and request the ethernet to stop advertising it. +- phy-is-integrated: If set, indicates that the PHY is integrated into the same + physical package as the Ethernet MAC. If needed, muxers should be configured + to ensure the integrated PHY is used. The absence of this property indicates + the muxers should be configured so that the external PHY is used. + Example: ethernet-phy@0 { -- cgit v1.2.3-55-g7522 From fecd4d7eef8b219a7e7ab3524619b7ed76d208b4 Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 22:00:02 +0800 Subject: net: stmmac: dwmac-rk: Add integrated PHY support To make integrated PHY work, need to configure the PHY clock, PHY cru reset and related registers. Signed-off-by: David Wu Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 99 ++++++++++++++++++++++++-- 1 file changed, 95 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index a8e8fd579d07..90199175cf2c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -41,6 +41,7 @@ struct rk_gmac_ops { void (*set_to_rmii)(struct rk_priv_data *bsp_priv); void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed); void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed); + void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv); }; struct rk_priv_data { @@ -52,6 +53,7 @@ struct rk_priv_data { bool clk_enabled; bool clock_input; + bool integrated_phy; struct clk *clk_mac; struct clk *gmac_clkin; @@ -61,6 +63,9 @@ struct rk_priv_data { struct clk *clk_mac_refout; struct clk *aclk_mac; struct clk *pclk_mac; + struct clk *clk_phy; + + struct reset_control *phy_reset; int tx_delay; int rx_delay; @@ -750,9 +755,55 @@ static const struct rk_gmac_ops rk3399_ops = { .set_rmii_speed = rk3399_set_rmii_speed, }; -static int gmac_clk_init(struct rk_priv_data *bsp_priv) +#define RK_GRF_MACPHY_CON0 0xb00 +#define RK_GRF_MACPHY_CON1 0xb04 +#define RK_GRF_MACPHY_CON2 0xb08 +#define RK_GRF_MACPHY_CON3 0xb0c + +#define RK_MACPHY_ENABLE GRF_BIT(0) +#define RK_MACPHY_DISABLE GRF_CLR_BIT(0) +#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14) +#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7)) +#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0) +#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0) + +static void rk_gmac_integrated_phy_powerup(struct rk_priv_data *priv) +{ + if (priv->ops->integrated_phy_powerup) + priv->ops->integrated_phy_powerup(priv); + + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M); + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE); + + regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID); + regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID); + + if (priv->phy_reset) { + /* PHY needs to be disabled before trying to reset it */ + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE); + if (priv->phy_reset) + reset_control_assert(priv->phy_reset); + usleep_range(10, 20); + if (priv->phy_reset) + reset_control_deassert(priv->phy_reset); + usleep_range(10, 20); + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE); + msleep(30); + } +} + +static void rk_gmac_integrated_phy_powerdown(struct rk_priv_data *priv) { + regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE); + if (priv->phy_reset) + reset_control_assert(priv->phy_reset); +} + +static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat) +{ + struct rk_priv_data *bsp_priv = plat->bsp_priv; struct device *dev = &bsp_priv->pdev->dev; + int ret; bsp_priv->clk_enabled = false; @@ -803,6 +854,16 @@ static int gmac_clk_init(struct rk_priv_data *bsp_priv) clk_set_rate(bsp_priv->clk_mac, 50000000); } + if (plat->phy_node && bsp_priv->integrated_phy) { + bsp_priv->clk_phy = of_clk_get(plat->phy_node, 0); + if (IS_ERR(bsp_priv->clk_phy)) { + ret = PTR_ERR(bsp_priv->clk_phy); + dev_err(dev, "Cannot get PHY clock: %d\n", ret); + return -EINVAL; + } + clk_set_rate(bsp_priv->clk_phy, 50000000); + } + return 0; } @@ -826,6 +887,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) bsp_priv->clk_mac_refout); } + if (!IS_ERR(bsp_priv->clk_phy)) + 
clk_prepare_enable(bsp_priv->clk_phy); + if (!IS_ERR(bsp_priv->aclk_mac)) clk_prepare_enable(bsp_priv->aclk_mac); @@ -858,6 +922,9 @@ static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable) bsp_priv->clk_mac_refout); } + if (!IS_ERR(bsp_priv->clk_phy)) + clk_disable_unprepare(bsp_priv->clk_phy); + if (!IS_ERR(bsp_priv->aclk_mac)) clk_disable_unprepare(bsp_priv->aclk_mac); @@ -902,6 +969,7 @@ static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable) } static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, + struct plat_stmmacenet_data *plat, const struct rk_gmac_ops *ops) { struct rk_priv_data *bsp_priv; @@ -964,9 +1032,22 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev, bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); - bsp_priv->pdev = pdev; - gmac_clk_init(bsp_priv); + if (plat->phy_node) { + bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node, + "phy-is-integrated"); + if (bsp_priv->integrated_phy) { + bsp_priv->phy_reset = of_reset_control_get(plat->phy_node, NULL); + if (IS_ERR(bsp_priv->phy_reset)) { + dev_err(&pdev->dev, "No PHY reset control found.\n"); + bsp_priv->phy_reset = NULL; + } + } + } + dev_info(dev, "integrated PHY? (%s).\n", + bsp_priv->integrated_phy ? "yes" : "no"); + + bsp_priv->pdev = pdev; return bsp_priv; } @@ -1014,6 +1095,9 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) pm_runtime_enable(dev); pm_runtime_get_sync(dev); + if (bsp_priv->integrated_phy) + rk_gmac_integrated_phy_powerup(bsp_priv); + return 0; } @@ -1021,6 +1105,9 @@ static void rk_gmac_powerdown(struct rk_priv_data *gmac) { struct device *dev = &gmac->pdev->dev; + if (gmac->integrated_phy) + rk_gmac_integrated_phy_powerdown(gmac); + pm_runtime_put_sync(dev); pm_runtime_disable(dev); @@ -1072,12 +1159,16 @@ static int rk_gmac_probe(struct platform_device *pdev) plat_dat->has_gmac = true; plat_dat->fix_mac_speed = rk_fix_speed; - plat_dat->bsp_priv = rk_gmac_setup(pdev, data); + plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data); if (IS_ERR(plat_dat->bsp_priv)) { ret = PTR_ERR(plat_dat->bsp_priv); goto err_remove_config_dt; } + ret = rk_gmac_clk_init(plat_dat); + if (ret) + return ret; + ret = rk_gmac_powerup(plat_dat->bsp_priv); if (ret) goto err_remove_config_dt; -- cgit v1.2.3-55-g7522 From 6fa12c787cde8ebe07e5c9641ea4ebc490892ac9 Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 22:00:25 +0800 Subject: net: stmmac: dwmac-rk: Add integrated PHY support for rk3228 There is only one mac controller in rk3228, which could connect to external PHY or integrated PHY, use the grf_com_mux bit15 to route external/integrated PHY. Signed-off-by: David Wu Signed-off-by: David S. 
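The GRF_BIT()/GRF_CLR_BIT()/HIWORD_UPDATE() helpers used throughout these dwmac-rk hunks rely on a Rockchip GRF convention: the upper 16 bits of each 32-bit GRF register act as a write-enable mask for the lower 16 bits, so individual bits can be flipped with a single regmap_write() and no read-modify-write. A sketch of the helpers as understood here (see dwmac-rk.c for the authoritative definitions):

/* the write-enable mask lives in the high half-word of the same register */
#define HIWORD_UPDATE(val, mask, shift) \
        (((val) << (shift)) | ((mask) << ((shift) + 16)))

#define GRF_BIT(nr)     (BIT(nr) | BIT((nr) + 16))      /* set bit nr */
#define GRF_CLR_BIT(nr) (BIT((nr) + 16))                /* clear bit nr */

So RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY == GRF_BIT(15) below writes only bit 15 of GRF_CON_MUX, routing the MAC to the integrated PHY while leaving every other bit untouched.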
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 90199175cf2c..b6db3ff38606 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -86,6 +86,8 @@ struct rk_priv_data { #define RK3228_GRF_MAC_CON0 0x0900 #define RK3228_GRF_MAC_CON1 0x0904 +#define RK3228_GRF_CON_MUX 0x50 + /* RK3228_GRF_MAC_CON0 */ #define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) #define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) @@ -111,6 +113,9 @@ struct rk_priv_data { #define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) #define RK3228_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1) +/* RK3228_GRF_COM_MUX */ +#define RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY GRF_BIT(15) + static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { @@ -191,11 +196,18 @@ static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) dev_err(dev, "unknown speed value for RMII! speed=%d", speed); } +static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv) +{ + regmap_write(priv->grf, RK3228_GRF_CON_MUX, + RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY); +} + static const struct rk_gmac_ops rk3228_ops = { .set_to_rgmii = rk3228_set_to_rgmii, .set_to_rmii = rk3228_set_to_rmii, .set_rgmii_speed = rk3228_set_rgmii_speed, .set_rmii_speed = rk3228_set_rmii_speed, + .integrated_phy_powerup = rk3228_integrated_phy_powerup, }; #define RK3288_GRF_SOC_CON1 0x0248 -- cgit v1.2.3-55-g7522 From 8bdf63bdd3200539af5ecd0c41284b5aa00ce532 Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 22:00:52 +0800 Subject: net: stmmac: dwmac-rk: Add integrated PHY supprot for rk3328 There are two mac controllers in the rk3328, the one connects to external PHY, and the other one connects to integrated PHY. Like the mac of external PHY, the integrated PHY's mac also needs to configure the related mac registers at GRF. Signed-off-by: David Wu Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index b6db3ff38606..2176403c72d8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -323,6 +323,8 @@ static const struct rk_gmac_ops rk3288_ops = { #define RK3328_GRF_MAC_CON0 0x0900 #define RK3328_GRF_MAC_CON1 0x0904 +#define RK3328_GRF_MAC_CON2 0x0908 +#define RK3328_GRF_MACPHY_CON1 0xb04 /* RK3328_GRF_MAC_CON0 */ #define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) @@ -349,6 +351,9 @@ static const struct rk_gmac_ops rk3288_ops = { #define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1) #define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(0) +/* RK3328_GRF_MACPHY_CON1 */ +#define RK3328_MACPHY_RMII_MODE GRF_BIT(9) + static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv, int tx_delay, int rx_delay) { @@ -373,13 +378,17 @@ static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv, static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv) { struct device *dev = &bsp_priv->pdev->dev; + unsigned int reg; if (IS_ERR(bsp_priv->grf)) { dev_err(dev, "Missing rockchip,grf property\n"); return; } - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + reg = bsp_priv->integrated_phy ? 
RK3328_GRF_MAC_CON2 : + RK3328_GRF_MAC_CON1; + + regmap_write(bsp_priv->grf, reg, RK3328_GMAC_PHY_INTF_SEL_RMII | RK3328_GMAC_RMII_MODE); } @@ -409,29 +418,40 @@ static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) { struct device *dev = &bsp_priv->pdev->dev; + unsigned int reg; if (IS_ERR(bsp_priv->grf)) { dev_err(dev, "Missing rockchip,grf property\n"); return; } + reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 : + RK3328_GRF_MAC_CON1; + if (speed == 10) - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + regmap_write(bsp_priv->grf, reg, RK3328_GMAC_RMII_CLK_2_5M | RK3328_GMAC_SPEED_10M); else if (speed == 100) - regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1, + regmap_write(bsp_priv->grf, reg, RK3328_GMAC_RMII_CLK_25M | RK3328_GMAC_SPEED_100M); else dev_err(dev, "unknown speed value for RMII! speed=%d", speed); } +static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv) +{ + regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1, + RK3328_MACPHY_RMII_MODE); +} + static const struct rk_gmac_ops rk3328_ops = { .set_to_rgmii = rk3328_set_to_rgmii, .set_to_rmii = rk3328_set_to_rmii, .set_rgmii_speed = rk3328_set_rgmii_speed, .set_rmii_speed = rk3328_set_rmii_speed, + .integrated_phy_powerup = rk3328_integrated_phy_powerup, }; #define RK3366_GRF_SOC_CON6 0x0418 -- cgit v1.2.3-55-g7522 From db40f15b53e430d697efa6229b1bced30894d0d3 Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 22:01:08 +0800 Subject: ARM: dts: rk3228-evb: Enable the integrated PHY for gmac This patch enables the integrated PHY for rk3228 evb board by default. To use the external 1000M PHY on evb board, need to make some switch of evb board to be on. Signed-off-by: David Wu Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller --- arch/arm/boot/dts/rk3228-evb.dts | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts index 58834330a5ba..456ddf7f11bb 100644 --- a/arch/arm/boot/dts/rk3228-evb.dts +++ b/arch/arm/boot/dts/rk3228-evb.dts @@ -50,6 +50,16 @@ device_type = "memory"; reg = <0x60000000 0x40000000>; }; + + vcc_phy: vcc-phy-regulator { + compatible = "regulator-fixed"; + enable-active-high; + regulator-name = "vcc_phy"; + regulator-min-microvolt = <1800000>; + regulator-max-microvolt = <1800000>; + regulator-always-on; + regulator-boot-on; + }; }; &emmc { @@ -60,6 +70,30 @@ status = "okay"; }; +&gmac { + assigned-clocks = <&cru SCLK_MAC_SRC>; + assigned-clock-rates = <50000000>; + clock_in_out = "output"; + phy-supply = <&vcc_phy>; + phy-mode = "rmii"; + phy-handle = <&phy0>; + status = "okay"; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + phy@0 { + compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22"; + reg = <0>; + clocks = <&cru SCLK_MAC_PHY>; + resets = <&cru SRST_MACPHY>; + phy-is-integrated; + }; + }; +}; + &tsadc { status = "okay"; -- cgit v1.2.3-55-g7522 From 9c4cc910fe28939e485e78c45d9d90931e652759 Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 22:01:41 +0800 Subject: ARM64: dts: rockchip: Add gmac2phy node support for rk3328 The gmac2phy controller of rk3328 is connected to integrated PHY directly inside, add the node for the integrated PHY support. Signed-off-by: David Wu Signed-off-by: David S. 
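The "ethernet-phy-id1234.d400" compatible strings in these device trees are what tie the PHY nodes to the Rockchip PHY driver added earlier: the OF/MDIO core derives a 32-bit PHY ID (0x1234d400) from the two hex fields and matches it against the driver's .phy_id/.phy_id_mask pair. A standalone sketch of that derivation (illustrative code, not the kernel's implementation):

#include <stdio.h>

/* "ethernet-phy-idAAAA.BBBB" -> 0xAAAABBBB, compared against
 * INTERNAL_EPHY_ID (0x1234d400) under mask 0xfffffff0 in rockchip.c */
static int parse_phy_id(const char *compatible, unsigned int *phy_id)
{
        unsigned int upper, lower;

        if (sscanf(compatible, "ethernet-phy-id%4x.%4x", &upper, &lower) != 2)
                return -1;

        *phy_id = (upper << 16) | lower;
        return 0;
}

int main(void)
{
        unsigned int id;

        if (!parse_phy_id("ethernet-phy-id1234.d400", &id))
                printf("phy id 0x%08x\n", id);  /* prints 0x1234d400 */
        return 0;
}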
Miller --- arch/arm64/boot/dts/rockchip/rk3328.dtsi | 39 ++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 0be96cee27bd..d48bf5d9f8bd 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -63,6 +63,8 @@ i2c1 = &i2c1; i2c2 = &i2c2; i2c3 = &i2c3; + ethernet0 = &gmac2io; + ethernet1 = &gmac2phy; }; cpus { @@ -424,6 +426,43 @@ status = "disabled"; }; + gmac2phy: ethernet@ff550000 { + compatible = "rockchip,rk3328-gmac"; + reg = <0x0 0xff550000 0x0 0x10000>; + rockchip,grf = <&grf>; + interrupts = ; + interrupt-names = "macirq"; + clocks = <&cru SCLK_MAC2PHY_SRC>, <&cru SCLK_MAC2PHY_RXTX>, + <&cru SCLK_MAC2PHY_RXTX>, <&cru SCLK_MAC2PHY_REF>, + <&cru ACLK_MAC2PHY>, <&cru PCLK_MAC2PHY>, + <&cru SCLK_MAC2PHY_OUT>; + clock-names = "stmmaceth", "mac_clk_rx", + "mac_clk_tx", "clk_mac_ref", + "aclk_mac", "pclk_mac", + "clk_macphy"; + resets = <&cru SRST_GMAC2PHY_A>, <&cru SRST_MACPHY>; + reset-names = "stmmaceth", "mac-phy"; + phy-mode = "rmii"; + phy-handle = <&phy>; + status = "disabled"; + + mdio { + compatible = "snps,dwmac-mdio"; + #address-cells = <1>; + #size-cells = <0>; + + phy: phy@0 { + compatible = "ethernet-phy-id1234.d400", "ethernet-phy-ieee802.3-c22"; + reg = <0>; + clocks = <&cru SCLK_MAC2PHY_OUT>; + resets = <&cru SRST_MACPHY>; + pinctrl-names = "default"; + pinctrl-0 = <&fephyled_rxm1 &fephyled_linkm1>; + phy-is-integrated; + }; + }; + }; + gic: interrupt-controller@ff811000 { compatible = "arm,gic-400"; #interrupt-cells = <3>; -- cgit v1.2.3-55-g7522 From 4b05bc6157eb94d01abe493c72f5a3a5014718c8 Mon Sep 17 00:00:00 2001 From: David Wu Date: Thu, 10 Aug 2017 22:02:01 +0800 Subject: ARM64: dts: rockchip: Enable gmac2phy for rk3328-evb Enable the gmac2phy, make the gmac2phy work on the rk3328-evb board. Signed-off-by: David Wu Signed-off-by: David S. Miller --- arch/arm64/boot/dts/rockchip/rk3328-evb.dts | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts index cf272392cebf..b9f36dad17e6 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts @@ -50,6 +50,23 @@ chosen { stdout-path = "serial2:1500000n8"; }; + + vcc_phy: vcc-phy-regulator { + compatible = "regulator-fixed"; + regulator-name = "vcc_phy"; + regulator-always-on; + regulator-boot-on; + }; +}; + +&gmac2phy { + phy-supply = <&vcc_phy>; + clock_in_out = "output"; + assigned-clocks = <&cru SCLK_MAC2PHY_SRC>; + assigned-clock-rate = <50000000>; + assigned-clocks = <&cru SCLK_MAC2PHY>; + assigned-clock-parents = <&cru SCLK_MAC2PHY_SRC>; + status = "okay"; }; &uart2 { -- cgit v1.2.3-55-g7522 From 9537e7cbf20787416b25833f2f9886166d8148f2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 10 Aug 2017 12:54:59 +0300 Subject: hns3pf: fix hns3_del_tunnel_port() This function has a copy and paste bug so it accidentally calls the add function instead of the delete function. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 069ae426aa24..e519795dff7c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1138,7 +1138,7 @@ static void hns3_del_tunnel_port(struct net_device *netdev, u16 port, udp_tnl->dst_port = 0; /* TBD send command to hardware to del port */ if (h->ae_algo->ops->del_tunnel_udp) - h->ae_algo->ops->add_tunnel_udp(h, port); + h->ae_algo->ops->del_tunnel_udp(h, port); } /* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports -- cgit v1.2.3-55-g7522 From 720a8478b2689bd221e4a8744e1695de7966a22f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 10 Aug 2017 12:56:14 +0300 Subject: hns3pf: Fix some harmless copy and paste bugs These were copy and paste bugs, but I believe they are harmless. Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index e519795dff7c..9589b7e1d24c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -330,7 +330,7 @@ static int hns3_nic_mc_sync(struct net_device *netdev, struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; - if (h->ae_algo->ops->add_uc_addr) + if (h->ae_algo->ops->add_mc_addr) return h->ae_algo->ops->add_mc_addr(h, addr); return 0; @@ -342,7 +342,7 @@ static int hns3_nic_mc_unsync(struct net_device *netdev, struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; - if (h->ae_algo->ops->rm_uc_addr) + if (h->ae_algo->ops->rm_mc_addr) return h->ae_algo->ops->rm_mc_addr(h, addr); return 0; -- cgit v1.2.3-55-g7522 From e37425c23afd34fb73d329d2ca7b31ae1e6f2be5 Mon Sep 17 00:00:00 2001 From: Florinel Iordache Date: Thu, 10 Aug 2017 16:47:04 +0300 Subject: fsl/fman: implement several errata workarounds Implemented workarounds for the following dTSEC Erratum: A002, A004, A0012, A0014, A004839 on several operations that involve MAC CFG register changes: adjust link, rx pause frames, modify MAC address. Signed-off-by: Florinel Iordache Acked-by: Madalin Bucur Signed-off-by: David S. 
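Every entry point patched below repeats the same bracket: check RCTRL[GRS] and TCTRL[GTS] to see which directions are currently running, gracefully stop them (observing the errata-mandated delays), touch the MAC configuration, then restart. Condensed into a sketch with an invented helper name — the driver itself open-codes this in each function:

/* hypothetical helper illustrating the pattern the patch introduces */
static enum comm_mode dtsec_stop_running(struct fman_mac *dtsec)
{
        struct dtsec_regs __iomem *regs = dtsec->regs;
        enum comm_mode mode = COMM_MODE_NONE;

        if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
                mode |= COMM_MODE_RX;
        if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
                mode |= COMM_MODE_TX;

        graceful_stop(dtsec, mode);     /* applies the A002/A004839 delays */
        return mode;
}

/* usage, e.g. in dtsec_adjust_link():
 *      mode = dtsec_stop_running(dtsec);
 *      ... update maccfg2/ecntrl ...
 *      graceful_start(dtsec, mode);
 */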
Miller --- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 118 ++++++++++++++++++----- 1 file changed, 93 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 98bba10fc38c..ea43b4974149 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -123,7 +123,7 @@ #define DTSEC_ECNTRL_R100M 0x00000008 #define DTSEC_ECNTRL_QSGMIIM 0x00000001 -#define DTSEC_TCTRL_GTS 0x00000020 +#define TCTRL_GTS 0x00000020 #define RCTRL_PAL_MASK 0x001f0000 #define RCTRL_PAL_SHIFT 16 @@ -863,6 +863,52 @@ int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val) return 0; } +static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + + if (mode & COMM_MODE_TX) + iowrite32be(ioread32be(®s->tctrl) & + ~TCTRL_GTS, ®s->tctrl); + if (mode & COMM_MODE_RX) + iowrite32be(ioread32be(®s->rctrl) & + ~RCTRL_GRS, ®s->rctrl); +} + +static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode) +{ + struct dtsec_regs __iomem *regs = dtsec->regs; + u32 tmp; + + /* Graceful stop - Assert the graceful Rx stop bit */ + if (mode & COMM_MODE_RX) { + tmp = ioread32be(®s->rctrl) | RCTRL_GRS; + iowrite32be(tmp, ®s->rctrl); + + if (dtsec->fm_rev_info.major == 2) { + /* Workaround for dTSEC Errata A002 */ + usleep_range(100, 200); + } else { + /* Workaround for dTSEC Errata A004839 */ + usleep_range(10, 50); + } + } + + /* Graceful stop - Assert the graceful Tx stop bit */ + if (mode & COMM_MODE_TX) { + if (dtsec->fm_rev_info.major == 2) { + /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */ + pr_debug("GTS not supported due to DTSEC_A004 Errata.\n"); + } else { + tmp = ioread32be(®s->tctrl) | TCTRL_GTS; + iowrite32be(tmp, ®s->tctrl); + + /* Workaround for dTSEC Errata A0012, A0014 */ + usleep_range(10, 50); + } + } +} + int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode) { struct dtsec_regs __iomem *regs = dtsec->regs; @@ -880,13 +926,8 @@ int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode) iowrite32be(tmp, ®s->maccfg1); - /* Graceful start - clear the graceful receive stop bit */ - if (mode & COMM_MODE_TX) - iowrite32be(ioread32be(®s->tctrl) & ~DTSEC_TCTRL_GTS, - ®s->tctrl); - if (mode & COMM_MODE_RX) - iowrite32be(ioread32be(®s->rctrl) & ~RCTRL_GRS, - ®s->rctrl); + /* Graceful start - clear the graceful Rx/Tx stop bit */ + graceful_start(dtsec, mode); return 0; } @@ -899,23 +940,8 @@ int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode) if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; - /* Gracefull stop - Assert the graceful transmit stop bit */ - if (mode & COMM_MODE_RX) { - tmp = ioread32be(®s->rctrl) | RCTRL_GRS; - iowrite32be(tmp, ®s->rctrl); - - if (dtsec->fm_rev_info.major == 2) - usleep_range(100, 200); - else - udelay(10); - } - - if (mode & COMM_MODE_TX) { - if (dtsec->fm_rev_info.major == 2) - pr_debug("GTS not supported due to DTSEC_A004 errata.\n"); - else - pr_debug("GTS not supported due to DTSEC_A0014 errata.\n"); - } + /* Graceful stop - Assert the graceful Rx/Tx stop bit */ + graceful_stop(dtsec, mode); tmp = ioread32be(®s->maccfg1); if (mode & COMM_MODE_RX) @@ -933,11 +959,19 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u16 pause_time, u16 __maybe_unused thresh_time) { struct dtsec_regs __iomem *regs = dtsec->regs; + enum comm_mode mode = COMM_MODE_NONE; u32 ptv = 0; if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; + if 
((ioread32be(®s->rctrl) & RCTRL_GRS) == 0) + mode |= COMM_MODE_RX; + if ((ioread32be(®s->tctrl) & TCTRL_GTS) == 0) + mode |= COMM_MODE_TX; + + graceful_stop(dtsec, mode); + if (pause_time) { /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) { @@ -958,17 +992,27 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, iowrite32be(ioread32be(®s->maccfg1) & ~MACCFG1_TX_FLOW, ®s->maccfg1); + graceful_start(dtsec, mode); + return 0; } int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) { struct dtsec_regs __iomem *regs = dtsec->regs; + enum comm_mode mode = COMM_MODE_NONE; u32 tmp; if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; + if ((ioread32be(®s->rctrl) & RCTRL_GRS) == 0) + mode |= COMM_MODE_RX; + if ((ioread32be(®s->tctrl) & TCTRL_GTS) == 0) + mode |= COMM_MODE_TX; + + graceful_stop(dtsec, mode); + tmp = ioread32be(®s->maccfg1); if (en) tmp |= MACCFG1_RX_FLOW; @@ -976,20 +1020,34 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en) tmp &= ~MACCFG1_RX_FLOW; iowrite32be(tmp, ®s->maccfg1); + graceful_start(dtsec, mode); + return 0; } int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr) { + struct dtsec_regs __iomem *regs = dtsec->regs; + enum comm_mode mode = COMM_MODE_NONE; + if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; + if ((ioread32be(®s->rctrl) & RCTRL_GRS) == 0) + mode |= COMM_MODE_RX; + if ((ioread32be(®s->tctrl) & TCTRL_GTS) == 0) + mode |= COMM_MODE_TX; + + graceful_stop(dtsec, mode); + /* Initialize MAC Station Address registers (1 & 2) * Station address have to be swapped (big endian to little endian */ dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr); set_mac_address(dtsec->regs, (u8 *)(*enet_addr)); + graceful_start(dtsec, mode); + return 0; } @@ -1162,11 +1220,19 @@ int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val) int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed) { struct dtsec_regs __iomem *regs = dtsec->regs; + enum comm_mode mode = COMM_MODE_NONE; u32 tmp; if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; + if ((ioread32be(®s->rctrl) & RCTRL_GRS) == 0) + mode |= COMM_MODE_RX; + if ((ioread32be(®s->tctrl) & TCTRL_GTS) == 0) + mode |= COMM_MODE_TX; + + graceful_stop(dtsec, mode); + tmp = ioread32be(®s->maccfg2); /* Full Duplex */ @@ -1186,6 +1252,8 @@ int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed) tmp &= ~DTSEC_ECNTRL_R100M; iowrite32be(tmp, ®s->ecntrl); + graceful_start(dtsec, mode); + return 0; } -- cgit v1.2.3-55-g7522 From 839da4d98960bcc68e6b7b945b33ad3916ec1e92 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 10 Aug 2017 13:49:10 -0700 Subject: net: ipv4: set orig_oif based on fib result for local traffic Attempts to connect to a local address with a socket bound to a device with the local address hangs if there is no listener: $ ip addr sh dev eth1 3: eth1: mtu 1500 qdisc mq state UP group default qlen 1000 link/ether 02:e0:f9:1c:00:37 brd ff:ff:ff:ff:ff:ff inet 10.100.1.4/24 scope global eth1 valid_lft forever preferred_lft forever inet6 2001:db8:1::4/120 scope global valid_lft forever preferred_lft forever inet6 fe80::e0:f9ff:fe1c:37/64 scope link valid_lft forever preferred_lft forever $ vrf-test -I eth1 -r 10.100.1.4 (don't let the command name fool you; vrf-test works without vrfs.) The problem is that the original intended device, eth1 in this case, is lost when the tcp reset is sent, so the socket lookup does not find a match for the reset and the connect attempt hangs. 
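The reproducer amounts to a device-bound TCP connect to a local address with no listener. A minimal standalone sketch of what a tool like vrf-test does here (device name, address and port are placeholders; error handling is trimmed; SO_BINDTODEVICE needs CAP_NET_RAW):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void)
{
        struct sockaddr_in sa = { .sin_family = AF_INET,
                                  .sin_port = htons(12345) };  /* no listener */
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        inet_pton(AF_INET, "10.100.1.4", &sa.sin_addr);        /* local address */

        /* bind the socket to the device that carries that address */
        setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                   "eth1", strlen("eth1") + 1);

        /* without the fix described next this call hangs; with it, the RST
         * matches the socket and connect() fails fast with ECONNREFUSED */
        if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
                perror("connect");

        close(fd);
        return 0;
}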
Fix by adjusting orig_oif for local traffic to the device from the fib lookup result. With this patch you get the more user friendly: $ vrf-test -I eth1 -r 10.100.1.4 connect failed: 111: Connection refused orig_oif is saved to the newly created rtable as rt_iif and when set it is used as the dif for socket lookups. It is set based on flowi4_oif passed in to ip_route_output_key_hash_rcu and will be set to either the loopback device, an l3mdev device, nothing (flowi4_oif = 0 which is the case in the example above) or a netdev index depending on the lookup path. In each case, resetting orig_oif to the device in the fib result for the RTN_LOCAL case allows the actual device to be preserved as the skb tx and rx is done over the loopback or VRF device. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv4/route.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 2ef46294475f..b88836e6b4a1 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2439,6 +2439,12 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4, /* L3 master device is the loopback for that domain */ dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? : net->loopback_dev; + + /* make sure orig_oif points to fib result device even + * though packet rx/tx happens over loopback or l3mdev + */ + orig_oif = FIB_RES_OIF(*res); + fl4->flowi4_oif = dev_out->ifindex; flags |= RTCF_LOCAL; goto make_route; -- cgit v1.2.3-55-g7522 From 6aa200058e4df8a60681ddd67d7932fdf2ce58ab Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Fri, 11 Aug 2017 01:55:20 +0300 Subject: wan: dscc4: add checks for dma mapping errors The driver does not check if mapping dma memory succeed. The patch adds the checks and failure handling. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Signed-off-by: David S. 
Miller --- drivers/net/wan/dscc4.c | 53 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 16 deletions(-) diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 799830ffcae2..8480dbfc70f2 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -519,22 +519,30 @@ static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev) { unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE; + struct pci_dev *pdev = dpriv->pci_priv->pdev; struct RxFD *rx_fd = dpriv->rx_fd + dirty; const int len = RX_MAX(HDLC_MAX_MRU); struct sk_buff *skb; - int ret = 0; + dma_addr_t addr; skb = dev_alloc_skb(len); + if (!skb) + goto err_out; + + skb->protocol = hdlc_type_trans(skb, dev); + addr = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, addr)) + goto err_free_skb; + dpriv->rx_skbuff[dirty] = skb; - if (skb) { - skb->protocol = hdlc_type_trans(skb, dev); - rx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev, - skb->data, len, PCI_DMA_FROMDEVICE)); - } else { - rx_fd->data = 0; - ret = -1; - } - return ret; + rx_fd->data = cpu_to_le32(addr); + return 0; + +err_free_skb: + dev_kfree_skb_any(skb); +err_out: + rx_fd->data = 0; + return -1; } /* @@ -1145,16 +1153,23 @@ static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct dscc4_pci_priv *ppriv = dpriv->pci_priv; + struct pci_dev *pdev = dpriv->pci_priv->pdev; struct TxFD *tx_fd; + dma_addr_t addr; int next; + addr = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, addr)) { + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + next = dpriv->tx_current%TX_RING_SIZE; dpriv->tx_skbuff[next] = skb; tx_fd = dpriv->tx_fd + next; tx_fd->state = FrameEnd | TO_STATE_TX(skb->len); - tx_fd->data = cpu_to_le32(pci_map_single(ppriv->pdev, skb->data, skb->len, - PCI_DMA_TODEVICE)); + tx_fd->data = cpu_to_le32(addr); tx_fd->complete = 0x00000000; tx_fd->jiffies = jiffies; mb(); @@ -1887,16 +1902,22 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) skb = dev_alloc_skb(DUMMY_SKB_SIZE); if (skb) { + struct pci_dev *pdev = dpriv->pci_priv->pdev; int last = dpriv->tx_dirty%TX_RING_SIZE; struct TxFD *tx_fd = dpriv->tx_fd + last; + dma_addr_t addr; skb->len = DUMMY_SKB_SIZE; skb_copy_to_linear_data(skb, version, strlen(version) % DUMMY_SKB_SIZE); + addr = pci_map_single(pdev, skb->data, DUMMY_SKB_SIZE, + PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(pdev, addr)) { + dev_kfree_skb_any(skb); + return NULL; + } tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE); - tx_fd->data = cpu_to_le32(pci_map_single(dpriv->pci_priv->pdev, - skb->data, DUMMY_SKB_SIZE, - PCI_DMA_TODEVICE)); + tx_fd->data = cpu_to_le32(addr); dpriv->tx_skbuff[last] = skb; } return skb; -- cgit v1.2.3-55-g7522 From ba6ba68f597be06f9ad381c142c65ca0af32640d Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Fri, 11 Aug 2017 01:55:21 +0300 Subject: wan: dscc4: convert to plain DMA API Make use the dma_*() interfaces rather than the pci_*() interfaces. Signed-off-by: Alexey Khoroshilov Signed-off-by: David S. 
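The conversion is mechanical: the pci_* DMA helpers are thin wrappers around the generic DMA API, so each call site swaps the struct pci_dev for its embedded struct device and renames the direction flags. A condensed before/after sketch with an invented helper (not dscc4 code):

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* old: addr = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
 *      if (pci_dma_mapping_error(pdev, addr)) ...
 * new, same semantics through the generic API: */
static int foo_map_rx(struct pci_dev *pdev, struct sk_buff *skb,
                      int len, dma_addr_t *addr)
{
        struct device *d = &pdev->dev;

        *addr = dma_map_single(d, skb->data, len, DMA_FROM_DEVICE);
        return dma_mapping_error(d, *addr) ? -ENOMEM : 0;
}

Likewise pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() (with an explicit GFP flag — GFP_KERNEL in this driver's process-context callers), pci_unmap_single() becomes dma_unmap_single(), and PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE map to DMA_TO_DEVICE/DMA_FROM_DEVICE.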
Miller --- drivers/net/wan/dscc4.c | 96 ++++++++++++++++++++++++++----------------------- 1 file changed, 51 insertions(+), 45 deletions(-) diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 8480dbfc70f2..a043fb1367bd 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c @@ -483,20 +483,20 @@ static void dscc4_tx_print(struct net_device *dev, static void dscc4_release_ring(struct dscc4_dev_priv *dpriv) { - struct pci_dev *pdev = dpriv->pci_priv->pdev; + struct device *d = &dpriv->pci_priv->pdev->dev; struct TxFD *tx_fd = dpriv->tx_fd; struct RxFD *rx_fd = dpriv->rx_fd; struct sk_buff **skbuff; int i; - pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma); - pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); + dma_free_coherent(d, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma); + dma_free_coherent(d, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); skbuff = dpriv->tx_skbuff; for (i = 0; i < TX_RING_SIZE; i++) { if (*skbuff) { - pci_unmap_single(pdev, le32_to_cpu(tx_fd->data), - (*skbuff)->len, PCI_DMA_TODEVICE); + dma_unmap_single(d, le32_to_cpu(tx_fd->data), + (*skbuff)->len, DMA_TO_DEVICE); dev_kfree_skb(*skbuff); } skbuff++; @@ -506,8 +506,9 @@ static void dscc4_release_ring(struct dscc4_dev_priv *dpriv) skbuff = dpriv->rx_skbuff; for (i = 0; i < RX_RING_SIZE; i++) { if (*skbuff) { - pci_unmap_single(pdev, le32_to_cpu(rx_fd->data), - RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE); + dma_unmap_single(d, le32_to_cpu(rx_fd->data), + RX_MAX(HDLC_MAX_MRU), + DMA_FROM_DEVICE); dev_kfree_skb(*skbuff); } skbuff++; @@ -519,7 +520,7 @@ static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev) { unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE; - struct pci_dev *pdev = dpriv->pci_priv->pdev; + struct device *d = &dpriv->pci_priv->pdev->dev; struct RxFD *rx_fd = dpriv->rx_fd + dirty; const int len = RX_MAX(HDLC_MAX_MRU); struct sk_buff *skb; @@ -530,8 +531,8 @@ static inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, goto err_out; skb->protocol = hdlc_type_trans(skb, dev); - addr = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(pdev, addr)) + addr = dma_map_single(d, skb->data, len, DMA_FROM_DEVICE); + if (dma_mapping_error(d, addr)) goto err_free_skb; dpriv->rx_skbuff[dirty] = skb; @@ -654,7 +655,7 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev) { struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE; - struct pci_dev *pdev = dpriv->pci_priv->pdev; + struct device *d = &dpriv->pci_priv->pdev->dev; struct sk_buff *skb; int pkt_len; @@ -664,8 +665,8 @@ static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv, goto refill; } pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2)); - pci_unmap_single(pdev, le32_to_cpu(rx_fd->data), - RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE); + dma_unmap_single(d, le32_to_cpu(rx_fd->data), + RX_MAX(HDLC_MAX_MRU), DMA_FROM_DEVICE); if ((skb->data[--pkt_len] & FrameOk) == FrameOk) { dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; @@ -782,8 +783,8 @@ static int dscc4_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc = -ENOMEM; - priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev, - IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma); + priv->iqcfg = (__le32 *)dma_alloc_coherent(&pdev->dev, + IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma, GFP_KERNEL); if (!priv->iqcfg) goto err_free_irq_5; writel(priv->iqcfg_dma, ioaddr + IQCFG); @@ -794,16 +795,18 @@ static int dscc4_init_one(struct 
pci_dev *pdev, const struct pci_device_id *ent) */ for (i = 0; i < dev_per_card; i++) { dpriv = priv->root + i; - dpriv->iqtx = (__le32 *) pci_alloc_consistent(pdev, - IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma); + dpriv->iqtx = (__le32 *)dma_alloc_coherent(&pdev->dev, + IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma, + GFP_KERNEL); if (!dpriv->iqtx) goto err_free_iqtx_6; writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4); } for (i = 0; i < dev_per_card; i++) { dpriv = priv->root + i; - dpriv->iqrx = (__le32 *) pci_alloc_consistent(pdev, - IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma); + dpriv->iqrx = (__le32 *)dma_alloc_coherent(&pdev->dev, + IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma, + GFP_KERNEL); if (!dpriv->iqrx) goto err_free_iqrx_7; writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4); @@ -827,18 +830,18 @@ out: err_free_iqrx_7: while (--i >= 0) { dpriv = priv->root + i; - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqrx, dpriv->iqrx_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), + dpriv->iqrx, dpriv->iqrx_dma); } i = dev_per_card; err_free_iqtx_6: while (--i >= 0) { dpriv = priv->root + i; - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqtx, dpriv->iqtx_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), + dpriv->iqtx, dpriv->iqtx_dma); } - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg, - priv->iqcfg_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg, + priv->iqcfg_dma); err_free_irq_5: free_irq(pdev->irq, priv->root); err_release_4: @@ -1153,13 +1156,13 @@ static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct pci_dev *pdev = dpriv->pci_priv->pdev; + struct device *d = &dpriv->pci_priv->pdev->dev; struct TxFD *tx_fd; dma_addr_t addr; int next; - addr = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, addr)) { + addr = dma_map_single(d, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(d, addr)) { dev_kfree_skb_any(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; @@ -1587,8 +1590,9 @@ try: tx_fd = dpriv->tx_fd + cur; skb = dpriv->tx_skbuff[cur]; if (skb) { - pci_unmap_single(ppriv->pdev, le32_to_cpu(tx_fd->data), - skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&ppriv->pdev->dev, + le32_to_cpu(tx_fd->data), + skb->len, DMA_TO_DEVICE); if (tx_fd->state & FrameEnd) { dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; @@ -1902,7 +1906,7 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) skb = dev_alloc_skb(DUMMY_SKB_SIZE); if (skb) { - struct pci_dev *pdev = dpriv->pci_priv->pdev; + struct device *d = &dpriv->pci_priv->pdev->dev; int last = dpriv->tx_dirty%TX_RING_SIZE; struct TxFD *tx_fd = dpriv->tx_fd + last; dma_addr_t addr; @@ -1910,9 +1914,9 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) skb->len = DUMMY_SKB_SIZE; skb_copy_to_linear_data(skb, version, strlen(version) % DUMMY_SKB_SIZE); - addr = pci_map_single(pdev, skb->data, DUMMY_SKB_SIZE, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, addr)) { + addr = dma_map_single(d, skb->data, DUMMY_SKB_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(d, addr)) { dev_kfree_skb_any(skb); return NULL; } @@ -1926,18 +1930,20 @@ static struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv) static int dscc4_init_ring(struct net_device *dev) { struct dscc4_dev_priv *dpriv = dscc4_priv(dev); - struct pci_dev *pdev = 
dpriv->pci_priv->pdev; + struct device *d = &dpriv->pci_priv->pdev->dev; struct TxFD *tx_fd; struct RxFD *rx_fd; void *ring; int i; - ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma); + ring = dma_alloc_coherent(d, RX_TOTAL_SIZE, &dpriv->rx_fd_dma, + GFP_KERNEL); if (!ring) goto err_out; dpriv->rx_fd = rx_fd = (struct RxFD *) ring; - ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma); + ring = dma_alloc_coherent(d, TX_TOTAL_SIZE, &dpriv->tx_fd_dma, + GFP_KERNEL); if (!ring) goto err_free_dma_rx; dpriv->tx_fd = tx_fd = (struct TxFD *) ring; @@ -1975,9 +1981,9 @@ static int dscc4_init_ring(struct net_device *dev) return 0; err_free_dma_tx: - pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma); + dma_free_coherent(d, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma); err_free_dma_rx: - pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); + dma_free_coherent(d, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma); err_out: return -ENOMEM; } @@ -1997,16 +2003,16 @@ static void dscc4_remove_one(struct pci_dev *pdev) dscc4_pci_reset(pdev, ioaddr); free_irq(pdev->irq, root); - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg, - ppriv->iqcfg_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg, + ppriv->iqcfg_dma); for (i = 0; i < dev_per_card; i++) { struct dscc4_dev_priv *dpriv = root + i; dscc4_release_ring(dpriv); - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqrx, dpriv->iqrx_dma); - pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), - dpriv->iqtx, dpriv->iqtx_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), + dpriv->iqrx, dpriv->iqrx_dma); + dma_free_coherent(&pdev->dev, IRQ_RING_SIZE*sizeof(u32), + dpriv->iqtx, dpriv->iqtx_dma); } dscc4_free1(pdev); -- cgit v1.2.3-55-g7522 From 939912216fa8f62331de7d04edff492d5dc8e6e9 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Thu, 10 Aug 2017 20:16:29 -0700 Subject: net: skb_needs_check() removes CHECKSUM_UNNECESSARY check for tx. Because we remove the UFO support, we will also remove the CHECKSUM_UNNECESSARY check in skb_needs_check(). Cc: Willem de Bruijn Signed-off-by: Tonghao Zhang Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/core/dev.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/net/core/dev.c b/net/core/dev.c index 3f69f6e71824..1024d3741d12 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2731,8 +2731,7 @@ EXPORT_SYMBOL(skb_mac_gso_segment); static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) { if (tx_path) - return skb->ip_summed != CHECKSUM_PARTIAL && - skb->ip_summed != CHECKSUM_UNNECESSARY; + return skb->ip_summed != CHECKSUM_PARTIAL; return skb->ip_summed == CHECKSUM_NONE; } -- cgit v1.2.3-55-g7522 From 12acb133ece5a0b25b0984d31d870619966f63fe Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Fri, 11 Aug 2017 10:25:44 +0200 Subject: nfp: send control message when MAC representors are created The firmware expects a MAC_REPR control message when a MAC representor is created. The driver should expect a PORTMOD message to follow which will provide the link states of the physical port associated with the MAC representor. Signed-off-by: Simon Horman Reviewed-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 33 ++++++++++++++++++++++++ drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 21 +++++++++++++++ drivers/net/ethernet/netronome/nfp/flower/main.c | 29 ++++++++++++++++++--- 3 files changed, 80 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index dd7fa9cf225f..a69d68ba3d0c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -75,6 +75,39 @@ nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size, return skb; } +struct sk_buff * +nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports) +{ + struct nfp_flower_cmsg_mac_repr *msg; + struct sk_buff *skb; + unsigned int size; + + size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]); + skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR); + if (!skb) + return NULL; + + msg = nfp_flower_cmsg_get_data(skb); + memset(msg->reserved, 0, sizeof(msg->reserved)); + msg->num_ports = num_ports; + + return skb; +} + +void +nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx, + unsigned int nbi, unsigned int nbi_port, + unsigned int phys_port) +{ + struct nfp_flower_cmsg_mac_repr *msg; + + msg = nfp_flower_cmsg_get_data(skb); + msg->ports[idx].idx = idx; + msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI; + msg->ports[idx].nbi_port = nbi_port; + msg->ports[idx].phys_port = phys_port; +} + int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok) { struct nfp_flower_cmsg_portmod *msg; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index cf738de170ab..aa92a8711a02 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -247,12 +247,27 @@ struct nfp_flower_cmsg_hdr { enum nfp_flower_cmsg_type_port { NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0, NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2, + NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7, NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8, NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15, NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16, NFP_FLOWER_CMSG_TYPE_MAX = 32, }; +/* NFP_FLOWER_CMSG_TYPE_MAC_REPR */ +struct nfp_flower_cmsg_mac_repr { + u8 reserved[3]; + u8 num_ports; + struct { + u8 idx; + u8 info; + u8 nbi_port; + u8 phys_port; + } ports[0]; +}; + +#define NFP_FLOWER_CMSG_MAC_REPR_NBI GENMASK(1, 0) + /* NFP_FLOWER_CMSG_TYPE_PORT_MOD */ struct nfp_flower_cmsg_portmod { __be32 portnum; @@ -308,6 +323,12 @@ static inline void *nfp_flower_cmsg_get_data(struct sk_buff *skb) return (unsigned char *)skb->data + NFP_FLOWER_CMSG_HLEN; } +struct sk_buff * +nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports); +void +nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx, + unsigned int nbi, unsigned int nbi_port, + unsigned int phys_port); int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok); void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb); struct sk_buff * diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 6a65c8b33807..b905454b30ca 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -214,15 +214,22 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) { struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; struct nfp_reprs *reprs, 
*old_reprs; + struct sk_buff *ctrl_skb; unsigned int i; int err; - reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); - if (!reprs) + ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); + if (!ctrl_skb) return -ENOMEM; + reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); + if (!reprs) { + err = -ENOMEM; + goto err_free_ctrl_skb; + } + for (i = 0; i < eth_tbl->count; i++) { - int phys_port = eth_tbl->ports[i].index; + unsigned int phys_port = eth_tbl->ports[i].index; struct nfp_port *port; u32 cmsg_port_id; @@ -255,6 +262,11 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) goto err_reprs_clean; } + nfp_flower_cmsg_mac_repr_add(ctrl_skb, i, + eth_tbl->ports[i].nbi, + eth_tbl->ports[i].base, + phys_port); + nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n", phys_port, reprs->reprs[phys_port]->name); } @@ -265,9 +277,20 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) goto err_reprs_clean; } + /* The MAC_REPR control message should be sent after the MAC + * representors are registered using nfp_app_reprs_set(). This is + * because the firmware may respond with control messages for the + * MAC representors, f.e. to provide the driver with information + * about their state, and without registration the driver will drop + * any such messages. + */ + nfp_ctrl_tx(app->ctrl, ctrl_skb); + return 0; err_reprs_clean: nfp_reprs_clean_and_free(reprs); +err_free_ctrl_skb: + kfree_skb(ctrl_skb); return err; } -- cgit v1.2.3-55-g7522 From 0ed80da518a1f27562a013f106505e495e891fe4 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Fri, 11 Aug 2017 04:26:26 -0700 Subject: openvswitch: Remove unnecessary newlines from OVS_NLERR uses OVS_NLERR already adds a newline so these just add blank lines to the logging. Signed-off-by: Joe Perches Acked-by: Joe Stringer Signed-off-by: David S. 
Miller --- net/openvswitch/conntrack.c | 14 +++++--------- net/openvswitch/flow_netlink.c | 2 +- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 03859e386b47..30d632509f82 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1180,15 +1180,13 @@ static int parse_nat(const struct nlattr *attr, int type = nla_type(a); if (type > OVS_NAT_ATTR_MAX) { - OVS_NLERR(log, - "Unknown NAT attribute (type=%d, max=%d).\n", + OVS_NLERR(log, "Unknown NAT attribute (type=%d, max=%d)", type, OVS_NAT_ATTR_MAX); return -EINVAL; } if (nla_len(a) != ovs_nat_attr_lens[type][ip_vers]) { - OVS_NLERR(log, - "NAT attribute type %d has unexpected length (%d != %d).\n", + OVS_NLERR(log, "NAT attribute type %d has unexpected length (%d != %d)", type, nla_len(a), ovs_nat_attr_lens[type][ip_vers]); return -EINVAL; @@ -1198,9 +1196,7 @@ static int parse_nat(const struct nlattr *attr, case OVS_NAT_ATTR_SRC: case OVS_NAT_ATTR_DST: if (info->nat) { - OVS_NLERR(log, - "Only one type of NAT may be specified.\n" - ); + OVS_NLERR(log, "Only one type of NAT may be specified"); return -ERANGE; } info->nat |= OVS_CT_NAT; @@ -1245,13 +1241,13 @@ static int parse_nat(const struct nlattr *attr, break; default: - OVS_NLERR(log, "Unknown nat attribute (%d).\n", type); + OVS_NLERR(log, "Unknown nat attribute (%d)", type); return -EINVAL; } } if (rem > 0) { - OVS_NLERR(log, "NAT attribute has %d unknown bytes.\n", rem); + OVS_NLERR(log, "NAT attribute has %d unknown bytes", rem); return -EINVAL; } if (!info->nat) { diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index f07d10ac35d8..e8eb427ce6d1 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -1255,7 +1255,7 @@ static int ovs_key_from_nlattrs(struct net *net, struct sw_flow_match *match, } if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) { - OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x).\n", + OVS_NLERR(log, "IPv6 flow label %x is out of range (max=%x)", ntohl(ipv6_key->ipv6_label), (1 << 20) - 1); return -EINVAL; } -- cgit v1.2.3-55-g7522 From 159fe88efda2f5b84681727a4291511338132c1f Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Fri, 11 Aug 2017 19:10:42 +0530 Subject: mlxsw: make mlxsw_config_profile const Make these structures const as they only stored in the profile field of a mlxsw_driver structure, which is of type const. Done using Coccinelle. Signed-off-by: Bhumika Goyal Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/switchib.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index a99600333a49..90a95cdc1626 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -3757,7 +3757,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_fids_fini(mlxsw_sp); } -static struct mlxsw_config_profile mlxsw_sp_config_profile = { +static const struct mlxsw_config_profile mlxsw_sp_config_profile = { .used_max_vepa_channels = 1, .max_vepa_channels = 0, .used_max_mid = 1, diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c index 74341fe0eb25..ab7a29846bfa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c @@ -497,7 +497,7 @@ static void mlxsw_sib_fini(struct mlxsw_core *mlxsw_core) mlxsw_sib_ports_remove(mlxsw_sib); } -static struct mlxsw_config_profile mlxsw_sib_config_profile = { +static const struct mlxsw_config_profile mlxsw_sib_config_profile = { .used_max_system_port = 1, .max_system_port = 48000, .used_max_ib_mc = 1, diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index 3b0f72455681..f3c29bbf07e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -1674,7 +1674,7 @@ static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core) mlxsw_sx_ports_remove(mlxsw_sx); } -static struct mlxsw_config_profile mlxsw_sx_config_profile = { +static const struct mlxsw_config_profile mlxsw_sx_config_profile = { .used_max_vepa_channels = 1, .max_vepa_channels = 0, .used_max_mid = 1, -- cgit v1.2.3-55-g7522 From 047dbb27ff470782c4bd3d574942e571e747463f Mon Sep 17 00:00:00 2001 From: William Tu Date: Fri, 11 Aug 2017 06:46:39 -0700 Subject: selftests: bpf: add check for ip XDP redirect Kernel test robot reports error when running test_xdp_redirect.sh. Check if ip tool supports xdpgeneric, if not, skip the test. Signed-off-by: William Tu Acked-by: Daniel Borkmann Cc: John Fastabend Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_xdp_redirect.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/testing/selftests/bpf/test_xdp_redirect.sh b/tools/testing/selftests/bpf/test_xdp_redirect.sh index d8c73ed6e040..344a3656dea6 100755 --- a/tools/testing/selftests/bpf/test_xdp_redirect.sh +++ b/tools/testing/selftests/bpf/test_xdp_redirect.sh @@ -23,6 +23,11 @@ cleanup() ip netns del ns2 2> /dev/null } +ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null +if [ $? -ne 0 ];then + echo "selftests: [SKIP] Could not run test without the ip xdpgeneric support" + exit 0 +fi set -e ip netns add ns1 -- cgit v1.2.3-55-g7522 From aa69ff9e9c32db8aa84835baffea1b70c39e5112 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Fri, 11 Aug 2017 11:22:09 -0700 Subject: liquidio: moved ptp_enable to octeon_device structure ptp_enable was a global static variable. Moved this global variable to octeon_device structure and removed extra device id check. Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 19 ++++++++++--------- drivers/net/ethernet/cavium/liquidio/octeon_device.h | 1 + 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index cbd6287e578e..8bf6dfcf5881 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -78,8 +78,6 @@ int octeon_console_debug_enabled(u32 console) return (console_bitmask >> (console)) & 0x1; } -static int ptp_enable = 1; - /* Polling interval for determining when NIC application is alive */ #define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 @@ -1363,6 +1361,13 @@ liquidio_probe(struct pci_dev *pdev, if (pdev->device == OCTEON_CN23XX_PF_VID) oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; + /* Enable PTP for 6XXX Device */ + if (((pdev->device == OCTEON_CN66XX) || + (pdev->device == OCTEON_CN68XX))) + oct_dev->ptp_enable = true; + else + oct_dev->ptp_enable = false; + dev_info(&pdev->dev, "Initializing device %x:%x.\n", (u32)pdev->vendor, (u32)pdev->device); @@ -2388,9 +2393,7 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; - if (((oct->chip_id == OCTEON_CN66XX) || - (oct->chip_id == OCTEON_CN68XX)) && - ptp_enable) { + if (oct->ptp_enable) { if (rh->r_dh.has_hwtstamp) { /* timestamp is included from the hardware at * the beginning of the packet. @@ -2735,8 +2738,7 @@ static int liquidio_open(struct net_device *netdev) oct->droq[0]->ops.poll_mode = 1; } - if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) && - ptp_enable) + if (oct->ptp_enable) oct_ptp_open(netdev); ifstate_set(lio, LIO_IFSTATE_RUNNING); @@ -3091,8 +3093,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) switch (cmd) { case SIOCSHWTSTAMP: - if ((lio->oct_dev->chip_id == OCTEON_CN66XX || - lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable) + if (lio->oct_dev->ptp_enable) return hwtstamp_ioctl(netdev, ifr); default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 31efdef02a24..b014e6ad0e9a 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -554,6 +554,7 @@ struct octeon_device { } loc; atomic_t *adapter_refcount; /* reference count of adapter */ + bool ptp_enable; }; #define OCT_DRV_ONLINE 1 -- cgit v1.2.3-55-g7522 From d0225784be6c9bdfb05149ebc30bf9fc1fdbce3a Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Wed, 9 Aug 2017 17:39:12 +0200 Subject: rtnelink: Move link dump consistency check out of the loop Calls to rtnl_dump_ifinfo() are protected by RTNL lock. So are the {list,unlist}_netdevice() calls where we bump the net->dev_base_seq number. For this reason net->dev_base_seq can't change under out feet while we're looping over links in rtnl_dump_ifinfo(). So move the check for net->dev_base_seq change (since the last time we were called) out of the loop. This way we avoid giving a wrong impression that there are concurrent updates to the link list going on while we're iterating over them. Signed-off-by: Jakub Sitnicki Signed-off-by: David S. 
Miller --- net/core/rtnetlink.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9e9f1419be60..a78fd61da0ec 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1621,8 +1621,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) s_h = cb->args[0]; s_idx = cb->args[1]; - cb->seq = net->dev_base_seq; - /* A hack to preserve kernel<->userspace interface. * The correct header is ifinfomsg. It is consistent with rtnl_getlink. * However, before Linux v3.9 the code here assumed rtgenmsg and that's @@ -1668,8 +1666,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) goto out_err; } - - nl_dump_check_consistent(cb, nlmsg_hdr(skb)); cont: idx++; } @@ -1679,6 +1675,8 @@ out: out_err: cb->args[1] = idx; cb->args[0] = h; + cb->seq = net->dev_base_seq; + nl_dump_check_consistent(cb, nlmsg_hdr(skb)); return err; } -- cgit v1.2.3-55-g7522 From 66ccbc9c87c2ae96585f9aa5f733609e6f4acf25 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 11 Aug 2017 19:41:16 +0800 Subject: tap: use build_skb() for small packet We use tun_alloc_skb() which calls sock_alloc_send_pskb() to allocate skb in the past. This socket based method is not suitable for high speed userspace like virtualization which usually: - ignore sk_sndbuf (INT_MAX) and expect to receive the packet as fast as possible - don't want to be block at sendmsg() To eliminate the above overheads, this patch tries to use build_skb() for small packet. We will do this only when the following conditions are all met: - TAP instead of TUN - sk_sndbuf is INT_MAX - caller don't want to be blocked - zerocopy is not used - packet size is smaller enough to use build_skb() Pktgen from guest to host shows ~11% improvement for rx pps of tap: Before: ~1.70Mpps After : ~1.88Mpps What's more important, this makes it possible to implement XDP for tap before creating skbs. Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- drivers/net/tun.c | 112 ++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 91 insertions(+), 21 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index d21510d47aa2..9736df40d2bf 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -105,6 +105,8 @@ do { \ } while (0) #endif +#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) + /* TUN device flags */ /* IFF_ATTACH_QUEUE is never stored in device flags, @@ -170,6 +172,7 @@ struct tun_file { struct list_head next; struct tun_struct *detached; struct skb_array tx_array; + struct page_frag alloc_frag; }; struct tun_flow_entry { @@ -571,6 +574,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean) } if (tun) skb_array_cleanup(&tfile->tx_array); + if (tfile->alloc_frag.page) + put_page(tfile->alloc_frag.page); sock_put(&tfile->sk); } } @@ -1190,6 +1195,61 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile, } } +static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, + int len, int noblock, bool zerocopy) +{ + if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP) + return false; + + if (tfile->socket.sk->sk_sndbuf != INT_MAX) + return false; + + if (!noblock) + return false; + + if (zerocopy) + return false; + + if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) + return false; + + return true; +} + +static struct sk_buff *tun_build_skb(struct tun_file *tfile, + struct iov_iter *from, + int len) +{ + struct page_frag *alloc_frag = &tfile->alloc_frag; + struct sk_buff *skb; + int buflen = SKB_DATA_ALIGN(len + TUN_RX_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + char *buf; + size_t copied; + + if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) + return ERR_PTR(-ENOMEM); + + buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; + copied = copy_page_from_iter(alloc_frag->page, + alloc_frag->offset + TUN_RX_PAD, + len, from); + if (copied != len) + return ERR_PTR(-EFAULT); + + skb = build_skb(buf, buflen); + if (!skb) + return ERR_PTR(-ENOMEM); + + skb_reserve(skb, TUN_RX_PAD); + skb_put(skb, len); + get_page(alloc_frag->page); + alloc_frag->offset += buflen; + + return skb; +} + /* Get packet from user space buffer */ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, void *msg_control, struct iov_iter *from, @@ -1263,30 +1323,38 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, zerocopy = true; } - if (!zerocopy) { - copylen = len; - if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) - linear = good_linear; - else - linear = tun16_to_cpu(tun, gso.hdr_len); - } - - skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); - if (IS_ERR(skb)) { - if (PTR_ERR(skb) != -EAGAIN) + if (tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { + skb = tun_build_skb(tfile, from, len); + if (IS_ERR(skb)) { this_cpu_inc(tun->pcpu_stats->rx_dropped); - return PTR_ERR(skb); - } + return PTR_ERR(skb); + } + } else { + if (!zerocopy) { + copylen = len; + if (tun16_to_cpu(tun, gso.hdr_len) > good_linear) + linear = good_linear; + else + linear = tun16_to_cpu(tun, gso.hdr_len); + } - if (zerocopy) - err = zerocopy_sg_from_iter(skb, from); - else - err = skb_copy_datagram_from_iter(skb, 0, from, len); + skb = tun_alloc_skb(tfile, align, copylen, linear, noblock); + if (IS_ERR(skb)) { + if (PTR_ERR(skb) != -EAGAIN) + this_cpu_inc(tun->pcpu_stats->rx_dropped); + return PTR_ERR(skb); + } - if (err) { - 
this_cpu_inc(tun->pcpu_stats->rx_dropped); - kfree_skb(skb); - return -EFAULT; + if (zerocopy) + err = zerocopy_sg_from_iter(skb, from); + else + err = skb_copy_datagram_from_iter(skb, 0, from, len); + + if (err) { + this_cpu_inc(tun->pcpu_stats->rx_dropped); + kfree_skb(skb); + return -EFAULT; + } } if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) { @@ -2377,6 +2445,8 @@ static int tun_chr_open(struct inode *inode, struct file * file) tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; + tfile->alloc_frag.page = NULL; + file->private_data = tfile; INIT_LIST_HEAD(&tfile->next); -- cgit v1.2.3-55-g7522 From 7c4974786f4794178f04e96318fc3b2f2850cbc6 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 11 Aug 2017 19:41:17 +0800 Subject: net: export some generic xdp helpers This patch tries to export some generic xdp helpers to drivers. This can let driver to do XDP for a specific skb. This is useful for the case when the packet is hard to be processed at page level directly (e.g jumbo/GSO frame). With this patch, there's no need for driver to forbid the XDP set when configuration is not suitable. Instead, it can defer the XDP for packets that is hard to be processed directly after skb is created. Signed-off-by: Jason Wang Signed-off-by: David S. Miller --- include/linux/netdevice.h | 2 ++ net/core/dev.c | 14 ++++++++------ 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1d238d54c484..0f1c4cb2441e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3243,6 +3243,8 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); } +void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); +int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); int netif_rx(struct sk_buff *skb); int netif_rx_ni(struct sk_buff *skb); int netif_receive_skb(struct sk_buff *skb); diff --git a/net/core/dev.c b/net/core/dev.c index 1024d3741d12..40b28e417072 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3919,7 +3919,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb, /* When doing generic XDP we have to bypass the qdisc layer and the * network taps in order to match in-driver-XDP behavior. 
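As a caller-side illustration of the helpers this patch exports (the tun patch that follows uses the same pattern), here is a minimal sketch of a driver deferring XDP until after an skb exists. The example_priv structure and its xdp_prog field are hypothetical; only do_xdp_generic() and generic_xdp_tx() come from this patch, and the "skb is consumed on any verdict other than XDP_PASS" behaviour is inferred from the tun usage below rather than guaranteed here.

/* Illustrative sketch only -- not part of the patch. */
static bool example_rx_generic_xdp(struct example_priv *priv,
				   struct sk_buff *skb)
{
	struct bpf_prog *xdp_prog;
	int act = XDP_PASS;

	rcu_read_lock();
	xdp_prog = rcu_dereference(priv->xdp_prog);	/* hypothetical field */
	if (xdp_prog)
		act = do_xdp_generic(xdp_prog, skb);
	rcu_read_unlock();

	/* On XDP_PASS the driver keeps processing the skb as usual; for any
	 * other verdict the generic path has taken ownership (drop, redirect
	 * or generic_xdp_tx()), so the caller must not touch the skb again.
	 */
	return act == XDP_PASS;
}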
*/ -static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) +void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) { struct net_device *dev = skb->dev; struct netdev_queue *txq; @@ -3940,13 +3940,12 @@ static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog) kfree_skb(skb); } } +EXPORT_SYMBOL_GPL(generic_xdp_tx); static struct static_key generic_xdp_needed __read_mostly; -static int do_xdp_generic(struct sk_buff *skb) +int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb) { - struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog); - if (xdp_prog) { u32 act = netif_receive_generic_xdp(skb, xdp_prog); int err; @@ -3971,6 +3970,7 @@ out_redir: kfree_skb(skb); return XDP_DROP; } +EXPORT_SYMBOL_GPL(do_xdp_generic); static int netif_rx_internal(struct sk_buff *skb) { @@ -3981,7 +3981,8 @@ static int netif_rx_internal(struct sk_buff *skb) trace_netif_rx(skb); if (static_key_false(&generic_xdp_needed)) { - int ret = do_xdp_generic(skb); + int ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), + skb); /* Consider XDP consuming the packet a success from * the netdev point of view we do not want to count @@ -4502,7 +4503,8 @@ static int netif_receive_skb_internal(struct sk_buff *skb) rcu_read_lock(); if (static_key_false(&generic_xdp_needed)) { - int ret = do_xdp_generic(skb); + int ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), + skb); if (ret != XDP_PASS) { rcu_read_unlock(); -- cgit v1.2.3-55-g7522 From 761876c857cb2ef8489fbee01907151da902af91 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 11 Aug 2017 19:41:18 +0800 Subject: tap: XDP support This patch tries to implement XDP for tun. The implementation was split into two parts: - fast path: small and no gso packet. We try to do XDP at page level before build_skb(). For XDP_TX, since creating/destroying queues were completely under control of userspace, it was implemented through generic XDP helper after skb has been built. This could be optimized in the future. - slow path: big or gso packet. We try to do it after skb was created through generic XDP helpers. Test were done through pktgen with small packets. xdp1 test shows ~41.1% improvement: Before: ~1.7Mpps After: ~2.3Mpps xdp_redirect to ixgbe shows ~60% improvement: Before: ~0.8Mpps After: ~1.38Mpps Suggested-by: Michael S. Tsirkin Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller --- drivers/net/tun.c | 149 +++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 142 insertions(+), 7 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 9736df40d2bf..5892284eb8d0 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -73,6 +73,8 @@ #include #include #include +#include +#include #include @@ -105,7 +107,8 @@ do { \ } while (0) #endif -#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) +#define TUN_HEADROOM 256 +#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD + TUN_HEADROOM) /* TUN device flags */ @@ -224,6 +227,7 @@ struct tun_struct { u32 flow_count; u32 rx_batched; struct tun_pcpu_stats __percpu *pcpu_stats; + struct bpf_prog __rcu *xdp_prog; }; #ifdef CONFIG_TUN_VNET_CROSS_LE @@ -590,6 +594,7 @@ static void tun_detach(struct tun_file *tfile, bool clean) static void tun_detach_all(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); + struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog); struct tun_file *tfile, *tmp; int i, n = tun->numqueues; @@ -622,6 +627,9 @@ static void tun_detach_all(struct net_device *dev) } BUG_ON(tun->numdisabled != 0); + if (xdp_prog) + bpf_prog_put(xdp_prog); + if (tun->flags & IFF_PERSIST) module_put(THIS_MODULE); } @@ -1008,6 +1016,46 @@ tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped = tx_dropped; } +static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct tun_struct *tun = netdev_priv(dev); + struct bpf_prog *old_prog; + + old_prog = rtnl_dereference(tun->xdp_prog); + rcu_assign_pointer(tun->xdp_prog, prog); + if (old_prog) + bpf_prog_put(old_prog); + + return 0; +} + +static u32 tun_xdp_query(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + const struct bpf_prog *xdp_prog; + + xdp_prog = rtnl_dereference(tun->xdp_prog); + if (xdp_prog) + return xdp_prog->aux->id; + + return 0; +} + +static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return tun_xdp_set(dev, xdp->prog, xdp->extack); + case XDP_QUERY_PROG: + xdp->prog_id = tun_xdp_query(dev); + xdp->prog_attached = !!xdp->prog_id; + return 0; + default: + return -EINVAL; + } +} + static const struct net_device_ops tun_netdev_ops = { .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, @@ -1038,6 +1086,7 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_features_check = passthru_features_check, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, + .ndo_xdp = tun_xdp, }; static void tun_flow_init(struct tun_struct *tun) @@ -1217,16 +1266,22 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, return true; } -static struct sk_buff *tun_build_skb(struct tun_file *tfile, +static struct sk_buff *tun_build_skb(struct tun_struct *tun, + struct tun_file *tfile, struct iov_iter *from, - int len) + struct virtio_net_hdr *hdr, + int len, int *generic_xdp) { struct page_frag *alloc_frag = &tfile->alloc_frag; struct sk_buff *skb; + struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(len + TUN_RX_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + unsigned int delta = 0; char *buf; size_t copied; + bool xdp_xmit = false; + int err; if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) return ERR_PTR(-ENOMEM); @@ -1238,16 +1293,77 @@ static struct sk_buff *tun_build_skb(struct tun_file *tfile, if (copied != len) return ERR_PTR(-EFAULT); + if 
(hdr->gso_type) + *generic_xdp = 1; + else + *generic_xdp = 0; + + rcu_read_lock(); + xdp_prog = rcu_dereference(tun->xdp_prog); + if (xdp_prog && !*generic_xdp) { + struct xdp_buff xdp; + void *orig_data; + u32 act; + + xdp.data_hard_start = buf; + xdp.data = buf + TUN_RX_PAD; + xdp.data_end = xdp.data + len; + orig_data = xdp.data; + act = bpf_prog_run_xdp(xdp_prog, &xdp); + + switch (act) { + case XDP_REDIRECT: + get_page(alloc_frag->page); + alloc_frag->offset += buflen; + err = xdp_do_redirect(tun->dev, &xdp, xdp_prog); + if (err) + goto err_redirect; + return NULL; + case XDP_TX: + xdp_xmit = true; + /* fall through */ + case XDP_PASS: + delta = orig_data - xdp.data; + break; + default: + bpf_warn_invalid_xdp_action(act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(tun->dev, xdp_prog, act); + /* fall through */ + case XDP_DROP: + goto err_xdp; + } + } + skb = build_skb(buf, buflen); - if (!skb) + if (!skb) { + rcu_read_unlock(); return ERR_PTR(-ENOMEM); + } - skb_reserve(skb, TUN_RX_PAD); - skb_put(skb, len); + skb_reserve(skb, TUN_RX_PAD - delta); + skb_put(skb, len + delta); get_page(alloc_frag->page); alloc_frag->offset += buflen; + if (xdp_xmit) { + skb->dev = tun->dev; + generic_xdp_tx(skb, xdp_prog); + rcu_read_lock(); + return NULL; + } + + rcu_read_unlock(); + return skb; + +err_redirect: + put_page(alloc_frag->page); +err_xdp: + rcu_read_unlock(); + this_cpu_inc(tun->pcpu_stats->rx_dropped); + return NULL; } /* Get packet from user space buffer */ @@ -1266,6 +1382,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, bool zerocopy = false; int err; u32 rxhash; + int generic_xdp = 1; if (!(tun->dev->flags & IFF_UP)) return -EIO; @@ -1324,11 +1441,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, } if (tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) { - skb = tun_build_skb(tfile, from, len); + skb = tun_build_skb(tun, tfile, from, &gso, len, &generic_xdp); if (IS_ERR(skb)) { this_cpu_inc(tun->pcpu_stats->rx_dropped); return PTR_ERR(skb); } + if (!skb) + return total_len; } else { if (!zerocopy) { copylen = len; @@ -1402,6 +1521,22 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_reset_network_header(skb); skb_probe_transport_header(skb, 0); + if (generic_xdp) { + struct bpf_prog *xdp_prog; + int ret; + + rcu_read_lock(); + xdp_prog = rcu_dereference(tun->xdp_prog); + if (xdp_prog) { + ret = do_xdp_generic(xdp_prog, skb); + if (ret != XDP_PASS) { + rcu_read_unlock(); + return total_len; + } + } + rcu_read_unlock(); + } + rxhash = __skb_get_hash_symmetric(skb); #ifndef CONFIG_4KSTACKS tun_rx_batched(tun, tfile, skb, more); -- cgit v1.2.3-55-g7522 From 653ef6a3e4af21fda8e73061d4f1b069a9982301 Mon Sep 17 00:00:00 2001 From: Girish Moodalbail Date: Fri, 11 Aug 2017 15:20:59 -0700 Subject: vxlan: change vxlan_[config_]validate() to use netlink_ext_ack for error reporting The kernel log is not where users expect error messages for netlink requests; as we have extended acks now, we can replace pr_debug() with NL_SET_ERR_MSG_ATTR(). Signed-off-by: Matthias Schiffer Signed-off-by: Girish Moodalbail Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 99 +++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 73 insertions(+), 26 deletions(-) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 35e84a9e1cfb..ae3a1da703c2 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2729,12 +2729,14 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { - pr_debug("invalid link address (not ethernet)\n"); + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], + "Provided link layer address is not Ethernet"); return -EINVAL; } if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { - pr_debug("invalid all zero ethernet address\n"); + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_ADDRESS], + "Provided Ethernet address is not unicast"); return -EADDRNOTAVAIL; } } @@ -2742,18 +2744,27 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], if (tb[IFLA_MTU]) { u32 mtu = nla_get_u32(tb[IFLA_MTU]); - if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) + if (mtu < ETH_MIN_MTU || mtu > ETH_MAX_MTU) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_MTU], + "MTU must be between 68 and 65535"); return -EINVAL; + } } - if (!data) + if (!data) { + NL_SET_ERR_MSG(extack, + "Required attributes not provided to perform the operation"); return -EINVAL; + } if (data[IFLA_VXLAN_ID]) { u32 id = nla_get_u32(data[IFLA_VXLAN_ID]); - if (id >= VXLAN_N_VID) + if (id >= VXLAN_N_VID) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_ID], + "VXLAN ID must be lower than 16777216"); return -ERANGE; + } } if (data[IFLA_VXLAN_PORT_RANGE]) { @@ -2761,8 +2772,8 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], = nla_data(data[IFLA_VXLAN_PORT_RANGE]); if (ntohs(p->high) < ntohs(p->low)) { - pr_debug("port range %u .. 
%u not valid\n", - ntohs(p->low), ntohs(p->high)); + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_PORT_RANGE], + "Invalid source port range"); return -EINVAL; } } @@ -2919,7 +2930,8 @@ static int vxlan_sock_add(struct vxlan_dev *vxlan) static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, struct net_device **lower, - struct vxlan_dev *old) + struct vxlan_dev *old, + struct netlink_ext_ack *extack) { struct vxlan_net *vn = net_generic(src_net, vxlan_net_id); struct vxlan_dev *tmp; @@ -2933,6 +2945,8 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, */ if ((conf->flags & ~VXLAN_F_ALLOWED_GPE) || !(conf->flags & VXLAN_F_COLLECT_METADATA)) { + NL_SET_ERR_MSG(extack, + "VXLAN GPE does not support this combination of attributes"); return -EINVAL; } } @@ -2947,15 +2961,23 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, conf->saddr.sa.sa_family = conf->remote_ip.sa.sa_family; } - if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) + if (conf->saddr.sa.sa_family != conf->remote_ip.sa.sa_family) { + NL_SET_ERR_MSG(extack, + "Local and remote address must be from the same family"); return -EINVAL; + } - if (vxlan_addr_multicast(&conf->saddr)) + if (vxlan_addr_multicast(&conf->saddr)) { + NL_SET_ERR_MSG(extack, "Local address cannot be multicast"); return -EINVAL; + } if (conf->saddr.sa.sa_family == AF_INET6) { - if (!IS_ENABLED(CONFIG_IPV6)) + if (!IS_ENABLED(CONFIG_IPV6)) { + NL_SET_ERR_MSG(extack, + "IPv6 support not enabled in the kernel"); return -EPFNOSUPPORT; + } use_ipv6 = true; conf->flags |= VXLAN_F_IPV6; @@ -2967,46 +2989,68 @@ static int vxlan_config_validate(struct net *src_net, struct vxlan_config *conf, if (local_type & IPV6_ADDR_LINKLOCAL) { if (!(remote_type & IPV6_ADDR_LINKLOCAL) && - (remote_type != IPV6_ADDR_ANY)) + (remote_type != IPV6_ADDR_ANY)) { + NL_SET_ERR_MSG(extack, + "Invalid combination of local and remote address scopes"); return -EINVAL; + } conf->flags |= VXLAN_F_IPV6_LINKLOCAL; } else { if (remote_type == - (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) + (IPV6_ADDR_UNICAST | IPV6_ADDR_LINKLOCAL)) { + NL_SET_ERR_MSG(extack, + "Invalid combination of local and remote address scopes"); return -EINVAL; + } conf->flags &= ~VXLAN_F_IPV6_LINKLOCAL; } } } - if (conf->label && !use_ipv6) + if (conf->label && !use_ipv6) { + NL_SET_ERR_MSG(extack, + "Label attribute only applies to IPv6 VXLAN devices"); return -EINVAL; + } if (conf->remote_ifindex) { struct net_device *lowerdev; lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); - if (!lowerdev) + if (!lowerdev) { + NL_SET_ERR_MSG(extack, + "Invalid local interface, device not found"); return -ENODEV; + } #if IS_ENABLED(CONFIG_IPV6) if (use_ipv6) { struct inet6_dev *idev = __in6_dev_get(lowerdev); - if (idev && idev->cnf.disable_ipv6) + if (idev && idev->cnf.disable_ipv6) { + NL_SET_ERR_MSG(extack, + "IPv6 support disabled by administrator"); return -EPERM; + } } #endif *lower = lowerdev; } else { - if (vxlan_addr_multicast(&conf->remote_ip)) + if (vxlan_addr_multicast(&conf->remote_ip)) { + NL_SET_ERR_MSG(extack, + "Local interface required for multicast remote destination"); + return -EINVAL; + } #if IS_ENABLED(CONFIG_IPV6) - if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) + if (conf->flags & VXLAN_F_IPV6_LINKLOCAL) { + NL_SET_ERR_MSG(extack, + "Local interface required for link-local local/remote addresses"); return -EINVAL; + } #endif *lower = NULL; @@ -3038,6 +3082,8 @@ static int vxlan_config_validate(struct net 
*src_net, struct vxlan_config *conf, tmp->cfg.remote_ifindex != conf->remote_ifindex) continue; + NL_SET_ERR_MSG(extack, + "A VXLAN device with the specified VNI already exists"); return -EEXIST; } @@ -3097,14 +3143,14 @@ static void vxlan_config_apply(struct net_device *dev, } static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, - struct vxlan_config *conf, - bool changelink) + struct vxlan_config *conf, bool changelink, + struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); struct net_device *lowerdev; int ret; - ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan); + ret = vxlan_config_validate(src_net, conf, &lowerdev, vxlan, extack); if (ret) return ret; @@ -3114,13 +3160,14 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, } static int __vxlan_dev_create(struct net *net, struct net_device *dev, - struct vxlan_config *conf) + struct vxlan_config *conf, + struct netlink_ext_ack *extack) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_dev *vxlan = netdev_priv(dev); int err; - err = vxlan_dev_configure(net, dev, conf, false); + err = vxlan_dev_configure(net, dev, conf, false, extack); if (err) return err; @@ -3366,7 +3413,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (err) return err; - return __vxlan_dev_create(src_net, dev, &conf); + return __vxlan_dev_create(src_net, dev, &conf, extack); } static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], @@ -3386,7 +3433,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], memcpy(&old_dst, dst, sizeof(struct vxlan_rdst)); - err = vxlan_dev_configure(vxlan->net, dev, &conf, true); + err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack); if (err) return err; @@ -3592,7 +3639,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name, if (IS_ERR(dev)) return dev; - err = __vxlan_dev_create(net, dev, conf); + err = __vxlan_dev_create(net, dev, conf, NULL); if (err < 0) { free_netdev(dev); return ERR_PTR(err); -- cgit v1.2.3-55-g7522 From 9438c871b2c12f5f829156149502ce3e2d0ece76 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 11 Aug 2017 17:02:02 -0700 Subject: net: ipv4: remove unnecessary check on orig_oif rt_iif is going to be set to either 0 or orig_oif. If orig_oif is 0 it amounts to the same end result so remove the check. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv4/route.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b88836e6b4a1..6810d2076b1b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2236,7 +2236,7 @@ add: if (!rth) return ERR_PTR(-ENOBUFS); - rth->rt_iif = orig_oif ? : 0; + rth->rt_iif = orig_oif; if (res->table) rth->rt_table_id = res->table->tb_id; -- cgit v1.2.3-55-g7522 From 4f04256c983a4f115417d2009b44dcb7d70a6375 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 11 Aug 2017 17:11:14 -0700 Subject: net: vrf: Drop local rtable and rt6_info The VRF cached rtable and rt6_info for local traffic are no longer needed and actually prevent local traffic through enslaved devices. Remove them. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- drivers/net/vrf.c | 122 ++++-------------------------------------------------- 1 file changed, 8 insertions(+), 114 deletions(-) diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index abd2010c48ae..7e19051f3230 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -47,9 +47,7 @@ static unsigned int vrf_net_id; struct net_vrf { struct rtable __rcu *rth; - struct rtable __rcu *rth_local; struct rt6_info __rcu *rt6; - struct rt6_info __rcu *rt6_local; u32 tb_id; }; @@ -194,42 +192,10 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb, /* if dst.dev is loopback or the VRF device again this is locally * originated traffic destined to a local address. Short circuit - * to Rx path using our local dst + * to Rx path */ - if (dst->dev == net->loopback_dev || dst->dev == dev) { - struct net_vrf *vrf = netdev_priv(dev); - struct rt6_info *rt6_local; - - /* release looked up dst and use cached local dst */ - dst_release(dst); - - rcu_read_lock(); - - rt6_local = rcu_dereference(vrf->rt6_local); - if (unlikely(!rt6_local)) { - rcu_read_unlock(); - goto err; - } - - /* Ordering issue: cached local dst is created on newlink - * before the IPv6 initialization. Using the local dst - * requires rt6i_idev to be set so make sure it is. - */ - if (unlikely(!rt6_local->rt6i_idev)) { - rt6_local->rt6i_idev = in6_dev_get(dev); - if (!rt6_local->rt6i_idev) { - rcu_read_unlock(); - goto err; - } - } - - dst = &rt6_local->dst; - dst_hold(dst); - - rcu_read_unlock(); - - return vrf_local_xmit(skb, dev, &rt6_local->dst); - } + if (dst->dev == dev) + return vrf_local_xmit(skb, dev, dst); skb_dst_set(skb, dst); @@ -296,30 +262,10 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb, /* if dst.dev is loopback or the VRF device again this is locally * originated traffic destined to a local address. 
Short circuit - * to Rx path using our local dst + * to Rx path */ - if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) { - struct net_vrf *vrf = netdev_priv(vrf_dev); - struct rtable *rth_local; - struct dst_entry *dst = NULL; - - ip_rt_put(rt); - - rcu_read_lock(); - - rth_local = rcu_dereference(vrf->rth_local); - if (likely(rth_local)) { - dst = &rth_local->dst; - dst_hold(dst); - } - - rcu_read_unlock(); - - if (unlikely(!dst)) - goto err; - - return vrf_local_xmit(skb, vrf_dev, dst); - } + if (rt->dst.dev == vrf_dev) + return vrf_local_xmit(skb, vrf_dev, &rt->dst); skb_dst_set(skb, &rt->dst); @@ -528,12 +474,10 @@ static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev, static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) { struct rt6_info *rt6 = rtnl_dereference(vrf->rt6); - struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local); struct net *net = dev_net(dev); struct dst_entry *dst; RCU_INIT_POINTER(vrf->rt6, NULL); - RCU_INIT_POINTER(vrf->rt6_local, NULL); synchronize_rcu(); /* move dev in dst's to loopback so this VRF device can be deleted @@ -546,19 +490,6 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) dev_hold(dst->dev); dst_release(dst); } - - if (rt6_local) { - if (rt6_local->rt6i_idev) { - in6_dev_put(rt6_local->rt6i_idev); - rt6_local->rt6i_idev = NULL; - } - - dst = &rt6_local->dst; - dev_put(dst->dev); - dst->dev = net->loopback_dev; - dev_hold(dst->dev); - dst_release(dst); - } } static int vrf_rt6_create(struct net_device *dev) @@ -567,7 +498,7 @@ static int vrf_rt6_create(struct net_device *dev) struct net_vrf *vrf = netdev_priv(dev); struct net *net = dev_net(dev); struct fib6_table *rt6i_table; - struct rt6_info *rt6, *rt6_local; + struct rt6_info *rt6; int rc = -ENOMEM; /* IPv6 can be CONFIG enabled and then disabled runtime */ @@ -586,22 +517,7 @@ static int vrf_rt6_create(struct net_device *dev) rt6->rt6i_table = rt6i_table; rt6->dst.output = vrf_output6; - /* create a dst for local routing - packets sent locally - * to local address via the VRF device as a loopback - */ - rt6_local = ip6_dst_alloc(net, dev, flags); - if (!rt6_local) { - dst_release(&rt6->dst); - goto out; - } - - rt6_local->rt6i_idev = in6_dev_get(dev); - rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL; - rt6_local->rt6i_table = rt6i_table; - rt6_local->dst.input = ip6_input; - rcu_assign_pointer(vrf->rt6, rt6); - rcu_assign_pointer(vrf->rt6_local, rt6_local); rc = 0; out: @@ -788,12 +704,10 @@ static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev, static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) { struct rtable *rth = rtnl_dereference(vrf->rth); - struct rtable *rth_local = rtnl_dereference(vrf->rth_local); struct net *net = dev_net(dev); struct dst_entry *dst; RCU_INIT_POINTER(vrf->rth, NULL); - RCU_INIT_POINTER(vrf->rth_local, NULL); synchronize_rcu(); /* move dev in dst's to loopback so this VRF device can be deleted @@ -806,20 +720,12 @@ static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf) dev_hold(dst->dev); dst_release(dst); } - - if (rth_local) { - dst = &rth_local->dst; - dev_put(dst->dev); - dst->dev = net->loopback_dev; - dev_hold(dst->dev); - dst_release(dst); - } } static int vrf_rtable_create(struct net_device *dev) { struct net_vrf *vrf = netdev_priv(dev); - struct rtable *rth, *rth_local; + struct rtable *rth; if (!fib_new_table(dev_net(dev), vrf->tb_id)) return -ENOMEM; @@ -829,22 +735,10 @@ static int vrf_rtable_create(struct net_device 
*dev) if (!rth) return -ENOMEM; - /* create a dst for local ingress routing - packets sent locally - * to local address via the VRF device as a loopback - */ - rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0); - if (!rth_local) { - dst_release(&rth->dst); - return -ENOMEM; - } - rth->dst.output = vrf_output; rth->rt_table_id = vrf->tb_id; - rth_local->rt_table_id = vrf->tb_id; - rcu_assign_pointer(vrf->rth, rth); - rcu_assign_pointer(vrf->rth_local, rth_local); return 0; } -- cgit v1.2.3-55-g7522 From 1dfa76390bf056a49c1105b11f815a35a693b77c Mon Sep 17 00:00:00 2001 From: David Ahern Date: Fri, 11 Aug 2017 17:11:15 -0700 Subject: net: ipv4: add check for l3slave for index returned in IP_PKTINFO Similar to the loopback device, for packets sent through a VRF device the index returned in ipi_ifindex needs to be the saved index in rt_iif. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- net/ipv4/ip_sockglue.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index dd68a9ed5e40..e558e4f9597b 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -1207,6 +1207,7 @@ e_inval: void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) { struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb); + bool l3slave = ipv4_l3mdev_skb(IPCB(skb)->flags); bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) || ipv6_sk_rxinfo(sk); @@ -1220,7 +1221,7 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb) * (e.g., process binds socket to eth0 for Tx which is * redirected to loopback in the rtable/dst). */ - if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX) + if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX || l3slave) pktinfo->ipi_ifindex = inet_iif(skb); pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb); -- cgit v1.2.3-55-g7522 From 5a7a8346498c02bbb0d6512c561f1dbfab0fcf62 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 13 Aug 2017 00:03:06 +0300 Subject: of_mdio: merge branch tails in of_phy_register_fixed_link() Looks like gcc isn't always able to figure out that 3 *if* branches in of_phy_register_fixed_link() calling fixed_phy_register() at their ends are similar enough and thus can be merged. The "manual" merge saves 40 bytes of the object code (AArch64 gcc 4.8.5), and still saves 12 bytes even if gcc was able to merge the branch tails (ARM gcc 4.8.5)... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/of/of_mdio.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 94ca3470e943..b14a00034fb1 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -422,16 +422,13 @@ int of_phy_register_fixed_link(struct device_node *np) struct fixed_phy_status status = {}; struct device_node *fixed_link_node; u32 fixed_link_prop[5]; - struct phy_device *phy; const char *managed; - int link_gpio; + int link_gpio = -1; - if (of_property_read_string(np, "managed", &managed) == 0) { - if (strcmp(managed, "in-band-status") == 0) { - /* status is zeroed, namely its .link member */ - phy = fixed_phy_register(PHY_POLL, &status, -1, np); - return PTR_ERR_OR_ZERO(phy); - } + if (of_property_read_string(np, "managed", &managed) == 0 && + strcmp(managed, "in-band-status") == 0) { + /* status is zeroed, namely its .link member */ + goto register_phy; } /* New binding */ @@ -454,8 +451,7 @@ int of_phy_register_fixed_link(struct device_node *np) if (link_gpio == -EPROBE_DEFER) return -EPROBE_DEFER; - phy = fixed_phy_register(PHY_POLL, &status, link_gpio, np); - return PTR_ERR_OR_ZERO(phy); + goto register_phy; } /* Old binding */ @@ -466,11 +462,14 @@ int of_phy_register_fixed_link(struct device_node *np) status.speed = fixed_link_prop[2]; status.pause = fixed_link_prop[3]; status.asym_pause = fixed_link_prop[4]; - phy = fixed_phy_register(PHY_POLL, &status, -1, np); - return PTR_ERR_OR_ZERO(phy); + goto register_phy; } return -ENODEV; + +register_phy: + return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, link_gpio, + np)); } EXPORT_SYMBOL(of_phy_register_fixed_link); -- cgit v1.2.3-55-g7522 From 7acd43296889a71ecf9c20498465f672e667a9ad Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 12 Aug 2017 22:45:53 +0100 Subject: virtio-net: make array guest_offloads static The array guest_offloads is local to the source and does not need to be in global scope, so make it static. Also tweak formatting. Cleans up sparse warnings: symbol 'guest_offloads' was not declared. Should it be static? Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index e90de2186ffc..a3f3c66b4530 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -57,10 +57,12 @@ DECLARE_EWMA(pkt_len, 0, 64) #define VIRTNET_DRIVER_VERSION "1.0.0" -const unsigned long guest_offloads[] = { VIRTIO_NET_F_GUEST_TSO4, - VIRTIO_NET_F_GUEST_TSO6, - VIRTIO_NET_F_GUEST_ECN, - VIRTIO_NET_F_GUEST_UFO }; +static const unsigned long guest_offloads[] = { + VIRTIO_NET_F_GUEST_TSO4, + VIRTIO_NET_F_GUEST_TSO6, + VIRTIO_NET_F_GUEST_ECN, + VIRTIO_NET_F_GUEST_UFO +}; struct virtnet_stats { struct u64_stats_sync tx_syncp; -- cgit v1.2.3-55-g7522 From d17eb73bb713dc8dd389aa87606b90edb0d258b2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 12 Aug 2017 22:52:31 +0100 Subject: tap: make struct tap_fops static The structure tap_fops is local to the source and does not need to be in global scope, so make it static. Cleans up sparse warning: symbol 'tap_fops' was not declared. Should it be static? Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/tap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index ca267fd28ab8..0d039411e64c 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1127,7 +1127,7 @@ static long tap_compat_ioctl(struct file *file, unsigned int cmd, } #endif -const struct file_operations tap_fops = { +static const struct file_operations tap_fops = { .owner = THIS_MODULE, .open = tap_open, .release = tap_release, -- cgit v1.2.3-55-g7522 From 7496bece4dbbdc5b849cdcad8c6d99f9a0185c93 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 13 Aug 2017 16:41:45 +0530 Subject: can: constify platform_device_id platform_device_id are not supposed to change at runtime. All functions working with platform_device_id provided by work with const platform_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/can/c_can/c_can_platform.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index e36d10520e24..46a746ee80bb 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -220,7 +220,7 @@ static const struct c_can_driver_data am3352_dcan_drvdata = { .raminit_bits = am3352_raminit_bits, }; -static struct platform_device_id c_can_id_table[] = { +static const struct platform_device_id c_can_id_table[] = { { .name = KBUILD_MODNAME, .driver_data = (kernel_ulong_t)&c_can_drvdata, -- cgit v1.2.3-55-g7522 From bef0fed4d012af9651716aec85094a83df7a87bc Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 13 Aug 2017 16:42:08 +0530 Subject: net: dpaa_eth: constify platform_device_id platform_device_id are not supposed to change at runtime. All functions working with platform_device_id provided by work with const platform_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 733d54caabb6..c7fa285378b5 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2830,7 +2830,7 @@ static int dpaa_remove(struct platform_device *pdev) return err; } -static struct platform_device_id dpaa_devtype[] = { +static const struct platform_device_id dpaa_devtype[] = { { .name = "dpaa-ethernet", .driver_data = 0, -- cgit v1.2.3-55-g7522 From ef00df854b979eb6f1e4c203f34327887f143025 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 13 Aug 2017 16:42:42 +0530 Subject: net: sh_eth: constify platform_device_id platform_device_id are not supposed to change at runtime. All functions working with platform_device_id provided by work with const platform_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index d2dc0a8ef305..d2e88a30f57b 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3402,7 +3402,7 @@ static const struct dev_pm_ops sh_eth_dev_pm_ops = { #define SH_ETH_PM_OPS NULL #endif -static struct platform_device_id sh_eth_id_table[] = { +static const struct platform_device_id sh_eth_id_table[] = { { "sh7619-ether", (kernel_ulong_t)&sh7619_data }, { "sh771x-ether", (kernel_ulong_t)&sh771x_data }, { "sh7724-ether", (kernel_ulong_t)&sh7724_data }, -- cgit v1.2.3-55-g7522 From f5b589488ea5ed3bb6168b1a4e7f7b95841d8513 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sun, 13 Aug 2017 16:43:18 +0530 Subject: net: ti: cpsw:: constify platform_device_id platform_device_id are not supposed to change at runtime. All functions working with platform_device_id provided by work with const platform_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/cpsw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c8776dbf1a55..db8a4bcfc6c7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2827,7 +2827,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv) #define CPSW_QUIRK_IRQ BIT(0) -static struct platform_device_id cpsw_devtype[] = { +static const struct platform_device_id cpsw_devtype[] = { { /* keep it for existing comaptibles */ .name = "cpsw", -- cgit v1.2.3-55-g7522 From da1542b01b38ba29e0d28b1ef980071e7d38d6d4 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Fri, 11 Aug 2017 18:43:14 -0700 Subject: liquidio: update debug console logging mechanism - remove logging dependency upon global func octeon_console_debug_enabled() - abstract debug console logging using console structure (via function ptr) to allow for more flexible logging Signed-off-by: Rick Farrington Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 44 +++++++++++++++++- .../net/ethernet/cavium/liquidio/octeon_console.c | 54 ++++++++++++++-------- .../net/ethernet/cavium/liquidio/octeon_device.h | 17 +++++-- 3 files changed, 90 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8bf6dfcf5881..8ec0b6d978d2 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -73,7 +73,7 @@ MODULE_PARM_DESC(console_bitmask, * @param console console to check * @returns 1 = enabled. 
0 otherwise */ -int octeon_console_debug_enabled(u32 console) +static int octeon_console_debug_enabled(u32 console) { return (console_bitmask >> (console)) & 0x1; } @@ -185,6 +185,9 @@ struct octeon_device_priv { static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); #endif +static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, + char *prefix, char *suffix); + static int octeon_device_init(struct octeon_device *); static int liquidio_stop(struct net_device *netdev); static void liquidio_remove(struct pci_dev *pdev); @@ -4556,6 +4559,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev) int j, ret; int fw_loaded = 0; char bootcmd[] = "\n"; + char *dbg_enb = NULL; struct octeon_device_priv *oct_priv = (struct octeon_device_priv *)octeon_dev->priv; atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); @@ -4762,10 +4766,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n"); return 1; } - ret = octeon_add_console(octeon_dev, 0); + /* If console debug enabled, specify empty string to use default + * enablement ELSE specify NULL string for 'disabled'. + */ + dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL; + ret = octeon_add_console(octeon_dev, 0, dbg_enb); if (ret) { dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n"); return 1; + } else if (octeon_console_debug_enabled(0)) { + /* If console was added AND we're logging console output + * then set our console print function. + */ + octeon_dev->console[0].print = octeon_dbg_console_print; } atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); @@ -4800,6 +4813,33 @@ static int octeon_device_init(struct octeon_device *octeon_dev) return 0; } +/** + * \brief Debug console print function + * @param octeon_dev octeon device + * @param console_num console number + * @param prefix first portion of line to display + * @param suffix second portion of line to display + * + * The OCTEON debug console outputs entire lines (excluding '\n'). + * Normally, the line will be passed in the 'prefix' parameter. + * However, due to buffering, it is possible for a line to be split into two + * parts, in which case they will be passed as the 'prefix' parameter and + * 'suffix' parameter. + */ +static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, + char *prefix, char *suffix) +{ + if (prefix && suffix) + dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix, + suffix); + else if (prefix) + dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix); + else if (suffix) + dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix); + + return 0; +} + /** * \brief Exits the module */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c index dd0efc9b4286..19e5212f66d1 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -437,20 +437,31 @@ static void output_console_line(struct octeon_device *oct, { char *line; s32 i; + size_t len; line = console_buffer; for (i = 0; i < bytes_read; i++) { /* Output a line at a time, prefixed */ if (console_buffer[i] == '\n') { console_buffer[i] = '\0'; - if (console->leftover[0]) { - dev_info(&oct->pci_dev->dev, "%lu: %s%s\n", - console_num, console->leftover, - line); + /* We need to output 'line', prefaced by 'leftover'. 
+ * However, it is possible we're being called to + * output 'leftover' by itself (in the case of nothing + * having been read from the console). + * + * To avoid duplication, check for this condition. + */ + if (console->leftover[0] && + (line != console->leftover)) { + if (console->print) + (*console->print)(oct, (u32)console_num, + console->leftover, + line); console->leftover[0] = '\0'; } else { - dev_info(&oct->pci_dev->dev, "%lu: %s\n", - console_num, line); + if (console->print) + (*console->print)(oct, (u32)console_num, + line, NULL); } line = &console_buffer[i + 1]; } @@ -459,13 +470,16 @@ static void output_console_line(struct octeon_device *oct, /* Save off any leftovers */ if (line != &console_buffer[bytes_read]) { console_buffer[bytes_read] = '\0'; - strcpy(console->leftover, line); + len = strlen(console->leftover); + strncpy(&console->leftover[len], line, + sizeof(console->leftover) - len); } } static void check_console(struct work_struct *work) { s32 bytes_read, tries, total_read; + size_t len; struct octeon_console *console; struct cavium_wk *wk = (struct cavium_wk *)work; struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; @@ -487,7 +501,7 @@ static void check_console(struct work_struct *work) total_read += bytes_read; if (console->waiting) octeon_console_handle_result(oct, console_num); - if (octeon_console_debug_enabled(console_num)) { + if (console->print) { output_console_line(oct, console, console_num, console_buffer, bytes_read); } @@ -502,10 +516,13 @@ static void check_console(struct work_struct *work) /* If nothing is read after polling the console, * output any leftovers if any */ - if (octeon_console_debug_enabled(console_num) && - (total_read == 0) && (console->leftover[0])) { - dev_info(&oct->pci_dev->dev, "%u: %s\n", - console_num, console->leftover); + if (console->print && (total_read == 0) && + (console->leftover[0])) { + /* append '\n' as terminator for 'output_console_line' */ + len = strlen(console->leftover); + console->leftover[len] = '\n'; + output_console_line(oct, console, console_num, + console->leftover, (s32)(len + 1)); console->leftover[0] = '\0'; } @@ -557,7 +574,8 @@ int octeon_init_consoles(struct octeon_device *oct) return ret; } -int octeon_add_console(struct octeon_device *oct, u32 console_num) +int octeon_add_console(struct octeon_device *oct, u32 console_num, + char *dbg_enb) { int ret = 0; u32 delay; @@ -599,11 +617,11 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num) delay = OCTEON_CONSOLE_POLL_INTERVAL_MS; schedule_delayed_work(work, msecs_to_jiffies(delay)); - if (octeon_console_debug_enabled(console_num)) { - ret = octeon_console_send_cmd(oct, - "setenv pci_console_active 1", - 2000); - } + /* an empty string means use default debug console enablement */ + if (dbg_enb && !dbg_enb[0]) + dbg_enb = "setenv pci_console_active 1"; + if (dbg_enb) + ret = octeon_console_send_cmd(oct, dbg_enb, 2000); console->active = 1; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index b014e6ad0e9a..2c554729bfe4 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -194,6 +194,8 @@ struct octeon_reg_list { }; #define OCTEON_CONSOLE_MAX_READ_BYTES 512 +typedef int (*octeon_console_print_fn)(struct octeon_device *oct, + u32 num, char *pre, char *suf); struct octeon_console { u32 active; u32 waiting; @@ -201,6 +203,7 @@ struct octeon_console { u32 buffer_size; u64 
input_base_addr; u64 output_base_addr; + octeon_console_print_fn print; char leftover[OCTEON_CONSOLE_MAX_READ_BYTES]; }; @@ -740,16 +743,20 @@ int octeon_wait_for_bootloader(struct octeon_device *oct, */ int octeon_init_consoles(struct octeon_device *oct); -int octeon_console_debug_enabled(u32 console); - /** * Adds access to a console to the device. * - * @param oct which octeon to add to - * @param console_num which console + * @param oct: which octeon to add to + * @param console_num: which console + * @param dbg_enb: ptr to debug enablement string, one of: + * * NULL for no debug output (i.e. disabled) + * * empty string enables debug output (via default method) + * * specific string to enable debug console output + * * @return Zero on success, negative on failure. */ -int octeon_add_console(struct octeon_device *oct, u32 console_num); +int octeon_add_console(struct octeon_device *oct, u32 console_num, + char *dbg_enb); /** write or read from a console */ int octeon_console_write(struct octeon_device *oct, u32 console_num, -- cgit v1.2.3-55-g7522 From b5e7dc47425860172eec6e1116ace7759d13b999 Mon Sep 17 00:00:00 2001 From: Gustavo A. R. Silva Date: Sat, 12 Aug 2017 20:38:55 -0500 Subject: liquidio: fix duplicated code for different branches Refactor code in order to avoid identical code for different branches. This issue was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Acked-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index b78e296c4cba..4f65c08461f6 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -578,23 +578,18 @@ static int lio_set_phys_id(struct net_device *netdev, break; case ETHTOOL_ID_ON: - if (oct->chip_id == OCTEON_CN66XX) { + if (oct->chip_id == OCTEON_CN66XX) octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, VITESSE_PHY_GPIO_HIGH); - - } else if (oct->chip_id == OCTEON_CN68XX) { - return -EINVAL; - } else { + else return -EINVAL; - } + break; case ETHTOOL_ID_OFF: if (oct->chip_id == OCTEON_CN66XX) octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, VITESSE_PHY_GPIO_LOW); - else if (oct->chip_id == OCTEON_CN68XX) - return -EINVAL; else return -EINVAL; -- cgit v1.2.3-55-g7522 From 4db93fb8aca3e9e56987c0e873166bdf4b6576ce Mon Sep 17 00:00:00 2001 From: Gustavo A. R. Silva Date: Sat, 12 Aug 2017 20:58:40 -0500 Subject: qlge: fix duplicated code for different branches Refactor code in order to avoid identical code for different branches. This issue was detected with the help of Coccinelle. Signed-off-by: Gustavo A. R. Silva Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlge/qlge_dbg.c | 47 +++++++++-------------------- 1 file changed, 14 insertions(+), 33 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c index 28ea0af89aef..458d55ba423f 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c @@ -144,42 +144,23 @@ static int ql_get_serdes_regs(struct ql_adapter *qdev, xaui_direct_valid = xaui_indirect_valid = 1; /* The XAUI needs to be read out per port */ - if (qdev->func & 1) { - /* We are NIC 2 */ - status = ql_read_other_func_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_indirect_valid = 0; + status = ql_read_other_func_serdes_reg(qdev, + XG_SERDES_XAUI_HSS_PCS_START, &temp); + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - status = ql_read_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_indirect_valid = 0; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_direct_valid = 0; - } else { - /* We are NIC 1 */ - status = ql_read_other_func_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_indirect_valid = 0; + status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); - status = ql_read_serdes_reg(qdev, - XG_SERDES_XAUI_HSS_PCS_START, &temp); - if (status) - temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; - if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == - XG_SERDES_ADDR_XAUI_PWR_DOWN) - xaui_direct_valid = 0; - } + if (status) + temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; + + if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) == + XG_SERDES_ADDR_XAUI_PWR_DOWN) + xaui_direct_valid = 0; /* * XFI register is shared so only need to read one -- cgit v1.2.3-55-g7522 From cc70267008d5a22bc044fb1670937d52cb024cd3 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 14 Aug 2017 10:54:03 +0200 Subject: mlxsw: spectrum_router: Return void from deletion functions There is no point in returning a value from function whose return value is never checked. Even if the return value was checked, there wouldn't be anything to do about it, as these functions are either called from error or deletion paths. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 93b6da88e79c..220e7e742aa4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -505,15 +505,15 @@ static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); } -static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_lpm_tree *lpm_tree) +static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) { char ralta_pl[MLXSW_REG_RALTA_LEN]; mlxsw_reg_ralta_pack(ralta_pl, false, (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto, lpm_tree->id); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl); } static int @@ -569,10 +569,10 @@ err_left_struct_set: return ERR_PTR(err); } -static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_lpm_tree *lpm_tree) +static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) { - return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree); + mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree); } static struct mlxsw_sp_lpm_tree * @@ -601,12 +601,11 @@ inc_ref_count: return lpm_tree; } -static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_lpm_tree *lpm_tree) +static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_lpm_tree *lpm_tree) { if (--lpm_tree->ref_count == 0) - return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree); - return 0; + mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree); } #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */ -- cgit v1.2.3-55-g7522 From 0adb214ba2c04489eb13bdbd145c5bf77d78b078 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 14 Aug 2017 10:54:04 +0200 Subject: mlxsw: spectrum_router: Pass argument explicitly Instead of relying on the LPM tree to be assigned to the virtual router before binding the two, lets pass it explicitly. This will later allow us to return upon binding error instead of having to perform a rollback of the assignment. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 220e7e742aa4..3c204d2144fa 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -659,13 +659,13 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp) } static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp, - const struct mlxsw_sp_fib *fib) + const struct mlxsw_sp_fib *fib, u8 tree_id) { char raltb_pl[MLXSW_REG_RALTB_LEN]; mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id, (enum mlxsw_reg_ralxx_protocol) fib->proto, - fib->lpm_tree->id); + tree_id); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl); } @@ -777,7 +777,7 @@ mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib, /* Prevent packet loss by overwriting existing binding */ fib->lpm_tree = new_tree; - err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib); + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id); if (err) goto err_tree_bind; mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); @@ -2631,7 +2631,7 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(lpm_tree)) return PTR_ERR(lpm_tree); fib->lpm_tree = lpm_tree; - err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib); + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id); if (err) goto err_tree_bind; } -- cgit v1.2.3-55-g7522 From fc922bb0dd9406dd9897fd47df958789891c380e Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Mon, 14 Aug 2017 10:54:05 +0200 Subject: mlxsw: spectrum_router: Use one LPM tree for all virtual routers The number of LPM trees available for lookup is much smaller than the number of virtual routers, which are used to implement VRFs. In addition, an LPM tree can only be used by one protocol - either IPv4 or IPv6. Therefore, in order to increase the number of supported virtual routers to the maximum we need to be able to share LPM trees across virtual routers instead of trying to find an optimized tree for each. Do that by allocating one LPM tree for each protocol, but make sure it will only include prefixes that are actually used, so as to not perform unnecessary lookups. Since changing the structure of a bound tree isn't recommended, whenever a new tree it required, it's first created and then bound to each virtual router, replacing the old one. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 259 +++++++++++++-------- 1 file changed, 165 insertions(+), 94 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 3c204d2144fa..3d9be36965f6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -320,19 +320,6 @@ struct mlxsw_sp_prefix_usage { #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \ for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT) -static bool -mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1, - struct mlxsw_sp_prefix_usage *prefix_usage2) -{ - unsigned char prefix; - - mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) { - if (!test_bit(prefix, prefix_usage2->b)) - return false; - } - return true; -} - static bool mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1, struct mlxsw_sp_prefix_usage *prefix_usage2) @@ -589,16 +576,14 @@ mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp, lpm_tree->proto == proto && mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage, prefix_usage)) - goto inc_ref_count; + return lpm_tree; } - lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, - proto); - if (IS_ERR(lpm_tree)) - return lpm_tree; + return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto); +} -inc_ref_count: +static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree) +{ lpm_tree->ref_count++; - return lpm_tree; } static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, @@ -750,46 +735,6 @@ static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr) vr->fib4 = NULL; } -static int -mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib, - struct mlxsw_sp_prefix_usage *req_prefix_usage) -{ - struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree; - struct mlxsw_sp_lpm_tree *new_tree; - int err; - - if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage)) - return 0; - - new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage, - fib->proto); - if (IS_ERR(new_tree)) { - /* We failed to get a tree according to the required - * prefix usage. However, the current tree might be still good - * for us if our requirement is subset of the prefixes used - * in the tree. 
- */ - if (mlxsw_sp_prefix_usage_subset(req_prefix_usage, - &lpm_tree->prefix_usage)) - return 0; - return PTR_ERR(new_tree); - } - - /* Prevent packet loss by overwriting existing binding */ - fib->lpm_tree = new_tree; - err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id); - if (err) - goto err_tree_bind; - mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); - - return 0; - -err_tree_bind: - fib->lpm_tree = lpm_tree; - mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree); - return err; -} - static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id) { struct mlxsw_sp_vr *vr; @@ -808,6 +753,100 @@ static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr) mlxsw_sp_vr_destroy(vr); } +static bool +mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr, + enum mlxsw_sp_l3proto proto, u8 tree_id) +{ + struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto); + + if (!mlxsw_sp_vr_is_used(vr)) + return false; + if (fib->lpm_tree && fib->lpm_tree->id == tree_id) + return true; + return false; +} + +static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib *fib, + struct mlxsw_sp_lpm_tree *new_tree) +{ + struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree; + int err; + + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id); + if (err) + return err; + fib->lpm_tree = new_tree; + mlxsw_sp_lpm_tree_hold(new_tree); + mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree); + return 0; +} + +static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib *fib, + struct mlxsw_sp_lpm_tree *new_tree) +{ + struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree; + enum mlxsw_sp_l3proto proto = fib->proto; + u8 old_id, new_id = new_tree->id; + struct mlxsw_sp_vr *vr; + int i, err; + + if (!old_tree) + goto no_replace; + old_id = old_tree->id; + + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { + vr = &mlxsw_sp->router->vrs[i]; + if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id)) + continue; + err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp, + mlxsw_sp_vr_fib(vr, proto), + new_tree); + if (err) + goto err_tree_replace; + } + + return 0; + +err_tree_replace: + for (i--; i >= 0; i--) { + if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id)) + continue; + mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp, + mlxsw_sp_vr_fib(vr, proto), + old_tree); + } + return err; + +no_replace: + err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id); + if (err) + return err; + fib->lpm_tree = new_tree; + mlxsw_sp_lpm_tree_hold(new_tree); + return 0; +} + +static void +mlxsw_sp_vrs_prefixes(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_sp_l3proto proto, + struct mlxsw_sp_prefix_usage *req_prefix_usage) +{ + int i; + + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) { + struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i]; + struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto); + unsigned char prefix; + + if (!mlxsw_sp_vr_is_used(vr)) + continue; + mlxsw_sp_prefix_usage_for_each(prefix, &fib->prefix_usage) + mlxsw_sp_prefix_usage_set(req_prefix_usage, prefix); + } +} + static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_vr *vr; @@ -2586,6 +2625,67 @@ mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node, struct mlxsw_sp_fib_entry, list) == fib_entry; } +static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib *fib, + struct mlxsw_sp_fib_node *fib_node) +{ + struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } }; + struct mlxsw_sp_lpm_tree *lpm_tree; + int err; + + /* 
Since the tree is shared between all virtual routers we must + * make sure it contains all the required prefix lengths. This + * can be computed by either adding the new prefix length to the + * existing prefix usage of a bound tree, or by aggregating the + * prefix lengths across all virtual routers and adding the new + * one as well. + */ + if (fib->lpm_tree) + mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, + &fib->lpm_tree->prefix_usage); + else + mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage); + mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len); + + lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, + fib->proto); + if (IS_ERR(lpm_tree)) + return PTR_ERR(lpm_tree); + + if (fib->lpm_tree && fib->lpm_tree->id == lpm_tree->id) + return 0; + + err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree); + if (err) + return err; + + return 0; +} + +static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib *fib) +{ + struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } }; + struct mlxsw_sp_lpm_tree *lpm_tree; + + /* Aggregate prefix lengths across all virtual routers to make + * sure we only have used prefix lengths in the LPM tree. + */ + mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage); + lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, + fib->proto); + if (IS_ERR(lpm_tree)) + goto err_tree_get; + mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree); + +err_tree_get: + if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) + return; + mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); + mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree); + fib->lpm_tree = NULL; +} + static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node) { unsigned char prefix_len = fib_node->key.prefix_len; @@ -2608,8 +2708,6 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node, struct mlxsw_sp_fib *fib) { - struct mlxsw_sp_prefix_usage req_prefix_usage; - struct mlxsw_sp_lpm_tree *lpm_tree; int err; err = mlxsw_sp_fib_node_insert(fib, fib_node); @@ -2617,33 +2715,15 @@ static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp, return err; fib_node->fib = fib; - mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage); - mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len); - - if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) { - err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, - &req_prefix_usage); - if (err) - goto err_tree_check; - } else { - lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, - fib->proto); - if (IS_ERR(lpm_tree)) - return PTR_ERR(lpm_tree); - fib->lpm_tree = lpm_tree; - err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id); - if (err) - goto err_tree_bind; - } + err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib, fib_node); + if (err) + goto err_fib_lpm_tree_link; mlxsw_sp_fib_node_prefix_inc(fib_node); return 0; -err_tree_bind: - fib->lpm_tree = NULL; - mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); -err_tree_check: +err_fib_lpm_tree_link: fib_node->fib = NULL; mlxsw_sp_fib_node_remove(fib, fib_node); return err; @@ -2652,19 +2732,10 @@ err_tree_check: static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_node *fib_node) { - struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree; struct mlxsw_sp_fib *fib = fib_node->fib; mlxsw_sp_fib_node_prefix_dec(fib_node); - - if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) { - mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); - 
fib->lpm_tree = NULL; - mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree); - } else { - mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage); - } - + mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib); fib_node->fib = NULL; mlxsw_sp_fib_node_remove(fib, fib_node); } -- cgit v1.2.3-55-g7522 From 54161ed4eedeef9442a0a7d3b1a15ad44f8f9831 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Mon, 14 Aug 2017 15:43:00 +0200 Subject: net: phy: Use tab for indentation in Kconfig Using tabs instead of space for indentation. Signed-off-by: Michal Simek Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 8c919203291a..5afe6fdcc968 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -5,7 +5,7 @@ menuconfig MDIO_DEVICE tristate "MDIO bus device drivers" help - MDIO devices and driver infrastructure code. + MDIO devices and driver infrastructure code. config MDIO_BUS tristate @@ -117,11 +117,11 @@ config MDIO_I2C This is library mode. config MDIO_MOXART - tristate "MOXA ART MDIO interface support" - depends on ARCH_MOXART - help - This driver supports the MDIO interface found in the network - interface units of the MOXA ART SoC + tristate "MOXA ART MDIO interface support" + depends on ARCH_MOXART + help + This driver supports the MDIO interface found in the network + interface units of the MOXA ART SoC config MDIO_OCTEON tristate "Octeon and some ThunderX SOCs MDIO buses" @@ -192,7 +192,7 @@ config LED_TRIGGER_PHY state change will trigger the events, for consumption by an LED class driver. There are triggers for each link speed currently supported by the phy, and are of the form: - :: + :: Where speed is in the form: Mbps or Gbps @@ -211,9 +211,9 @@ config AMD_PHY Currently supports the am79c874 config AQUANTIA_PHY - tristate "Aquantia PHYs" - ---help--- - Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 + tristate "Aquantia PHYs" + ---help--- + Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 config AT803X_PHY tristate "AT803X PHYs" @@ -382,21 +382,21 @@ config STE10XP This is the driver for the STe100p and STe101p PHYs. config TERANETICS_PHY - tristate "Teranetics PHYs" - ---help--- - Currently supports the Teranetics TN2020 + tristate "Teranetics PHYs" + ---help--- + Currently supports the Teranetics TN2020 config VITESSE_PHY - tristate "Vitesse PHYs" - ---help--- - Currently supports the vsc8244 + tristate "Vitesse PHYs" + ---help--- + Currently supports the vsc8244 config XILINX_GMII2RGMII - tristate "Xilinx GMII2RGMII converter driver" - ---help--- - This driver support xilinx GMII to RGMII IP core it provides - the Reduced Gigabit Media Independent Interface(RGMII) between - Ethernet physical media devices and the Gigabit Ethernet controller. + tristate "Xilinx GMII2RGMII converter driver" + ---help--- + This driver support xilinx GMII to RGMII IP core it provides + the Reduced Gigabit Media Independent Interface(RGMII) between + Ethernet physical media devices and the Gigabit Ethernet controller. 
endif # PHYLIB -- cgit v1.2.3-55-g7522 From 8122e08b1d876b9af605db5ad351961c5ea276ed Mon Sep 17 00:00:00 2001 From: Ohad Oz Date: Mon, 14 Aug 2017 15:38:21 +0000 Subject: Allow Mellanox switch devices to be configured if only I2C bus is set Mellanox switches (mlxsw) supports I2C systems without PCI, in order to give the ability to the users to use such functionality, there is need to update Kconfig. Signed-off-by: Ohad Oz Acked-by: Leon Romanovsky Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig index 84a200764111..09497419eab2 100644 --- a/drivers/net/ethernet/mellanox/Kconfig +++ b/drivers/net/ethernet/mellanox/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_MELLANOX bool "Mellanox devices" default y - depends on PCI + depends on PCI || I2C ---help--- If you have a network (Ethernet) card belonging to this class, say Y. -- cgit v1.2.3-55-g7522 From a656d34a6e5acc4779c4cb8bad19f4ab040def45 Mon Sep 17 00:00:00 2001 From: Ohad Oz Date: Mon, 14 Aug 2017 15:38:22 +0000 Subject: Change Kconfig description This patch apply Mellanox network vendor which includes: - Mellanox card devices: ConnectX-4, ConnectX-5 and Connect-IB cards. - Mellanox switch device: SwitchX-2 Switch-IB, Spectrum. Therefore rephrasing help. Signed-off-by: Ohad Oz Acked-by: Leon Romanovsky Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig index 09497419eab2..872548cd9431 100644 --- a/drivers/net/ethernet/mellanox/Kconfig +++ b/drivers/net/ethernet/mellanox/Kconfig @@ -7,7 +7,8 @@ config NET_VENDOR_MELLANOX default y depends on PCI || I2C ---help--- - If you have a network (Ethernet) card belonging to this class, say Y. + If you have a network (Ethernet or RDMA) device belonging to this + class, say Y. Note that the answer to this question doesn't directly affect the kernel: saying N will just cause the configurator to skip all -- cgit v1.2.3-55-g7522 From 9834e586fa664781c22a970d254c60610bd9a1af Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Mon, 14 Aug 2017 22:39:59 +0200 Subject: Bluetooth: btusb: Add workaround for Broadcom devices without product id The GPD Pocket is shipping with a BCM2045 USB HCI with its vendor and product information set to 0000:0000 and also has its interface class set to 255 (Vendor Specific Class). Luckily it does advertise usable manufacturer and product strings. T: Bus=01 Lev=01 Prnt=01 Port=02 Cnt=02 Dev#= 3 Spd=12 MxCh= 0 D: Ver= 2.00 Cls=ff(vend.) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=0000 ProdID=0000 Rev= 1.12 S: Manufacturer=Broadcom Corp S: Product=BCM2045A0 S: SerialNumber=AC83F30677CB C:* #Ifs= 4 Cfg#= 1 Atr=80 MxPwr=100mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=1ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=ff(vend.) 
Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=ff(vend.) Sub=01 Prot=01 Driver=btusb E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms I:* If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none) E: Ad=84(I) Atr=02(Bulk) MxPS= 32 Ivl=0ms E: Ad=04(O) Atr=02(Bulk) MxPS= 32 Ivl=0ms I:* If#= 3 Alt= 0 #EPs= 0 Cls=fe(app. ) Sub=01 Prot=01 Driver=(none) Reported-by: Christopher Williamson Signed-off-by: Hans de Goede Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- drivers/bluetooth/btusb.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index e1124ba44154..e8d8a3f61f5b 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -66,6 +66,7 @@ static struct usb_driver btusb_driver; #define BTUSB_BCM2045 0x40000 #define BTUSB_IFNUM_2 0x80000 #define BTUSB_CW6622 0x100000 +#define BTUSB_BCM_NO_PRODID 0x200000 static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ @@ -170,6 +171,10 @@ static const struct usb_device_id btusb_table[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, + /* Broadcom devices with missing product id */ + { USB_DEVICE_AND_INTERFACE_INFO(0x0000, 0x0000, 0xff, 0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM | BTUSB_BCM_NO_PRODID }, + /* Intel Bluetooth USB Bootloader (RAM module) */ { USB_DEVICE(0x8087, 0x0a5a), .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC }, @@ -2899,6 +2904,19 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info == BTUSB_IGNORE) return -ENODEV; + if (id->driver_info & BTUSB_BCM_NO_PRODID) { + struct usb_device *udev = interface_to_usbdev(intf); + + /* For the broken Broadcom devices that show 0000:0000 + * as USB vendor and product information, check that the + * manufacturer string identifies them as Broadcom based + * devices. + */ + if (!udev->manufacturer || + strcmp(udev->manufacturer, "Broadcom Corp")) + return -ENODEV; + } + if (id->driver_info & BTUSB_ATH3012) { struct usb_device *udev = interface_to_usbdev(intf); -- cgit v1.2.3-55-g7522 From e65a8ccb0d89d09b5c096b1ee706eae42c4a6671 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Mon, 14 Aug 2017 12:01:31 -0700 Subject: liquidio: moved wait_for_pending_requests to octeon_network.h Moving common function wait_for_pending_requests to octeon_network.h Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- .../ethernet/cavium/liquidio/cn23xx_vf_device.h | 2 -- drivers/net/ethernet/cavium/liquidio/lio_main.c | 26 -------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 28 +--------------------- .../net/ethernet/cavium/liquidio/octeon_device.h | 2 ++ .../net/ethernet/cavium/liquidio/octeon_network.h | 26 ++++++++++++++++++++ 5 files changed, 29 insertions(+), 55 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h index 3f98c7334957..2d06097d3f61 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h @@ -36,8 +36,6 @@ struct octeon_cn23xx_vf { #define CN23XX_MAILBOX_MSGPARAM_SIZE 6 -#define MAX_VF_IP_OP_PENDING_PKT_COUNT 100 - void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct); int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 8ec0b6d978d2..73b3547bf142 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -275,32 +275,6 @@ static void force_io_queues_off(struct octeon_device *oct) } } -/** - * \brief wait for all pending requests to complete - * @param oct Pointer to Octeon device - * - * Called during shutdown sequence - */ -static int wait_for_pending_requests(struct octeon_device *oct) -{ - int i, pcount = 0; - - for (i = 0; i < 100; i++) { - pcount = - atomic_read(&oct->response_list - [OCTEON_ORDERED_SC_LIST].pending_req_count); - if (pcount) - schedule_timeout_uninterruptible(HZ / 10); - else - break; - } - - if (pcount) - return 1; - - return 0; -} - /** * \brief Cause device to go quiet so it can be safely removed/reset/etc * @param oct Pointer to Octeon device diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index c6f52f235647..17623ed1f0fa 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -123,7 +123,7 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct) { struct octeon_device_priv *oct_priv = (struct octeon_device_priv *)oct->priv; - int retry = MAX_VF_IP_OP_PENDING_PKT_COUNT; + int retry = MAX_IO_PENDING_PKT_COUNT; int pkt_cnt = 0, pending_pkts; int i; @@ -147,32 +147,6 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct) return pkt_cnt; } -/** - * \brief wait for all pending requests to complete - * @param oct Pointer to Octeon device - * - * Called during shutdown sequence - */ -static int wait_for_pending_requests(struct octeon_device *oct) -{ - int i, pcount = 0; - - for (i = 0; i < MAX_VF_IP_OP_PENDING_PKT_COUNT; i++) { - pcount = atomic_read( - &oct->response_list[OCTEON_ORDERED_SC_LIST] - .pending_req_count); - if (pcount) - schedule_timeout_uninterruptible(HZ / 10); - else - break; - } - - if (pcount) - return 1; - - return 0; -} - /** * \brief Cause device to go quiet so it can be safely removed/reset/etc * @param oct Pointer to Octeon device diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h index 2c554729bfe4..894af199ddef 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -571,6 +571,8 @@ struct octeon_device { #define CHIP_CONF(oct, TYPE) \ (((struct octeon_ ## TYPE 
*)((oct)->chip))->conf) +#define MAX_IO_PENDING_PKT_COUNT 100 + /*------------------ Function Prototypes ----------------------*/ /** Initialize device list memory */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index ec8504b2942d..043f6e653731 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -448,4 +448,30 @@ static inline void ifstate_reset(struct lio *lio, int state_flag) atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); } +/** + * \brief wait for all pending requests to complete + * @param oct Pointer to Octeon device + * + * Called during shutdown sequence + */ +static inline int wait_for_pending_requests(struct octeon_device *oct) +{ + int i, pcount = 0; + + for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) { + pcount = atomic_read( + &oct->response_list[OCTEON_ORDERED_SC_LIST] + .pending_req_count); + if (pcount) + schedule_timeout_uninterruptible(HZ / 10); + else + break; + } + + if (pcount) + return 1; + + return 0; +} + #endif -- cgit v1.2.3-55-g7522 From 25d43f182d42b40ba750e2e0516b693e62f8f723 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Mon, 14 Aug 2017 12:01:37 -0700 Subject: liquidio: moved update_txq_status to lio_core.c Moving common update_txq_status to lio_core.c Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 33 ++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 35 +--------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 26 +--------------- .../net/ethernet/cavium/liquidio/octeon_network.h | 1 + 4 files changed, 36 insertions(+), 59 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index adde7745d069..b55ab75a7d9c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -364,3 +364,36 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev) destroy_workqueue(lio->rxq_status_wq.wq); } } + +/* Runs in interrupt context. */ +void lio_update_txq_status(struct octeon_device *oct, int iq_num) +{ + struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; + struct net_device *netdev; + struct lio *lio; + + netdev = oct->props[iq->ifidx].netdev; + + /* This is needed because the first IQ does not have + * a netdev associated with it. + */ + if (!netdev) + return; + + lio = GET_LIO(netdev); + if (netif_is_multiqueue(netdev)) { + if (__netif_subqueue_stopped(netdev, iq->q_index) && + lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, iq_num))) { + netif_wake_subqueue(netdev, iq->q_index); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, + tx_restart, 1); + } + } else if (netif_queue_stopped(netdev) && + lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, lio->txq))) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq, + tx_restart, 1); + netif_wake_queue(netdev); + } +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 73b3547bf142..01c6985cff6d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -906,39 +906,6 @@ static inline void update_link_status(struct net_device *netdev, } } -/* Runs in interrupt context. 
*/ -static void update_txq_status(struct octeon_device *oct, int iq_num) -{ - struct net_device *netdev; - struct lio *lio; - struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; - - netdev = oct->props[iq->ifidx].netdev; - - /* This is needed because the first IQ does not have - * a netdev associated with it. - */ - if (!netdev) - return; - - lio = GET_LIO(netdev); - if (netif_is_multiqueue(netdev)) { - if (__netif_subqueue_stopped(netdev, iq->q_index) && - lio->linfo.link.s.link_up && - (!octnet_iq_is_full(oct, iq_num))) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, - tx_restart, 1); - netif_wake_subqueue(netdev, iq->q_index); - } - } else if (netif_queue_stopped(netdev) && - lio->linfo.link.s.link_up && - (!octnet_iq_is_full(oct, lio->txq))) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, - lio->txq, tx_restart, 1); - netif_wake_queue(netdev); - } -} - static int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) { @@ -2518,7 +2485,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false. */ - update_txq_status(oct, iq_no); + lio_update_txq_status(oct, iq_no); } else { dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", __func__, iq_no); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 17623ed1f0fa..dd0265a52521 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -647,30 +647,6 @@ static void update_link_status(struct net_device *netdev, } } -static void update_txq_status(struct octeon_device *oct, int iq_num) -{ - struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; - struct net_device *netdev; - struct lio *lio; - - netdev = oct->props[iq->ifidx].netdev; - lio = GET_LIO(netdev); - if (netif_is_multiqueue(netdev)) { - if (__netif_subqueue_stopped(netdev, iq->q_index) && - lio->linfo.link.s.link_up && - (!octnet_iq_is_full(oct, iq_num))) { - netif_wake_subqueue(netdev, iq->q_index); - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, - tx_restart, 1); - } - } else if (netif_queue_stopped(netdev) && lio->linfo.link.s.link_up && - (!octnet_iq_is_full(oct, lio->txq))) { - INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, - lio->txq, tx_restart, 1); - netif_wake_queue(netdev); - } -} - static int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) { @@ -1608,7 +1584,7 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) /* Update iq read-index rather than waiting for next interrupt. * Return back if tx_done is false. 
*/ - update_txq_status(oct, iq_no); + lio_update_txq_status(oct, iq_no); } else { dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", __func__, iq_no); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 043f6e653731..0e44ee1d4cb2 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -474,4 +474,5 @@ static inline int wait_for_pending_requests(struct octeon_device *oct) return 0; } +void lio_update_txq_status(struct octeon_device *oct, int iq_num); #endif -- cgit v1.2.3-55-g7522 From 69f9c60e832575fe73cea0387c2d6a079f0fc96c Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Mon, 14 Aug 2017 12:01:41 -0700 Subject: liquidio: moved octeon_setup_droq to lio_core.c Moving common octeon_setup_droq to lio_core.c Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 35 ++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 37 ---------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 35 -------------------- .../net/ethernet/cavium/liquidio/octeon_network.h | 2 ++ 4 files changed, 37 insertions(+), 72 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index b55ab75a7d9c..90583ce8642d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -397,3 +397,38 @@ void lio_update_txq_status(struct octeon_device *oct, int iq_num) netif_wake_queue(netdev); } } + +/** + * \brief Setup output queue + * @param oct octeon device + * @param q_no which queue + * @param num_descs how many descriptors + * @param desc_size size of each descriptor + * @param app_ctx application context + */ +int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, + int desc_size, void *app_ctx) +{ + int ret_val; + + dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); + /* droq creation and local register settings. */ + ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); + if (ret_val < 0) + return ret_val; + + if (ret_val == 1) { + dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); + return 0; + } + + /* Enable the droq queues */ + octeon_set_droq_pkt_op(oct, q_no, 1); + + /* Send Credit for Octeon Output queues. Credits are always + * sent after the output queue is enabled. + */ + writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); + + return ret_val; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 01c6985cff6d..02e71184fa4c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2198,43 +2198,6 @@ static int load_firmware(struct octeon_device *oct) return ret; } -/** - * \brief Setup output queue - * @param oct octeon device - * @param q_no which queue - * @param num_descs how many descriptors - * @param desc_size size of each descriptor - * @param app_ctx application context - */ -static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, - int desc_size, void *app_ctx) -{ - int ret_val = 0; - - dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); - /* droq creation and local register settings. 
*/ - ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); - if (ret_val < 0) - return ret_val; - - if (ret_val == 1) { - dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); - return 0; - } - /* tasklet creation for the droq */ - - /* Enable the droq queues */ - octeon_set_droq_pkt_op(oct, q_no, 1); - - /* Send Credit for Octeon Output queues. Credits are always - * sent after the output queue is enabled. - */ - writel(oct->droq[q_no]->max_count, - oct->droq[q_no]->pkts_credit_reg); - - return ret_val; -} - /** * \brief Callback for getting interface configuration * @param status status of request diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index dd0265a52521..a6efd75c30bb 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1344,41 +1344,6 @@ static void free_netsgbuf_with_resp(void *buf) check_txq_state(lio, skb); } -/** - * \brief Setup output queue - * @param oct octeon device - * @param q_no which queue - * @param num_descs how many descriptors - * @param desc_size size of each descriptor - * @param app_ctx application context - */ -static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, - int desc_size, void *app_ctx) -{ - int ret_val; - - dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); - /* droq creation and local register settings. */ - ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); - if (ret_val < 0) - return ret_val; - - if (ret_val == 1) { - dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); - return 0; - } - - /* Enable the droq queues */ - octeon_set_droq_pkt_op(oct, q_no, 1); - - /* Send Credit for Octeon Output queues. Credits are always - * sent after the output queue is enabled. - */ - writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); - - return ret_val; -} - /** * \brief Callback for getting interface configuration * @param status status of request diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 0e44ee1d4cb2..b2bb34dbc900 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -475,4 +475,6 @@ static inline int wait_for_pending_requests(struct octeon_device *oct) } void lio_update_txq_status(struct octeon_device *oct, int iq_num); +int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, + int desc_size, void *app_ctx); #endif -- cgit v1.2.3-55-g7522 From 21f0888b5a8a1d42630f2c7bb602a7a348714dd3 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Mon, 14 Aug 2017 12:01:44 -0700 Subject: liquidio: moved liquidio_push_packet to lio_core.c Moving common liquidio_push_packet to lio_core.c Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 149 +++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 147 -------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 128 ------------------ .../net/ethernet/cavium/liquidio/octeon_network.h | 7 + 4 files changed, 156 insertions(+), 275 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 90583ce8642d..b0b246e654cd 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -432,3 +432,152 @@ int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, return ret_val; } + +/** Routine to push packets arriving on Octeon interface upto network layer. + * @param oct_id - octeon device id. + * @param skbuff - skbuff struct to be passed to network layer. + * @param len - size of total data received. + * @param rh - Control header associated with the packet + * @param param - additional control data with the packet + * @param arg - farg registered in droq_ops + */ +void +liquidio_push_packet(u32 octeon_id __attribute__((unused)), + void *skbuff, + u32 len, + union octeon_rh *rh, + void *param, + void *arg) +{ + struct net_device *netdev = (struct net_device *)arg; + struct octeon_droq *droq = + container_of(param, struct octeon_droq, napi); + struct sk_buff *skb = (struct sk_buff *)skbuff; + struct skb_shared_hwtstamps *shhwtstamps; + struct napi_struct *napi = param; + u16 vtag = 0; + u32 r_dh_off; + u64 ns; + + if (netdev) { + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + int packet_was_received; + + /* Do not proceed if the interface is not in RUNNING state. */ + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { + recv_buffer_free(skb); + droq->stats.rx_dropped++; + return; + } + + skb->dev = netdev; + + skb_record_rx_queue(skb, droq->q_no); + if (likely(len > MIN_SKB_SIZE)) { + struct octeon_skb_page_info *pg_info; + unsigned char *va; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (pg_info->page) { + /* For Paged allocation use the frags */ + va = page_address(pg_info->page) + + pg_info->page_offset; + memcpy(skb->data, va, MIN_SKB_SIZE); + skb_put(skb, MIN_SKB_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); + } + } else { + struct octeon_skb_page_info *pg_info = + ((struct octeon_skb_page_info *)(skb->cb)); + skb_copy_to_linear_data(skb, page_address(pg_info->page) + + pg_info->page_offset, len); + skb_put(skb, len); + put_page(pg_info->page); + } + + r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; + + if (oct->ptp_enable) { + if (rh->r_dh.has_hwtstamp) { + /* timestamp is included from the hardware at + * the beginning of the packet. + */ + if (ifstate_check + (lio, + LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { + /* Nanoseconds are in the first 64-bits + * of the packet. 
+ */ + memcpy(&ns, (skb->data + r_dh_off), + sizeof(ns)); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(ns + + lio->ptp_adjust); + } + } + } + + if (rh->r_dh.has_hash) { + __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); + u32 hash = be32_to_cpu(*hash_be); + + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + } + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); + skb->protocol = eth_type_trans(skb, skb->dev); + + if ((netdev->features & NETIF_F_RXCSUM) && + (((rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || + (!(rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) + /* checksum has already been verified */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + + /* Setting Encapsulation field on basis of status received + * from the firmware + */ + if (rh->r_dh.encap_on) { + skb->encapsulation = 1; + skb->csum_level = 1; + droq->stats.rx_vxlan++; + } + + /* inbound VLAN tag */ + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + rh->r_dh.vlan) { + u16 priority = rh->r_dh.priority; + u16 vid = rh->r_dh.vlan; + + vtag = (priority << VLAN_PRIO_SHIFT) | vid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); + } + + packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP); + + if (packet_was_received) { + droq->stats.rx_bytes_received += len; + droq->stats.rx_pkts_received++; + } else { + droq->stats.rx_dropped++; + netif_info(lio, rx_err, lio->netdev, + "droq:%d error rx_dropped:%llu\n", + droq->q_no, droq->stats.rx_dropped); + } + + } else { + recv_buffer_free(skb); + } +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 02e71184fa4c..48a178ea30a6 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2231,153 +2231,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** Routine to push packets arriving on Octeon interface upto network layer. - * @param oct_id - octeon device id. - * @param skbuff - skbuff struct to be passed to network layer. - * @param len - size of total data received. - * @param rh - Control header associated with the packet - * @param param - additional control data with the packet - * @param arg - farg registered in droq_ops - */ -static void -liquidio_push_packet(u32 octeon_id __attribute__((unused)), - void *skbuff, - u32 len, - union octeon_rh *rh, - void *param, - void *arg) -{ - struct napi_struct *napi = param; - struct sk_buff *skb = (struct sk_buff *)skbuff; - struct skb_shared_hwtstamps *shhwtstamps; - u64 ns; - u16 vtag = 0; - u32 r_dh_off; - struct net_device *netdev = (struct net_device *)arg; - struct octeon_droq *droq = container_of(param, struct octeon_droq, - napi); - if (netdev) { - int packet_was_received; - struct lio *lio = GET_LIO(netdev); - struct octeon_device *oct = lio->oct_dev; - - /* Do not proceed if the interface is not in RUNNING state. 
*/ - if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { - recv_buffer_free(skb); - droq->stats.rx_dropped++; - return; - } - - skb->dev = netdev; - - skb_record_rx_queue(skb, droq->q_no); - if (likely(len > MIN_SKB_SIZE)) { - struct octeon_skb_page_info *pg_info; - unsigned char *va; - - pg_info = ((struct octeon_skb_page_info *)(skb->cb)); - if (pg_info->page) { - /* For Paged allocation use the frags */ - va = page_address(pg_info->page) + - pg_info->page_offset; - memcpy(skb->data, va, MIN_SKB_SIZE); - skb_put(skb, MIN_SKB_SIZE); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - pg_info->page, - pg_info->page_offset + - MIN_SKB_SIZE, - len - MIN_SKB_SIZE, - LIO_RXBUFFER_SZ); - } - } else { - struct octeon_skb_page_info *pg_info = - ((struct octeon_skb_page_info *)(skb->cb)); - skb_copy_to_linear_data(skb, page_address(pg_info->page) - + pg_info->page_offset, len); - skb_put(skb, len); - put_page(pg_info->page); - } - - r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; - - if (oct->ptp_enable) { - if (rh->r_dh.has_hwtstamp) { - /* timestamp is included from the hardware at - * the beginning of the packet. - */ - if (ifstate_check - (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { - /* Nanoseconds are in the first 64-bits - * of the packet. - */ - memcpy(&ns, (skb->data + r_dh_off), - sizeof(ns)); - r_dh_off -= BYTES_PER_DHLEN_UNIT; - shhwtstamps = skb_hwtstamps(skb); - shhwtstamps->hwtstamp = - ns_to_ktime(ns + - lio->ptp_adjust); - } - } - } - - if (rh->r_dh.has_hash) { - __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); - u32 hash = be32_to_cpu(*hash_be); - - skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); - r_dh_off -= BYTES_PER_DHLEN_UNIT; - } - - skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); - - skb->protocol = eth_type_trans(skb, skb->dev); - if ((netdev->features & NETIF_F_RXCSUM) && - (((rh->r_dh.encap_on) && - (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || - (!(rh->r_dh.encap_on) && - (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) - /* checksum has already been verified */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb->ip_summed = CHECKSUM_NONE; - - /* Setting Encapsulation field on basis of status received - * from the firmware - */ - if (rh->r_dh.encap_on) { - skb->encapsulation = 1; - skb->csum_level = 1; - droq->stats.rx_vxlan++; - } - - /* inbound VLAN tag */ - if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - (rh->r_dh.vlan != 0)) { - u16 vid = rh->r_dh.vlan; - u16 priority = rh->r_dh.priority; - - vtag = priority << 13 | vid; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); - } - - packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP; - - if (packet_was_received) { - droq->stats.rx_bytes_received += len; - droq->stats.rx_pkts_received++; - } else { - droq->stats.rx_dropped++; - netif_info(lio, rx_err, lio->netdev, - "droq:%d error rx_dropped:%llu\n", - droq->q_no, droq->stats.rx_dropped); - } - - } else { - recv_buffer_free(skb); - } -} - /** * \brief wrapper for calling napi_schedule * @param param parameters to pass to napi_schedule diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index a6efd75c30bb..013a8613d4de 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1376,134 +1376,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** Routine to push packets arriving on Octeon interface upto network layer. - * @param oct_id - octeon device id. 
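
One small cleanup in the consolidated receive path is that the inbound VLAN tag is built with VLAN_PRIO_SHIFT instead of the bare constant 13 used by the copy being removed here. Both forms pack the 802.1Q tag control word the same way; the sketch below shows the packing (the VID mask is added for clarity and is not part of the driver code):

#include <stdint.h>
#include <stdio.h>

/* 802.1Q TCI layout: priority (PCP) in bits 15..13, VID in bits 11..0.
 * VLAN_PRIO_SHIFT in the kernel is this same 13-bit shift, so
 * "(priority << VLAN_PRIO_SHIFT) | vid" matches the older
 * "priority << 13 | vid". */
#define VLAN_PRIO_SHIFT 13
#define VLAN_VID_MASK   0x0fff

static uint16_t pack_vtag(uint16_t priority, uint16_t vid)
{
        return (uint16_t)((priority << VLAN_PRIO_SHIFT) | (vid & VLAN_VID_MASK));
}

int main(void)
{
        uint16_t vtag = pack_vtag(5, 100);      /* PCP 5, VLAN 100 */

        printf("vtag = 0x%04x (prio %u, vid %u)\n",
               vtag, vtag >> VLAN_PRIO_SHIFT, vtag & VLAN_VID_MASK);
        return 0;
}
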
- * @param skbuff - skbuff struct to be passed to network layer. - * @param len - size of total data received. - * @param rh - Control header associated with the packet - * @param param - additional control data with the packet - * @param arg - farg registered in droq_ops - */ -static void -liquidio_push_packet(u32 octeon_id __attribute__((unused)), - void *skbuff, - u32 len, - union octeon_rh *rh, - void *param, - void *arg) -{ - struct napi_struct *napi = param; - struct octeon_droq *droq = - container_of(param, struct octeon_droq, napi); - struct net_device *netdev = (struct net_device *)arg; - struct sk_buff *skb = (struct sk_buff *)skbuff; - u16 vtag = 0; - u32 r_dh_off; - - if (netdev) { - struct lio *lio = GET_LIO(netdev); - int packet_was_received; - - /* Do not proceed if the interface is not in RUNNING state. */ - if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { - recv_buffer_free(skb); - droq->stats.rx_dropped++; - return; - } - - skb->dev = netdev; - - skb_record_rx_queue(skb, droq->q_no); - if (likely(len > MIN_SKB_SIZE)) { - struct octeon_skb_page_info *pg_info; - unsigned char *va; - - pg_info = ((struct octeon_skb_page_info *)(skb->cb)); - if (pg_info->page) { - /* For Paged allocation use the frags */ - va = page_address(pg_info->page) + - pg_info->page_offset; - memcpy(skb->data, va, MIN_SKB_SIZE); - skb_put(skb, MIN_SKB_SIZE); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - pg_info->page, - pg_info->page_offset + - MIN_SKB_SIZE, - len - MIN_SKB_SIZE, - LIO_RXBUFFER_SZ); - } - } else { - struct octeon_skb_page_info *pg_info = - ((struct octeon_skb_page_info *)(skb->cb)); - skb_copy_to_linear_data(skb, - page_address(pg_info->page) + - pg_info->page_offset, len); - skb_put(skb, len); - put_page(pg_info->page); - } - - r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; - - if (rh->r_dh.has_hwtstamp) - r_dh_off -= BYTES_PER_DHLEN_UNIT; - - if (rh->r_dh.has_hash) { - __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); - u32 hash = be32_to_cpu(*hash_be); - - skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); - r_dh_off -= BYTES_PER_DHLEN_UNIT; - } - - skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); - skb->protocol = eth_type_trans(skb, skb->dev); - - if ((netdev->features & NETIF_F_RXCSUM) && - (((rh->r_dh.encap_on) && - (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || - (!(rh->r_dh.encap_on) && - (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED)))) - /* checksum has already been verified */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - else - skb->ip_summed = CHECKSUM_NONE; - - /* Setting Encapsulation field on basis of status received - * from the firmware - */ - if (rh->r_dh.encap_on) { - skb->encapsulation = 1; - skb->csum_level = 1; - droq->stats.rx_vxlan++; - } - - /* inbound VLAN tag */ - if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && - rh->r_dh.vlan) { - u16 priority = rh->r_dh.priority; - u16 vid = rh->r_dh.vlan; - - vtag = (priority << VLAN_PRIO_SHIFT) | vid; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); - } - - packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP); - - if (packet_was_received) { - droq->stats.rx_bytes_received += len; - droq->stats.rx_pkts_received++; - } else { - droq->stats.rx_dropped++; - netif_info(lio, rx_err, lio->netdev, - "droq:%d error rx_dropped:%llu\n", - droq->q_no, droq->stats.rx_dropped); - } - - } else { - recv_buffer_free(skb); - } -} - /** * \brief callback when receive interrupt occurs and we are in NAPI mode * @param arg pointer to octeon output queue diff --git 
a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index b2bb34dbc900..5d78fd6ad3cd 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -477,4 +477,11 @@ static inline int wait_for_pending_requests(struct octeon_device *oct) void lio_update_txq_status(struct octeon_device *oct, int iq_num); int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, int desc_size, void *app_ctx); +void +liquidio_push_packet(u32 octeon_id __attribute__((unused)), + void *skbuff, + u32 len, + union octeon_rh *rh, + void *param, + void *arg); #endif -- cgit v1.2.3-55-g7522 From d1d97ee6e3a8e337883f203e90b507196e64e5fb Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Mon, 14 Aug 2017 12:01:48 -0700 Subject: liquidio: moved liquidio_napi_drv_callback to lio_core.c Moving common liquidio_napi_drv_callback to lio_core.c Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 39 ++++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 38 --------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 13 +------- .../net/ethernet/cavium/liquidio/octeon_network.h | 1 + 4 files changed, 41 insertions(+), 50 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index b0b246e654cd..8cba9278c274 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -581,3 +581,42 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), recv_buffer_free(skb); } } + +/** + * \brief wrapper for calling napi_schedule + * @param param parameters to pass to napi_schedule + * + * Used when scheduling on different CPUs + */ +static void napi_schedule_wrapper(void *param) +{ + struct napi_struct *napi = param; + + napi_schedule(napi); +} + +/** + * \brief callback when receive interrupt occurs and we are in NAPI mode + * @param arg pointer to octeon output queue + */ +void liquidio_napi_drv_callback(void *arg) +{ + struct octeon_device *oct; + struct octeon_droq *droq = arg; + int this_cpu = smp_processor_id(); + + oct = droq->oct_dev; + + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) || + droq->cpu_id == this_cpu) { + napi_schedule_irqoff(&droq->napi); + } else { + struct call_single_data *csd = &droq->csd; + + csd->func = napi_schedule_wrapper; + csd->info = &droq->napi; + csd->flags = 0; + + smp_call_function_single_async(droq->cpu_id, csd); + } +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 48a178ea30a6..e72618f6b2eb 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2231,44 +2231,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief wrapper for calling napi_schedule - * @param param parameters to pass to napi_schedule - * - * Used when scheduling on different CPUs - */ -static void napi_schedule_wrapper(void *param) -{ - struct napi_struct *napi = param; - - napi_schedule(napi); -} - -/** - * \brief callback when receive interrupt occurs and we are in NAPI mode - * @param arg pointer to octeon output queue - */ -static void liquidio_napi_drv_callback(void *arg) -{ - struct octeon_device *oct; - struct octeon_droq *droq = arg; - int 
this_cpu = smp_processor_id(); - - oct = droq->oct_dev; - - if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) { - napi_schedule_irqoff(&droq->napi); - } else { - struct call_single_data *csd = &droq->csd; - - csd->func = napi_schedule_wrapper; - csd->info = &droq->napi; - csd->flags = 0; - - smp_call_function_single_async(droq->cpu_id, csd); - } -} - /** * \brief Entry point for NAPI polling * @param napi NAPI structure diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 013a8613d4de..2663bd60e5d2 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1376,17 +1376,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief callback when receive interrupt occurs and we are in NAPI mode - * @param arg pointer to octeon output queue - */ -static void liquidio_vf_napi_drv_callback(void *arg) -{ - struct octeon_droq *droq = arg; - - napi_schedule_irqoff(&droq->napi); -} - /** * \brief Entry point for NAPI polling * @param napi NAPI structure @@ -1473,7 +1462,7 @@ static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx) droq_ops.farg = netdev; droq_ops.poll_mode = 1; - droq_ops.napi_fn = liquidio_vf_napi_drv_callback; + droq_ops.napi_fn = liquidio_napi_drv_callback; cpu_id = 0; cpu_id_modulus = num_present_cpus(); diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 5d78fd6ad3cd..076fdfcdd103 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -484,4 +484,5 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), union octeon_rh *rh, void *param, void *arg); +void liquidio_napi_drv_callback(void *arg); #endif -- cgit v1.2.3-55-g7522 From d314ac222829c4e5cf7c0f505f207cb8848e0b8f Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Mon, 14 Aug 2017 12:01:50 -0700 Subject: liquidio: moved liquidio_napi_poll to lio_core.c Moving common liquidio_napi_poll to lio_core.c Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 61 +++++++++++++++++++++- drivers/net/ethernet/cavium/liquidio/lio_main.c | 52 ------------------ drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 54 ------------------- .../net/ethernet/cavium/liquidio/octeon_network.h | 2 +- 4 files changed, 61 insertions(+), 108 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 8cba9278c274..2030c2531309 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -366,7 +366,7 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev) } /* Runs in interrupt context. 
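
The interrupt callback consolidated above relies on a one-line wrapper because the cross-CPU call infrastructure only knows about a void (*)(void *) callback: napi_schedule_wrapper() recovers the typed napi_struct argument before calling napi_schedule(), and the call_single_data slot carries both to the droq's designated CPU via smp_call_function_single_async(). Below is a self-contained sketch of that adapter pattern; all struct and function names in it are illustrative stand-ins, not kernel APIs:

#include <stdio.h>

/* Generic deferred-call slot, analogous to struct call_single_data:
 * the dispatcher only sees an untyped callback and argument. */
struct deferred_call {
        void (*func)(void *info);
        void *info;
};

struct fake_napi {
        int id;
};

/* The real operation takes a typed argument... */
static void schedule_napi(struct fake_napi *napi)
{
        printf("scheduling NAPI context %d\n", napi->id);
}

/* ...so a thin wrapper recovers the type from the void pointer, which is
 * the role napi_schedule_wrapper() plays for the kernel helper. */
static void schedule_napi_wrapper(void *param)
{
        schedule_napi(param);
}

/* Stand-in for handing the slot to another CPU; here it just runs inline. */
static void dispatch(struct deferred_call *csd)
{
        csd->func(csd->info);
}

int main(void)
{
        struct fake_napi napi = { .id = 3 };
        struct deferred_call csd = {
                .func = schedule_napi_wrapper,
                .info = &napi,
        };

        dispatch(&csd);
        return 0;
}
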
*/ -void lio_update_txq_status(struct octeon_device *oct, int iq_num) +static void lio_update_txq_status(struct octeon_device *oct, int iq_num) { struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; struct net_device *netdev; @@ -620,3 +620,62 @@ void liquidio_napi_drv_callback(void *arg) smp_call_function_single_async(droq->cpu_id, csd); } } + +/** + * \brief Entry point for NAPI polling + * @param napi NAPI structure + * @param budget maximum number of items to process + */ +int liquidio_napi_poll(struct napi_struct *napi, int budget) +{ + struct octeon_instr_queue *iq; + struct octeon_device *oct; + struct octeon_droq *droq; + int tx_done = 0, iq_no; + int work_done; + + droq = container_of(napi, struct octeon_droq, napi); + oct = droq->oct_dev; + iq_no = droq->q_no; + + /* Handle Droq descriptors */ + work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, + POLL_EVENT_PROCESS_PKTS, + budget); + + /* Flush the instruction queue */ + iq = oct->instr_queue[iq_no]; + if (iq) { + /* TODO: move this check to inside octeon_flush_iq, + * once check_db_timeout is removed + */ + if (atomic_read(&iq->instr_pending)) + /* Process iq buffers with in the budget limits */ + tx_done = octeon_flush_iq(oct, iq, budget); + else + tx_done = 1; + /* Update iq read-index rather than waiting for next interrupt. + * Return back if tx_done is false. + */ + /* sub-queue status update */ + lio_update_txq_status(oct, iq_no); + } else { + dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", + __func__, iq_no); + } + +#define MAX_REG_CNT 2000000U + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if (((work_done < budget) && (tx_done)) || + (iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + tx_done = 1; + napi_complete_done(napi, work_done); + + octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, + POLL_EVENT_ENABLE_INTR, 0); + return 0; + } + + return (!tx_done) ? (budget) : (work_done); +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index e72618f6b2eb..632c395a4f44 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2231,58 +2231,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Entry point for NAPI polling - * @param napi NAPI structure - * @param budget maximum number of items to process - */ -static int liquidio_napi_poll(struct napi_struct *napi, int budget) -{ - struct octeon_droq *droq; - int work_done; - int tx_done = 0, iq_no; - struct octeon_instr_queue *iq; - struct octeon_device *oct; - - droq = container_of(napi, struct octeon_droq, napi); - oct = droq->oct_dev; - iq_no = droq->q_no; - /* Handle Droq descriptors */ - work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, - POLL_EVENT_PROCESS_PKTS, - budget); - - /* Flush the instruction queue */ - iq = oct->instr_queue[iq_no]; - if (iq) { - if (atomic_read(&iq->instr_pending)) - /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, budget); - else - tx_done = 1; - /* Update iq read-index rather than waiting for next interrupt. - * Return back if tx_done is false. 
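
The return-value convention in the consolidated liquidio_napi_poll() is easy to misread, so here it is reduced to a pure function: return 0 after napi_complete_done() when the budget was not exhausted and TX is drained (or the force-complete counters fire), return the full budget to stay in polling mode while TX work remains, and otherwise report the RX work actually done. The helper below is an illustrative condensation, with counters_high standing in for the MAX_REG_CNT checks:

#include <stdbool.h>
#include <stdio.h>

static int poll_retval(int work_done, int budget, bool tx_done, bool counters_high)
{
        if ((work_done < budget && tx_done) || counters_high)
                return 0;       /* napi_complete_done() path: interrupts re-armed */

        return tx_done ? work_done : budget;    /* keep polling while TX is pending */
}

int main(void)
{
        printf("%d\n", poll_retval(10, 64, true, false));       /* 0: all done this round */
        printf("%d\n", poll_retval(64, 64, true, false));       /* 64: budget exhausted */
        printf("%d\n", poll_retval(10, 64, false, false));      /* 64: TX still pending */
        return 0;
}
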
- */ - lio_update_txq_status(oct, iq_no); - } else { - dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", - __func__, iq_no); - } - - /* force enable interrupt if reg cnts are high to avoid wraparound */ - if ((work_done < budget && tx_done) || - (iq && iq->pkt_in_done >= MAX_REG_CNT) || - (droq->pkt_count >= MAX_REG_CNT)) { - tx_done = 1; - napi_complete_done(napi, work_done); - octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, - POLL_EVENT_ENABLE_INTR, 0); - return 0; - } - - return (!tx_done) ? (budget) : (work_done); -} - /** * \brief Setup input and output queues * @param octeon_dev octeon device diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 2663bd60e5d2..0bd0c30077c7 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1376,60 +1376,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Entry point for NAPI polling - * @param napi NAPI structure - * @param budget maximum number of items to process - */ -static int liquidio_napi_poll(struct napi_struct *napi, int budget) -{ - struct octeon_instr_queue *iq; - struct octeon_device *oct; - struct octeon_droq *droq; - int tx_done = 0, iq_no; - int work_done; - - droq = container_of(napi, struct octeon_droq, napi); - oct = droq->oct_dev; - iq_no = droq->q_no; - - /* Handle Droq descriptors */ - work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, - POLL_EVENT_PROCESS_PKTS, - budget); - - /* Flush the instruction queue */ - iq = oct->instr_queue[iq_no]; - if (iq) { - if (atomic_read(&iq->instr_pending)) - /* Process iq buffers with in the budget limits */ - tx_done = octeon_flush_iq(oct, iq, budget); - else - tx_done = 1; - - /* Update iq read-index rather than waiting for next interrupt. - * Return back if tx_done is false. - */ - lio_update_txq_status(oct, iq_no); - } else { - dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", - __func__, iq_no); - } - - /* force enable interrupt if reg cnts are high to avoid wraparound */ - if ((work_done < budget && tx_done) || - (iq && iq->pkt_in_done >= MAX_REG_CNT) || - (droq->pkt_count >= MAX_REG_CNT)) { - tx_done = 1; - napi_complete_done(napi, work_done); - octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, - POLL_EVENT_ENABLE_INTR, 0); - return 0; - } - - return (!tx_done) ? 
(budget) : (work_done); -} - /** * \brief Setup input and output queues * @param octeon_dev octeon device diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 076fdfcdd103..b6597ef56968 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -474,7 +474,6 @@ static inline int wait_for_pending_requests(struct octeon_device *oct) return 0; } -void lio_update_txq_status(struct octeon_device *oct, int iq_num); int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, int desc_size, void *app_ctx); void @@ -485,4 +484,5 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)), void *param, void *arg); void liquidio_napi_drv_callback(void *arg); +int liquidio_napi_poll(struct napi_struct *napi, int budget); #endif -- cgit v1.2.3-55-g7522 From 8974de1b7dadfea29a4d2182af775cd73df02319 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Mon, 14 Aug 2017 12:01:53 -0700 Subject: liquidio: moved liquidio_setup_io_queues to lio_core.c Moving common liquidio_setup_io_queues to lio_core.c Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 119 ++++++++++++++++++++- drivers/net/ethernet/cavium/liquidio/lio_main.c | 109 +------------------ drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 93 +--------------- .../net/ethernet/cavium/liquidio/octeon_network.h | 13 +-- 4 files changed, 118 insertions(+), 216 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 2030c2531309..d20d0eb45048 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -406,8 +406,8 @@ static void lio_update_txq_status(struct octeon_device *oct, int iq_num) * @param desc_size size of each descriptor * @param app_ctx application context */ -int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, - int desc_size, void *app_ctx) +static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, + int desc_size, void *app_ctx) { int ret_val; @@ -441,7 +441,7 @@ int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, * @param param - additional control data with the packet * @param arg - farg registered in droq_ops */ -void +static void liquidio_push_packet(u32 octeon_id __attribute__((unused)), void *skbuff, u32 len, @@ -599,7 +599,7 @@ static void napi_schedule_wrapper(void *param) * \brief callback when receive interrupt occurs and we are in NAPI mode * @param arg pointer to octeon output queue */ -void liquidio_napi_drv_callback(void *arg) +static void liquidio_napi_drv_callback(void *arg) { struct octeon_device *oct; struct octeon_droq *droq = arg; @@ -626,7 +626,7 @@ void liquidio_napi_drv_callback(void *arg) * @param napi NAPI structure * @param budget maximum number of items to process */ -int liquidio_napi_poll(struct napi_struct *napi, int budget) +static int liquidio_napi_poll(struct napi_struct *napi, int budget) { struct octeon_instr_queue *iq; struct octeon_device *oct; @@ -679,3 +679,112 @@ int liquidio_napi_poll(struct napi_struct *napi, int budget) return (!tx_done) ? (budget) : (work_done); } + +/** + * \brief Setup input and output queues + * @param octeon_dev octeon device + * @param ifidx Interface index + * + * Note: Queues are with respect to the octeon device. 
Thus + * an input queue is for egress packets, and output queues + * are for ingress packets. + */ +int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx) +{ + struct octeon_droq_ops droq_ops; + struct net_device *netdev; + struct octeon_droq *droq; + struct napi_struct *napi; + int cpu_id_modulus; + int num_tx_descs; + struct lio *lio; + int retval = 0; + int q, q_no; + int cpu_id; + + netdev = octeon_dev->props[ifidx].netdev; + + lio = GET_LIO(netdev); + + memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); + + droq_ops.fptr = liquidio_push_packet; + droq_ops.farg = netdev; + + droq_ops.poll_mode = 1; + droq_ops.napi_fn = liquidio_napi_drv_callback; + cpu_id = 0; + cpu_id_modulus = num_present_cpus(); + + /* set up DROQs. */ + for (q = 0; q < lio->linfo.num_rxpciq; q++) { + q_no = lio->linfo.rxpciq[q].s.q_no; + dev_dbg(&octeon_dev->pci_dev->dev, + "%s index:%d linfo.rxpciq.s.q_no:%d\n", + __func__, q, q_no); + retval = octeon_setup_droq( + octeon_dev, q_no, + CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev), + lio->ifidx), + CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev), + lio->ifidx), + NULL); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "%s : Runtime DROQ(RxQ) creation failed.\n", + __func__); + return 1; + } + + droq = octeon_dev->droq[q_no]; + napi = &droq->napi; + dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n", + (u64)netdev, (u64)octeon_dev); + netif_napi_add(netdev, napi, liquidio_napi_poll, 64); + + /* designate a CPU for this droq */ + droq->cpu_id = cpu_id; + cpu_id++; + if (cpu_id >= cpu_id_modulus) + cpu_id = 0; + + octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); + } + + if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) { + /* 23XX PF/VF can send/recv control messages (via the first + * PF/VF-owned droq) from the firmware even if the ethX + * interface is down, so that's why poll_mode must be off + * for the first droq. + */ + octeon_dev->droq[0]->ops.poll_mode = 0; + } + + /* set up IQs. */ + for (q = 0; q < lio->linfo.num_txpciq; q++) { + num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF( + octeon_get_conf(octeon_dev), lio->ifidx); + retval = octeon_setup_iq(octeon_dev, ifidx, q, + lio->linfo.txpciq[q], num_tx_descs, + netdev_get_tx_queue(netdev, q)); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + " %s : Runtime IQ(TxQ) creation failed.\n", + __func__); + return 1; + } + + /* XPS */ + if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on && + octeon_dev->ioq_vector) { + struct octeon_ioq_vector *ioq_vector; + + ioq_vector = &octeon_dev->ioq_vector[q]; + netif_set_xps_queue(netdev, + &ioq_vector->affinity_mask, + ioq_vector->iq_index); + } + } + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 632c395a4f44..832db5abff48 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2231,113 +2231,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Setup input and output queues - * @param octeon_dev octeon device - * @param ifidx Interface Index - * - * Note: Queues are with respect to the octeon device. Thus - * an input queue is for egress packets, and output queues - * are for ingress packets. 
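
The DROQ loop in the new liquidio_setup_io_queues() spreads receive queues over CPUs with a simple round-robin counter wrapped at num_present_cpus(), and each droq remembers its cpu_id for the NAPI-scheduling callback. A tiny sketch of that assignment, with queue and CPU counts made up for the example:

#include <stdio.h>

int main(void)
{
        int num_queues = 8;
        int num_cpus = 3;       /* stand-in for num_present_cpus() */
        int cpu_id = 0;
        int q;

        for (q = 0; q < num_queues; q++) {
                printf("rx queue %d -> cpu %d\n", q, cpu_id);
                cpu_id++;
                if (cpu_id >= num_cpus)
                        cpu_id = 0;
        }
        return 0;
}
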
- */ -static inline int setup_io_queues(struct octeon_device *octeon_dev, - int ifidx) -{ - struct octeon_droq_ops droq_ops; - struct net_device *netdev; - int cpu_id; - int cpu_id_modulus; - struct octeon_droq *droq; - struct napi_struct *napi; - int q, q_no, retval = 0; - struct lio *lio; - int num_tx_descs; - - netdev = octeon_dev->props[ifidx].netdev; - - lio = GET_LIO(netdev); - - memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); - - droq_ops.fptr = liquidio_push_packet; - droq_ops.farg = (void *)netdev; - - droq_ops.poll_mode = 1; - droq_ops.napi_fn = liquidio_napi_drv_callback; - cpu_id = 0; - cpu_id_modulus = num_present_cpus(); - - /* set up DROQs. */ - for (q = 0; q < lio->linfo.num_rxpciq; q++) { - q_no = lio->linfo.rxpciq[q].s.q_no; - dev_dbg(&octeon_dev->pci_dev->dev, - "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n", - q, q_no); - retval = octeon_setup_droq(octeon_dev, q_no, - CFG_GET_NUM_RX_DESCS_NIC_IF - (octeon_get_conf(octeon_dev), - lio->ifidx), - CFG_GET_NUM_RX_BUF_SIZE_NIC_IF - (octeon_get_conf(octeon_dev), - lio->ifidx), NULL); - if (retval) { - dev_err(&octeon_dev->pci_dev->dev, - "%s : Runtime DROQ(RxQ) creation failed.\n", - __func__); - return 1; - } - - droq = octeon_dev->droq[q_no]; - napi = &droq->napi; - dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n", - (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num); - netif_napi_add(netdev, napi, liquidio_napi_poll, 64); - - /* designate a CPU for this droq */ - droq->cpu_id = cpu_id; - cpu_id++; - if (cpu_id >= cpu_id_modulus) - cpu_id = 0; - - octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); - } - - if (OCTEON_CN23XX_PF(octeon_dev)) { - /* 23XX PF can receive control messages (via the first PF-owned - * droq) from the firmware even if the ethX interface is down, - * so that's why poll_mode must be off for the first droq. - */ - octeon_dev->droq[0]->ops.poll_mode = 0; - } - - /* set up IQs. 
*/ - for (q = 0; q < lio->linfo.num_txpciq; q++) { - num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf - (octeon_dev), - lio->ifidx); - retval = octeon_setup_iq(octeon_dev, ifidx, q, - lio->linfo.txpciq[q], num_tx_descs, - netdev_get_tx_queue(netdev, q)); - if (retval) { - dev_err(&octeon_dev->pci_dev->dev, - " %s : Runtime IQ(TxQ) creation failed.\n", - __func__); - return 1; - } - - if (octeon_dev->ioq_vector) { - struct octeon_ioq_vector *ioq_vector; - - ioq_vector = &octeon_dev->ioq_vector[q]; - netif_set_xps_queue(netdev, - &ioq_vector->affinity_mask, - ioq_vector->iq_index); - } - } - - return 0; -} - /** * \brief Poll routine for checking transmit queue status * @param work work_struct data structure @@ -3898,7 +3791,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) */ lio->txq = lio->linfo.txpciq[0].s.q_no; lio->rxq = lio->linfo.rxpciq[0].s.q_no; - if (setup_io_queues(octeon_dev, i)) { + if (liquidio_setup_io_queues(octeon_dev, i)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 0bd0c30077c7..aa502a8d3fdb 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1376,97 +1376,6 @@ static void if_cfg_callback(struct octeon_device *oct, wake_up_interruptible(&ctx->wc); } -/** - * \brief Setup input and output queues - * @param octeon_dev octeon device - * @param ifidx Interface index - * - * Note: Queues are with respect to the octeon device. Thus - * an input queue is for egress packets, and output queues - * are for ingress packets. - */ -static int setup_io_queues(struct octeon_device *octeon_dev, int ifidx) -{ - struct octeon_droq_ops droq_ops; - struct net_device *netdev; - int cpu_id_modulus; - struct octeon_droq *droq; - struct napi_struct *napi; - int cpu_id; - int num_tx_descs; - struct lio *lio; - int retval = 0; - int q, q_no; - - netdev = octeon_dev->props[ifidx].netdev; - - lio = GET_LIO(netdev); - - memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); - - droq_ops.fptr = liquidio_push_packet; - droq_ops.farg = netdev; - - droq_ops.poll_mode = 1; - droq_ops.napi_fn = liquidio_napi_drv_callback; - cpu_id = 0; - cpu_id_modulus = num_present_cpus(); - - /* set up DROQs. */ - for (q = 0; q < lio->linfo.num_rxpciq; q++) { - q_no = lio->linfo.rxpciq[q].s.q_no; - - retval = octeon_setup_droq( - octeon_dev, q_no, - CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev), - lio->ifidx), - CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev), - lio->ifidx), - NULL); - if (retval) { - dev_err(&octeon_dev->pci_dev->dev, - "%s : Runtime DROQ(RxQ) creation failed.\n", - __func__); - return 1; - } - - droq = octeon_dev->droq[q_no]; - napi = &droq->napi; - netif_napi_add(netdev, napi, liquidio_napi_poll, 64); - - /* designate a CPU for this droq */ - droq->cpu_id = cpu_id; - cpu_id++; - if (cpu_id >= cpu_id_modulus) - cpu_id = 0; - - octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); - } - - /* 23XX VF can send/recv control messages (via the first VF-owned - * droq) from the firmware even if the ethX interface is down, - * so that's why poll_mode must be off for the first droq. - */ - octeon_dev->droq[0]->ops.poll_mode = 0; - - /* set up IQs. 
*/ - for (q = 0; q < lio->linfo.num_txpciq; q++) { - num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF( - octeon_get_conf(octeon_dev), lio->ifidx); - retval = octeon_setup_iq(octeon_dev, ifidx, q, - lio->linfo.txpciq[q], num_tx_descs, - netdev_get_tx_queue(netdev, q)); - if (retval) { - dev_err(&octeon_dev->pci_dev->dev, - " %s : Runtime IQ(TxQ) creation failed.\n", - __func__); - return 1; - } - } - - return 0; -} - /** * \brief Net device open for LiquidIO * @param netdev network device @@ -2695,7 +2604,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) /* Copy MAC Address to OS network device structure */ ether_addr_copy(netdev->dev_addr, mac); - if (setup_io_queues(octeon_dev, i)) { + if (liquidio_setup_io_queues(octeon_dev, i)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index b6597ef56968..b49b155ef523 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -166,6 +166,8 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev); */ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr); +int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx); + /** * \brief Register ethtool operations * @param netdev pointer to network device @@ -474,15 +476,4 @@ static inline int wait_for_pending_requests(struct octeon_device *oct) return 0; } -int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, - int desc_size, void *app_ctx); -void -liquidio_push_packet(u32 octeon_id __attribute__((unused)), - void *skbuff, - u32 len, - union octeon_rh *rh, - void *param, - void *arg); -void liquidio_napi_drv_callback(void *arg); -int liquidio_napi_poll(struct napi_struct *napi, int budget); #endif -- cgit v1.2.3-55-g7522 From d18ca7df9f2a863cf67645609bb4a97b0b088116 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Mon, 14 Aug 2017 12:01:56 -0700 Subject: liquidio: added support for ethtool --set-ring feature added support for ethtool --set-ring feature Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 131 +++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 6 +- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 6 +- .../net/ethernet/cavium/liquidio/octeon_config.h | 13 +- .../net/ethernet/cavium/liquidio/octeon_device.c | 14 +-- .../net/ethernet/cavium/liquidio/octeon_network.h | 1 + 6 files changed, 160 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 4f65c08461f6..a59c8ccebd10 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -637,6 +637,9 @@ lio_ethtool_get_ringparam(struct net_device *netdev, u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0, rx_pending = 0; + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + if (OCTEON_CN6XXX(oct)) { struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); @@ -661,6 +664,126 @@ lio_ethtool_get_ringparam(struct net_device *netdev, ering->rx_jumbo_max_pending = 0; } +static int lio_reset_queues(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct napi_struct *napi, *n; + int i; + + dev_dbg(&oct->pci_dev->dev, "%s:%d ifidx %d\n", + __func__, __LINE__, lio->ifidx); + + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + + if (lio_wait_for_instr_fetch(oct)) + dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + + if (octeon_set_io_queues_off(oct)) { + dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); + return -1; + } + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon. 
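
The set_ringparam handler added in this patch clamps the requested descriptor counts to the chip's supported range, bails out early when the clamped values match what is already configured, and only then tears down and rebuilds the queues. A user-space sketch of that front-end decision; the bounds below are illustrative, while the driver uses its CN23XX_MIN/MAX descriptor constants:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for clamp_t(u32, val, lo, hi). */
static uint32_t clamp_u32(uint32_t val, uint32_t lo, uint32_t hi)
{
        if (val < lo)
                return lo;
        if (val > hi)
                return hi;
        return val;
}

int main(void)
{
        const uint32_t min_descs = 128, max_descs = 2048;
        uint32_t current_rx = 512;
        uint32_t requested[] = { 64, 512, 1024, 4096 };
        size_t i;

        for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
                uint32_t rx = clamp_u32(requested[i], min_descs, max_descs);

                if (rx == current_rx)
                        printf("request %u -> %u descriptors (no change, skip reset)\n",
                               requested[i], rx);
                else
                        printf("request %u -> %u descriptors (queues must be reset)\n",
                               requested[i], rx);
        }
        return 0;
}
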
+ */ + oct->fn_list.disable_io_queues(oct); + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & BIT_ULL(i))) + continue; + octeon_delete_droq(oct, i); + } + + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + octeon_delete_instr_queue(oct, i); + } + + if (oct->fn_list.setup_device_regs(oct)) { + dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n"); + return -1; + } + + if (liquidio_setup_io_queues(oct, 0)) { + dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n"); + return -1; + } + + /* Enable the input and output queues for this Octeon device */ + if (oct->fn_list.enable_io_queues(oct)) { + dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues"); + return -1; + } + + return 0; +} + +static int lio_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering) +{ + u32 rx_count, tx_count, rx_count_old, tx_count_old; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + int stopped = 0; + + if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct)) + return -EINVAL; + + if (ering->rx_mini_pending || ering->rx_jumbo_pending) + return -EINVAL; + + rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS, + CN23XX_MAX_OQ_DESCRIPTORS); + tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS, + CN23XX_MAX_IQ_DESCRIPTORS); + + rx_count_old = oct->droq[0]->max_count; + tx_count_old = oct->instr_queue[0]->max_count; + + if (rx_count == rx_count_old && tx_count == tx_count_old) + return 0; + + ifstate_set(lio, LIO_IFSTATE_RESETTING); + + if (netif_running(netdev)) { + netdev->netdev_ops->ndo_stop(netdev); + stopped = 1; + } + + /* Change RX/TX DESCS count */ + if (tx_count != tx_count_old) + CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + tx_count); + if (rx_count != rx_count_old) + CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + rx_count); + + if (lio_reset_queues(netdev)) + goto err_lio_reset_queues; + + if (stopped) + netdev->netdev_ops->ndo_open(netdev); + + ifstate_reset(lio, LIO_IFSTATE_RESETTING); + + return 0; + +err_lio_reset_queues: + if (tx_count != tx_count_old) + CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + tx_count_old); + if (rx_count != rx_count_old) + CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + rx_count_old); + return -EINVAL; +} + static u32 lio_get_msglevel(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); @@ -779,6 +902,9 @@ lio_get_ethtool_stats(struct net_device *netdev, struct net_device_stats *netstats = &netdev->stats; int i = 0, j; + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + netdev->netdev_ops->ndo_get_stats(netdev); octnet_get_link_stats(netdev); @@ -1043,6 +1169,9 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, struct octeon_device *oct_dev = lio->oct_dev; int i = 0, j, vj; + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + netdev->netdev_ops->ndo_get_stats(netdev); /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ data[i++] = CVM_CAST64(netstats->rx_packets); @@ -2574,6 +2703,7 @@ static const struct ethtool_ops lio_ethtool_ops = { .get_link = ethtool_op_get_link, .get_drvinfo = lio_get_drvinfo, .get_ringparam = lio_ethtool_get_ringparam, + .set_ringparam = lio_ethtool_set_ringparam, .get_channels = lio_ethtool_get_channels, 
.set_phys_id = lio_set_phys_id, .get_eeprom_len = lio_get_eeprom_len, @@ -2599,6 +2729,7 @@ static const struct ethtool_ops lio_vf_ethtool_ops = { .get_link = ethtool_op_get_link, .get_drvinfo = lio_get_vf_drvinfo, .get_ringparam = lio_ethtool_get_ringparam, + .set_ringparam = lio_ethtool_set_ringparam, .get_channels = lio_ethtool_get_channels, .get_strings = lio_vf_get_strings, .get_ethtool_stats = lio_vf_get_ethtool_stats, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 832db5abff48..38b7ea591d04 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -817,7 +817,8 @@ static void print_link_info(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); - if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { + if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && + ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { struct oct_link_info *linfo = &lio->linfo; if (linfo->link.s.link_up) { @@ -2520,6 +2521,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) oct = lio->oct_dev; + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return stats; + for (i = 0; i < lio->linfo.num_txpciq; i++) { iq_no = lio->linfo.txpciq[i].s.q_no; iq_stats = &oct->instr_queue[iq_no]->stats; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index aa502a8d3fdb..2fc2da3a8018 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -548,7 +548,8 @@ static void print_link_info(struct net_device *netdev) { struct lio *lio = GET_LIO(netdev); - if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { + if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && + ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { struct oct_link_info *linfo = &lio->linfo; if (linfo->link.s.link_up) { @@ -1633,6 +1634,9 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) oct = lio->oct_dev; + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return stats; + for (i = 0; i < lio->linfo.num_txpciq; i++) { iq_no = lio->linfo.txpciq[i].s.q_no; iq_stats = &oct->instr_queue[iq_no]->stats; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h index f229d792c2b3..63bd9c94e547 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -71,13 +71,17 @@ #define CN23XX_MAX_RINGS_PER_VF 8 #define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF -#define CN23XX_MAX_IQ_DESCRIPTORS 512 +#define CN23XX_MAX_IQ_DESCRIPTORS 2048 +#define CN23XX_DEFAULT_IQ_DESCRIPTORS 512 +#define CN23XX_MIN_IQ_DESCRIPTORS 128 #define CN23XX_DB_MIN 1 #define CN23XX_DB_MAX 8 #define CN23XX_DB_TIMEOUT 1 #define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF -#define CN23XX_MAX_OQ_DESCRIPTORS 512 +#define CN23XX_MAX_OQ_DESCRIPTORS 2048 +#define CN23XX_DEFAULT_OQ_DESCRIPTORS 512 +#define CN23XX_MIN_OQ_DESCRIPTORS 128 #define CN23XX_OQ_BUF_SIZE 1664 #define CN23XX_OQ_PKTSPER_INTR 128 /*#define CAVIUM_ONLY_CN23XX_RX_PERF*/ @@ -163,6 +167,11 @@ ((cfg)->misc.oct_link_query_interval) #define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp) +#define CFG_SET_NUM_RX_DESCS_NIC_IF(cfg, idx, value) \ + ((cfg)->nic_if_cfg[idx].num_rx_descs = value) +#define CFG_SET_NUM_TX_DESCS_NIC_IF(cfg, idx, value) \ + ((cfg)->nic_if_cfg[idx].num_tx_descs = 
value) + /* Max IOQs per OCTEON Link */ #define MAX_IOQS_PER_NICIF 64 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 495cc8880646..29d53b1763a7 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -418,7 +418,7 @@ static struct octeon_config default_cn23xx_conf = { /** IQ attributes */ .iq = { .max_iqs = CN23XX_CFG_IO_QUEUES, - .pending_list_size = (CN23XX_MAX_IQ_DESCRIPTORS * + .pending_list_size = (CN23XX_DEFAULT_IQ_DESCRIPTORS * CN23XX_CFG_IO_QUEUES), .instr_type = OCTEON_64BYTE_INSTR, .db_min = CN23XX_DB_MIN, @@ -436,8 +436,8 @@ static struct octeon_config default_cn23xx_conf = { }, .num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX, - .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS, - .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS, + .num_def_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS, + .num_def_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS, .def_rx_buf_size = CN23XX_OQ_BUF_SIZE, /* For ethernet interface 0: Port cfg Attributes */ @@ -455,10 +455,10 @@ static struct octeon_config default_cn23xx_conf = { .num_rxqs = DEF_RXQS_PER_INTF, /* Num of desc for rx rings */ - .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS, + .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS, /* Num of desc for tx rings */ - .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS, + .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS, /* SKB size, We need not change buf size even for Jumbo frames. * Octeon can send jumbo frames in 4 consecutive descriptors, @@ -484,10 +484,10 @@ static struct octeon_config default_cn23xx_conf = { .num_rxqs = DEF_RXQS_PER_INTF, /* Num of desc for rx rings */ - .num_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS, + .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS, /* Num of desc for tx rings */ - .num_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS, + .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS, /* SKB size, We need not change buf size even for Jumbo frames. * Octeon can send jumbo frames in 4 consecutive descriptors, diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index b49b155ef523..d4b39305ad68 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -33,6 +33,7 @@ #define LIO_IFSTATE_REGISTERED 0x02 #define LIO_IFSTATE_RUNNING 0x04 #define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 +#define LIO_IFSTATE_RESETTING 0x10 struct oct_nic_stats_resp { u64 rh; -- cgit v1.2.3-55-g7522 From ba31d3666924adf1b4ad8e19400324a06b99c52c Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 14 Aug 2017 21:09:19 +0200 Subject: mlxsw: spectrum_router: Prepare nexthop group's hash table for IPv6 This patch does preparation before introducing IPv6 nexthop group consolidation. Currently the nexthop group hash table is used only by IPv4 and uses fixed key size. In order to support the IPv6's variable length key the current table is changed. Signed-off-by: Arkadi Sharshevsky Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 69 ++++++++++++++++------ 1 file changed, 52 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 3d9be36965f6..510042905f22 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1522,15 +1522,11 @@ struct mlxsw_sp_nexthop { struct mlxsw_sp_neigh_entry *neigh_entry; }; -struct mlxsw_sp_nexthop_group_key { - struct fib_info *fi; -}; - struct mlxsw_sp_nexthop_group { + void *priv; struct rhash_head ht_node; struct list_head fib_list; /* list of fib entries that use this group */ struct neigh_table *neigh_tbl; - struct mlxsw_sp_nexthop_group_key key; u8 adj_index_valid:1, gateway:1; /* routes using the group use a gateway */ u32 adj_index; @@ -1540,10 +1536,46 @@ struct mlxsw_sp_nexthop_group { #define nh_rif nexthops[0].rif }; +static struct fib_info * +mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp) +{ + return nh_grp->priv; +} + +struct mlxsw_sp_nexthop_group_cmp_arg { + struct fib_info *fi; +}; + +static int +mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr) +{ + const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key; + const struct mlxsw_sp_nexthop_group *nh_grp = ptr; + + return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp); +} + +static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed) +{ + const struct mlxsw_sp_nexthop_group *nh_grp = data; + struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(nh_grp); + + return jhash(&fi, sizeof(fi), seed); +} + +static u32 +mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed) +{ + const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data; + + return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed); +} + static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { - .key_offset = offsetof(struct mlxsw_sp_nexthop_group, key), .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node), - .key_len = sizeof(struct mlxsw_sp_nexthop_group_key), + .hashfn = mlxsw_sp_nexthop_group_hash, + .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj, + .obj_cmpfn = mlxsw_sp_nexthop_group_cmp, }; static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, @@ -1563,10 +1595,14 @@ static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp, } static struct mlxsw_sp_nexthop_group * -mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp, - struct mlxsw_sp_nexthop_group_key key) +mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp, + struct fib_info *fi) { - return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key, + struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; + + cmp_arg.fi = fi; + return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, + &cmp_arg, mlxsw_sp_nexthop_group_ht_params); } @@ -2063,12 +2099,12 @@ mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) nh_grp = kzalloc(alloc_size, GFP_KERNEL); if (!nh_grp) return ERR_PTR(-ENOMEM); + nh_grp->priv = fi; INIT_LIST_HEAD(&nh_grp->fib_list); nh_grp->neigh_tbl = &arp_tbl; nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK; nh_grp->count = fi->fib_nhs; - nh_grp->key.fi = fi; fib_info_hold(fi); for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; @@ -2089,7 +2125,7 @@ err_nexthop4_init: nh = &nh_grp->nexthops[i]; mlxsw_sp_nexthop4_fini(mlxsw_sp, nh); } - 
fib_info_put(nh_grp->key.fi); + fib_info_put(fi); kfree(nh_grp); return ERR_PTR(err); } @@ -2108,7 +2144,7 @@ mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp, } mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); WARN_ON_ONCE(nh_grp->adj_index_valid); - fib_info_put(nh_grp->key.fi); + fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp)); kfree(nh_grp); } @@ -2116,11 +2152,9 @@ static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib_entry *fib_entry, struct fib_info *fi) { - struct mlxsw_sp_nexthop_group_key key; struct mlxsw_sp_nexthop_group *nh_grp; - key.fi = fi; - nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key); + nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi); if (!nh_grp) { nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi); if (IS_ERR(nh_grp)) @@ -2551,7 +2585,8 @@ mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp, if (fib4_entry->tb_id == fen_info->tb_id && fib4_entry->tos == fen_info->tos && fib4_entry->type == fen_info->type && - fib4_entry->common.nh_group->key.fi == fen_info->fi) { + mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) == + fen_info->fi) { return fib4_entry; } } -- cgit v1.2.3-55-g7522 From e6f3b379c0c599a870bc0e7d8a0cbb0b316502f5 Mon Sep 17 00:00:00 2001 From: Arkadi Sharshevsky Date: Mon, 14 Aug 2017 21:09:20 +0200 Subject: mlxsw: spectrum_router: Add support for nexthop group consolidation for IPv6 Due to limited ASIC resources the maximum number of routes is limited by the nexthop resource. In order to improve the routing scale nexthop consolidation should be performed. This patch adds support for IPv6 neighbor consolidation. The hash value is calculated based on the nexthop set, by performing bitwise xor on the ifindexs of the nexthops, in a similar way to IPv4's kernel implementation. In case of collision a full match is performed between the sets which include address and ifindex comparison. Non gateway nexthop groups are not inserted to the hash table due to lack of nexthop device (ifindex). Signed-off-by: Arkadi Sharshevsky Reviewed-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_router.c | 150 +++++++++++++++++++-- 1 file changed, 141 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 510042905f22..16676fffbf70 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1509,6 +1509,7 @@ struct mlxsw_sp_nexthop { struct rhash_head ht_node; struct mlxsw_sp_nexthop_key key; unsigned char gw_addr[sizeof(struct in6_addr)]; + int ifindex; struct mlxsw_sp_rif *rif; u8 should_offload:1, /* set indicates this neigh is connected and * should be put to KVD linear area of this group. 
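
The consolidation key described in this commit message is deliberately cheap: the IPv6 hash folds the member count and each nexthop's ifindex together with XOR before mixing, so two groups with the same members hash identically regardless of ordering, and only colliding groups pay for the full gateway-plus-ifindex comparison. A self-contained sketch of that folding; the final mix() here is a toy stand-in for the kernel's jhash(), not its actual algorithm:

#include <stdint.h>
#include <stdio.h>

/* Toy scrambler standing in for jhash(); the XOR folding above it is the
 * part that mirrors the IPv6 case of the group hash callback. */
static uint32_t mix(uint32_t val, uint32_t seed)
{
        val ^= seed;
        val *= 0x9e3779b1u;
        return val ^ (val >> 16);
}

static uint32_t nexthop_group_hash(const int *ifindex, unsigned int count,
                                   uint32_t seed)
{
        uint32_t val = count;   /* start from the member count, as the driver does */
        unsigned int i;

        for (i = 0; i < count; i++)
                val ^= (uint32_t)ifindex[i];    /* order-independent fold */

        return mix(val, seed);
}

int main(void)
{
        int group_a[] = { 4, 7, 9 };
        int group_b[] = { 9, 4, 7 };    /* same members, different order */

        printf("group A: 0x%08x\n", nexthop_group_hash(group_a, 3, 0x12345678u));
        printf("group B: 0x%08x\n", nexthop_group_hash(group_b, 3, 0x12345678u));
        return 0;
}

The IPv4 side keeps the simpler scheme of hashing the fib_info pointer value itself, which is why the rhashtable now needs separate hashfn and obj_hashfn callbacks instead of a fixed-length key.
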
@@ -1543,24 +1544,115 @@ mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp) } struct mlxsw_sp_nexthop_group_cmp_arg { - struct fib_info *fi; + enum mlxsw_sp_l3proto proto; + union { + struct fib_info *fi; + struct mlxsw_sp_fib6_entry *fib6_entry; + }; }; +static bool +mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp, + const struct in6_addr *gw, int ifindex) +{ + int i; + + for (i = 0; i < nh_grp->count; i++) { + const struct mlxsw_sp_nexthop *nh; + + nh = &nh_grp->nexthops[i]; + if (nh->ifindex == ifindex && + ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr)) + return true; + } + + return false; +} + +static bool +mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp, + const struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + + if (nh_grp->count != fib6_entry->nrt6) + return false; + + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + struct in6_addr *gw; + int ifindex; + + ifindex = mlxsw_sp_rt6->rt->dst.dev->ifindex; + gw = &mlxsw_sp_rt6->rt->rt6i_gateway; + if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex)) + return false; + } + + return true; +} + static int mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr) { const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key; const struct mlxsw_sp_nexthop_group *nh_grp = ptr; - return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp); + switch (cmp_arg->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp); + case MLXSW_SP_L3_PROTO_IPV6: + return !mlxsw_sp_nexthop6_group_cmp(nh_grp, + cmp_arg->fib6_entry); + default: + WARN_ON(1); + return 1; + } +} + +static int +mlxsw_sp_nexthop_group_type(const struct mlxsw_sp_nexthop_group *nh_grp) +{ + return nh_grp->neigh_tbl->family; } static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed) { const struct mlxsw_sp_nexthop_group *nh_grp = data; - struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(nh_grp); + const struct mlxsw_sp_nexthop *nh; + struct fib_info *fi; + unsigned int val; + int i; + + switch (mlxsw_sp_nexthop_group_type(nh_grp)) { + case AF_INET: + fi = mlxsw_sp_nexthop4_group_fi(nh_grp); + return jhash(&fi, sizeof(fi), seed); + case AF_INET6: + val = nh_grp->count; + for (i = 0; i < nh_grp->count; i++) { + nh = &nh_grp->nexthops[i]; + val ^= nh->ifindex; + } + return jhash(&val, sizeof(val), seed); + default: + WARN_ON(1); + return 0; + } +} + +static u32 +mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed) +{ + unsigned int val = fib6_entry->nrt6; + struct mlxsw_sp_rt6 *mlxsw_sp_rt6; + struct net_device *dev; + + list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { + dev = mlxsw_sp_rt6->rt->dst.dev; + val ^= dev->ifindex; + } - return jhash(&fi, sizeof(fi), seed); + return jhash(&val, sizeof(val), seed); } static u32 @@ -1568,7 +1660,15 @@ mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed) { const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data; - return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed); + switch (cmp_arg->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed); + case MLXSW_SP_L3_PROTO_IPV6: + return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed); + default: + WARN_ON(1); + return 0; + } } static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = { @@ -1581,6 +1681,10 @@ static const struct rhashtable_params 
mlxsw_sp_nexthop_group_ht_params = { static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { + if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 && + !nh_grp->gateway) + return 0; + return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht, &nh_grp->ht_node, mlxsw_sp_nexthop_group_ht_params); @@ -1589,6 +1693,10 @@ static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp, static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop_group *nh_grp) { + if (mlxsw_sp_nexthop_group_type(nh_grp) == AF_INET6 && + !nh_grp->gateway) + return; + rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht, &nh_grp->ht_node, mlxsw_sp_nexthop_group_ht_params); @@ -1600,12 +1708,26 @@ mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; + cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4; cmp_arg.fi = fi; return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &cmp_arg, mlxsw_sp_nexthop_group_ht_params); } +static struct mlxsw_sp_nexthop_group * +mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_fib6_entry *fib6_entry) +{ + struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg; + + cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6; + cmp_arg.fib6_entry = fib6_entry; + return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, + &cmp_arg, + mlxsw_sp_nexthop_group_ht_params); +} + static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = { .key_offset = offsetof(struct mlxsw_sp_nexthop, key), .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node), @@ -3197,6 +3319,7 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp, if (!dev) return 0; + nh->ifindex = dev->ifindex; rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); if (!rif) @@ -3254,9 +3377,15 @@ mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp, goto err_nexthop6_init; mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list); } + + err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp); + if (err) + goto err_nexthop_group_insert; + mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); return nh_grp; +err_nexthop_group_insert: err_nexthop6_init: for (i--; i >= 0; i--) { nh = &nh_grp->nexthops[i]; @@ -3273,6 +3402,7 @@ mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_nexthop *nh; int i = nh_grp->count; + mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp); for (i--; i >= 0; i--) { nh = &nh_grp->nexthops[i]; mlxsw_sp_nexthop6_fini(mlxsw_sp, nh); @@ -3287,10 +3417,12 @@ static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_nexthop_group *nh_grp; - /* For now, don't consolidate nexthop groups */ - nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry); - if (IS_ERR(nh_grp)) - return PTR_ERR(nh_grp); + nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry); + if (!nh_grp) { + nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry); + if (IS_ERR(nh_grp)) + return PTR_ERR(nh_grp); + } list_add_tail(&fib6_entry->common.nexthop_group_node, &nh_grp->fib_list); -- cgit v1.2.3-55-g7522 From d396179c166932e51b3a65c4f766671ac4e48763 Mon Sep 17 00:00:00 2001 From: Derek Chickles Date: Mon, 14 Aug 2017 12:17:56 -0700 Subject: liquidio: fix issues with fw_type module parameter The fw_type module parameter isn't showing up in the /sys/module/liquidio/parameters directory. Fix it by setting the read permission bits for user, group, other in module_param_string(). Revise the description of fw_type. 
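
The gist of this fix is the sysfs permission argument: module_param_string() with a mode of 0000 still accepts the option on the modprobe command line, but it creates no entry under /sys/module/<module>/parameters, so the running value cannot be inspected, while 0444 makes it world-readable there. A minimal, untested module sketch of the same pattern; this is a stand-alone demo, not the liquidio driver, and the names and buffer size are illustrative:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/printk.h>

/* Initialise the buffer with the documented default and expose it
 * read-only in sysfs. */
static char fw_type[32] = "nic";
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default \"nic\")");

static int __init fwtype_demo_init(void)
{
        pr_info("fw_type_demo: fw_type=%s\n", fw_type);
        return 0;
}

static void __exit fwtype_demo_exit(void)
{
}

module_init(fwtype_demo_init);
module_exit(fwtype_demo_exit);
MODULE_LICENSE("GPL");
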
Initialize the fw_type static char array with the default value to conform to the module parameter description. Signed-off-by: Derek Chickles Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 38b7ea591d04..247f5de6da57 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -59,9 +59,9 @@ static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); -static char fw_type[LIO_MAX_FW_TYPE_LEN]; -module_param_string(fw_type, fw_type, sizeof(fw_type), 0000); -MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\""); +static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_NIC; +module_param_string(fw_type, fw_type, sizeof(fw_type), 0444); +MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\". Use \"none\" to load firmware from flash."); static u32 console_bitmask; module_param(console_bitmask, int, 0644); -- cgit v1.2.3-55-g7522 From 886e1974dcc5866cbc3d133d1f3d2cc26af68cfc Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 15 Aug 2017 17:02:39 +0200 Subject: s390/qeth: don't access skb after transmission After transmitting a skb via send_packet[_fast](), the statistics code accesses the skb once more to account for transmitted page frags. This has a (theoretical?) race against the TX completion - if the TX completion is processed and frees the skb before hard_start_xmit() gets to the statistics part, we access random memory. Fix this by caching the # of page frags, before the skb is transmitted. Signed-off-by: Julian Wiedmann Acked-by: Ursula Braun Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_l2_main.c | 14 ++++++-------- drivers/s390/net/qeth_l3_main.c | 4 ++-- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index ad110abfdd47..28c9a7eda507 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -707,7 +707,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, int data_offset = -1; int elements_needed = 0; int hd_len = 0; - int nr_frags; + unsigned int nr_frags; if (card->qdio.do_prio_queueing || (cast_type && card->info.is_multicast_different)) @@ -747,6 +747,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, if (lin_rc) goto tx_drop; } + nr_frags = skb_shinfo(new_skb)->nr_frags; if (card->info.type == QETH_CARD_TYPE_OSN) hdr = (struct qeth_hdr *)skb->data; @@ -799,13 +800,10 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, if (!rc) { card->stats.tx_packets++; card->stats.tx_bytes += tx_bytes; - if (card->options.performance_stats) { - nr_frags = skb_shinfo(new_skb)->nr_frags; - if (nr_frags) { - card->perf_stats.sg_skbs_sent++; - /* nr_frags + skb->data */ - card->perf_stats.sg_frags_sent += nr_frags + 1; - } + if (card->options.performance_stats && nr_frags) { + card->perf_stats.sg_skbs_sent++; + /* nr_frags + skb->data */ + card->perf_stats.sg_frags_sent += nr_frags + 1; } if (new_skb != skb) dev_kfree_skb_any(skb); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d42e758518ed..6648f02d61ea 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2650,7 +2650,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, int tx_bytes = skb->len; bool use_tso; int data_offset = -1; - int nr_frags; + unsigned int nr_frags; if (((card->info.type == QETH_CARD_TYPE_IQD) && (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || @@ -2727,6 +2727,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (lin_rc) goto tx_drop; } + nr_frags = skb_shinfo(new_skb)->nr_frags; if (use_tso) { hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso)); @@ -2786,7 +2787,6 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (new_skb != skb) dev_kfree_skb_any(skb); if (card->options.performance_stats) { - nr_frags = skb_shinfo(new_skb)->nr_frags; if (use_tso) { card->perf_stats.large_send_bytes += tx_bytes; card->perf_stats.large_send_cnt++; -- cgit v1.2.3-55-g7522 From 7b1115bbb700c7e46923bbbf007c04f04230873c Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 15 Aug 2017 17:02:40 +0200 Subject: s390/qeth: remove extra L2 adapterparms query qeth_l2_request_initial_mac() queries the device for its supported adapterparms, even though they already have been queried as part of the device's high-level setup. Remove that extra call. The only call chain for qeth_l2_request_initial_mac() is __qeth_l2_set_online() qeth_core_hardsetup_card() qeth_query_setadapterparms() qeth_l2_setup_netdev() qeth_l2_request_initial_mac() qeth_query_setadapterparms() , and we only reach qeth_l2_request_initial_mac() if the first adapterparms query succeeds. Hence removing the second query results in no loss of functionality. Signed-off-by: Julian Wiedmann Acked-by: Ursula Braun Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_l2_main.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 28c9a7eda507..a6ba897ed707 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -519,15 +519,6 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) /* fall back to alternative mechanism: */ } - if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { - rc = qeth_query_setadapterparms(card); - if (rc) { - QETH_DBF_MESSAGE(2, "could not query adapter " - "parameters on device %s: x%x\n", - CARD_BUS_ID(card), rc); - } - } - if (card->info.type == QETH_CARD_TYPE_IQD || card->info.type == QETH_CARD_TYPE_OSM || card->info.type == QETH_CARD_TYPE_OSX || -- cgit v1.2.3-55-g7522 From 699d3fe53702e993af9bdafa70542d96d3f3ee5c Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 15 Aug 2017 17:02:41 +0200 Subject: s390/qeth: remove extra L3 adapterparms query qeth_l3_setadapter_parms() queries the device for supported adapterparms, even though they already have been queried as part of the device's high-level setup. Remove that extra call. The only call chain for qeth_l3_setadapter_parms() is __qeth_l3_set_online() qeth_core_hardsetup_card() qeth_query_setadapterparms() qeth_l3_setadapter_parms() qeth_query_setadapterparms() , and we only reach qeth_l3_setadapter_parms() if the first adapterparms query succeeds. Hence removing the second query results in no loss of functionality. Signed-off-by: Julian Wiedmann Acked-by: Ursula Braun Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l3_main.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 6648f02d61ea..13124e6fd9d3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -890,22 +890,10 @@ static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) static int qeth_l3_setadapter_parms(struct qeth_card *card) { - int rc; + int rc = 0; QETH_DBF_TEXT(SETUP, 2, "setadprm"); - if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { - dev_info(&card->gdev->dev, - "set adapter parameters not supported.\n"); - QETH_DBF_TEXT(SETUP, 2, " notsupp"); - return 0; - } - rc = qeth_query_setadapterparms(card); - if (rc) { - QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: " - "0x%x\n", dev_name(&card->gdev->dev), rc); - return rc; - } if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { rc = qeth_setadpparms_change_macaddr(card); if (rc) -- cgit v1.2.3-55-g7522 From 5258830bf6a2af4fd4c3521a37541ee59095d010 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 15 Aug 2017 17:02:42 +0200 Subject: s390/qeth: simplify fragment type selection Improve readability of the code that determines a buffer element's fragment type, and reduce the number of cases down from 5 to 3. Signed-off-by: Julian Wiedmann Acked-by: Ursula Braun Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 4792cabb862e..3623ba23ff0b 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3918,23 +3918,16 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, buffer->element[element].addr = data; buffer->element[element].length = length_here; length -= length_here; - if (!length) { - if (first_lap) - if (skb_shinfo(skb)->nr_frags) - buffer->element[element].eflags = - SBAL_EFLAGS_FIRST_FRAG; - else - buffer->element[element].eflags = 0; - else - buffer->element[element].eflags = - SBAL_EFLAGS_MIDDLE_FRAG; - } else { - if (first_lap) + if (first_lap) { + if (length || skb_is_nonlinear(skb)) + /* skb needs additional elements */ buffer->element[element].eflags = - SBAL_EFLAGS_FIRST_FRAG; + SBAL_EFLAGS_FIRST_FRAG; else - buffer->element[element].eflags = - SBAL_EFLAGS_MIDDLE_FRAG; + buffer->element[element].eflags = 0; + } else { + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; } data += length_here; element++; -- cgit v1.2.3-55-g7522 From 384d2ef145062fcbd3993d80456155e27418cbf2 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 15 Aug 2017 17:02:43 +0200 Subject: s390/qeth: straighten out fill_buffer() interface 1. for adjusting the buffer's next_element_to_fill in __fill_buffer(), just pass the full qeth_qdio_out_buffer struct 2. when adding a header element, be consistent about passing a hint ('is_first_elem') to __fill_buffer() No functional change. Signed-off-by: Julian Wiedmann Acked-by: Ursula Braun Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 29 ++++++++++++++--------------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 3623ba23ff0b..9796388780f9 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3889,24 +3889,21 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); static inline void __qeth_fill_buffer(struct sk_buff *skb, - struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, - int offset) + struct qeth_qdio_out_buffer *buf, + bool is_first_elem, int offset) { + struct qdio_buffer *buffer = buf->buffer; + int element = buf->next_element_to_fill; int length = skb_headlen(skb); - int length_here; - int element; + int length_here, cnt; char *data; - int first_lap, cnt; struct skb_frag_struct *frag; - element = *next_element_to_fill; data = skb->data; - first_lap = (is_tso == 0 ? 
1 : 0); if (offset >= 0) { data = skb->data + offset; length -= offset; - first_lap = 0; } while (length > 0) { @@ -3918,7 +3915,8 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, buffer->element[element].addr = data; buffer->element[element].length = length_here; length -= length_here; - if (first_lap) { + if (is_first_elem) { + is_first_elem = false; if (length || skb_is_nonlinear(skb)) /* skb needs additional elements */ buffer->element[element].eflags = @@ -3931,7 +3929,6 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, } data += length_here; element++; - first_lap = 0; } for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { @@ -3957,7 +3954,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, if (buffer->element[element - 1].eflags) buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; - *next_element_to_fill = element; + buf->next_element_to_fill = element; } static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, @@ -3965,7 +3962,8 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_hdr *hdr, int offset, int hd_len) { struct qdio_buffer *buffer; - int flush_cnt = 0, hdr_len, large_send = 0; + int flush_cnt = 0, hdr_len; + bool is_first_elem = true; buffer = buf->buffer; refcount_inc(&skb->users); @@ -3974,6 +3972,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, /*check first on TSO ....*/ if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) { int element = buf->next_element_to_fill; + is_first_elem = false; hdr_len = sizeof(struct qeth_hdr_tso) + ((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len; @@ -3984,11 +3983,12 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, buf->next_element_to_fill++; skb->data += hdr_len; skb->len -= hdr_len; - large_send = 1; } if (offset >= 0) { int element = buf->next_element_to_fill; + is_first_elem = false; + buffer->element[element].addr = hdr; buffer->element[element].length = sizeof(struct qeth_hdr) + hd_len; @@ -3997,8 +3997,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, buf->next_element_to_fill++; } - __qeth_fill_buffer(skb, buffer, large_send, - (int *)&buf->next_element_to_fill, offset); + __qeth_fill_buffer(skb, buf, is_first_elem, offset); if (!queue->do_pack) { QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); -- cgit v1.2.3-55-g7522 From cc309f83d16e0229d3c4051e1bb53579bfd2d3a8 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 15 Aug 2017 17:02:44 +0200 Subject: s390/qeth: clean up fill_buffer() offset logic For some xmit paths we pass down a data offset to qeth_fill_buffer(), to indicate that the first k bytes of the skb should be skipped when mapping it into buffer elements. Commit acd9776b5c45 ("s390/qeth: no ETH header for outbound AF_IUCV") recently switched the offset for the IUCV-over-HiperSockets path from 0 to ETH_HLEN, and now we have device offset OSA = 0 IQD > 0 for all xmit paths. OSA would previously pass down -1 from do_send_packet(), to distinguish between 1) OSA and 2) IQD with offset 0. That's no longer needed now, so have it pass 0, make the offset unsigned and clean up how we apply the offset in __qeth_fill_buffer(). No change of behaviour for any of our current xmit paths. Signed-off-by: Julian Wiedmann Acked-by: Ursula Braun Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core.h | 6 ++++-- drivers/s390/net/qeth_core_main.c | 31 ++++++++++++++----------------- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 7a0ffc71b25d..95266449a50a 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -951,8 +951,10 @@ int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); -int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, - struct sk_buff *, struct qeth_hdr *, int, int); +int qeth_do_send_packet_fast(struct qeth_card *card, + struct qeth_qdio_out_q *queue, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned int offset, + int hd_len); int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, struct sk_buff *, struct qeth_hdr *, int); int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9796388780f9..394bee93b891 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3890,22 +3890,16 @@ EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); static inline void __qeth_fill_buffer(struct sk_buff *skb, struct qeth_qdio_out_buffer *buf, - bool is_first_elem, int offset) + bool is_first_elem, unsigned int offset) { struct qdio_buffer *buffer = buf->buffer; int element = buf->next_element_to_fill; - int length = skb_headlen(skb); + int length = skb_headlen(skb) - offset; + char *data = skb->data + offset; int length_here, cnt; - char *data; struct skb_frag_struct *frag; - data = skb->data; - - if (offset >= 0) { - data = skb->data + offset; - length -= offset; - } - + /* map linear part into buffer element(s) */ while (length > 0) { /* length_here is the remaining amount of data in this page */ length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); @@ -3931,6 +3925,7 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, element++; } + /* map page frags into buffer element(s) */ for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { frag = &skb_shinfo(skb)->frags[cnt]; data = (char *)page_to_phys(skb_frag_page(frag)) + @@ -3958,8 +3953,9 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, } static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, struct sk_buff *skb, - struct qeth_hdr *hdr, int offset, int hd_len) + struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, int hd_len) { struct qdio_buffer *buffer; int flush_cnt = 0, hdr_len; @@ -3969,7 +3965,6 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, refcount_inc(&skb->users); skb_queue_tail(&buf->skb_list, skb); - /*check first on TSO ....*/ if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) { int element = buf->next_element_to_fill; is_first_elem = false; @@ -3985,7 +3980,8 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, skb->len -= hdr_len; } - if (offset >= 0) { + /* IQD */ + if (offset > 0) { int element = buf->next_element_to_fill; is_first_elem = false; @@ -4022,8 +4018,9 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, } int qeth_do_send_packet_fast(struct qeth_card *card, - struct qeth_qdio_out_q *queue, struct sk_buff *skb, - struct qeth_hdr *hdr, int offset, int 
hd_len) + struct qeth_qdio_out_q *queue, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned int offset, + int hd_len) { struct qeth_qdio_out_buffer *buffer; int index; @@ -4103,7 +4100,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, } } } - tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0); + tmp = qeth_fill_buffer(queue, buffer, skb, hdr, 0, 0); queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % QDIO_MAX_BUFFERS_PER_Q; flush_count += tmp; -- cgit v1.2.3-55-g7522 From f8eb49306dfbdc3f929d09b584bdb834e940237c Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 15 Aug 2017 17:02:45 +0200 Subject: s390/qeth: make more use of skb API Replace some open-coded parts with their proper API calls. Also remove two skb_[re]set_mac_header() calls in the L2 xmit paths that are clearly no longer required, since at least commit 6d1ccff62780 ("net: reset mac header in dev_start_xmit()"). Signed-off-by: Julian Wiedmann Acked-by: Ursula Braun Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 5 ----- drivers/s390/net/qeth_core_main.c | 12 +++++------- drivers/s390/net/qeth_l2_main.c | 7 +++---- drivers/s390/net/qeth_l3_main.c | 4 ++-- 4 files changed, 10 insertions(+), 18 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 95266449a50a..4a4ca5cb37a0 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -857,11 +857,6 @@ static inline int qeth_get_ip_version(struct sk_buff *skb) } } -static inline int qeth_get_ip_protocol(struct sk_buff *skb) -{ - return ip_hdr(skb)->protocol; -} - static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, struct qeth_buffer_pool_entry *entry) { diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 394bee93b891..6286a8e35924 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3897,7 +3897,6 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, int length = skb_headlen(skb) - offset; char *data = skb->data + offset; int length_here, cnt; - struct skb_frag_struct *frag; /* map linear part into buffer element(s) */ while (length > 0) { @@ -3927,10 +3926,10 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, /* map page frags into buffer element(s) */ for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { - frag = &skb_shinfo(skb)->frags[cnt]; - data = (char *)page_to_phys(skb_frag_page(frag)) + - frag->page_offset; - length = frag->size; + skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; + + data = skb_frag_address(frag); + length = skb_frag_size(frag); while (length > 0) { length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); @@ -3976,8 +3975,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, buffer->element[element].length = hdr_len; buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; buf->next_element_to_fill++; - skb->data += hdr_len; - skb->len -= hdr_len; + skb_pull(skb, hdr_len); } /* IQD */ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index a6ba897ed707..9c789ad6831a 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -752,11 +752,11 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, if (!hdr) goto tx_drop; elements_needed++; - skb_reset_mac_header(new_skb); qeth_l2_fill_header(card, hdr, new_skb, cast_type); hdr->hdr.l2.pkt_length = new_skb->len; - memcpy(((char *)hdr) + sizeof(struct qeth_hdr), - 
skb_mac_header(new_skb), ETH_HLEN); + skb_copy_from_linear_data(new_skb, + ((char *)hdr) + sizeof(*hdr), + ETH_HLEN); } else { /* create a clone with writeable headroom */ new_skb = skb_realloc_headroom(skb, @@ -764,7 +764,6 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, if (!new_skb) goto tx_drop; hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); - skb_set_mac_header(new_skb, sizeof(struct qeth_hdr)); qeth_l2_fill_header(card, hdr, new_skb, cast_type); if (new_skb->ip_summed == CHECKSUM_PARTIAL) qeth_l2_hdr_csum(card, hdr, new_skb); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 13124e6fd9d3..97ca8a6cbb21 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2570,7 +2570,7 @@ static void qeth_tso_fill_header(struct qeth_card *card, hdr->ext.hdr_len = 28; /*insert non-fix values */ hdr->ext.mss = skb_shinfo(skb)->gso_size; - hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4); + hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb)); hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - sizeof(struct qeth_hdr_tso)); tcph->check = 0; @@ -2663,7 +2663,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, /* Ignore segment size from skb_is_gso(), 1 page is always used. */ use_tso = skb_is_gso(skb) && - (qeth_get_ip_protocol(skb) == IPPROTO_TCP) && (ipv == 4); + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); if (card->info.type == QETH_CARD_TYPE_IQD) { new_skb = skb; -- cgit v1.2.3-55-g7522 From cef6ff220234e2475220c0ba7692d43af7c53265 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 15 Aug 2017 17:02:46 +0200 Subject: s390/net: reduce inlining Clean up the inline cruft in s390 net drivers. Many of the inlined functions had only one caller anyway. Suggested-by: Joe Perches Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/ctcm_main.c | 2 +- drivers/s390/net/lcs.c | 28 +++++++---------- drivers/s390/net/netiucv.c | 4 +-- drivers/s390/net/qeth_core_main.c | 66 +++++++++++++++++++++------------------ drivers/s390/net/qeth_core_sys.c | 2 +- drivers/s390/net/qeth_l2_main.c | 24 +++++--------- drivers/s390/net/qeth_l3_main.c | 15 +++++---- 7 files changed, 67 insertions(+), 74 deletions(-) diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 2ade6131a89f..26363e0816fe 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -305,7 +305,7 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) * ch The channel, the sense code belongs to. * sense The sense code to inspect. 
*/ -static inline void ccw_unit_check(struct channel *ch, __u8 sense) +static void ccw_unit_check(struct channel *ch, __u8 sense) { CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "%s(%s): %02x", diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 619da81dca70..d01b5c2a7760 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -327,8 +327,7 @@ lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads) spin_unlock_irqrestore(&card->mask_lock, flags); wake_up(&card->wait_q); } -static inline int -lcs_threads_running(struct lcs_card *card, unsigned long threads) +static int lcs_threads_running(struct lcs_card *card, unsigned long threads) { unsigned long flags; int rc = 0; @@ -346,8 +345,7 @@ lcs_wait_for_threads(struct lcs_card *card, unsigned long threads) lcs_threads_running(card, threads) == 0); } -static inline int -lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) +static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) { unsigned long flags; @@ -373,8 +371,7 @@ lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread) wake_up(&card->wait_q); } -static inline int -__lcs_do_run_thread(struct lcs_card *card, unsigned long thread) +static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread) { unsigned long flags; int rc = 0; @@ -444,8 +441,7 @@ lcs_setup_card(struct lcs_card *card) INIT_LIST_HEAD(&card->lancmd_waiters); } -static inline void -lcs_clear_multicast_list(struct lcs_card *card) +static void lcs_clear_multicast_list(struct lcs_card *card) { #ifdef CONFIG_IP_MULTICAST struct lcs_ipm_list *ipm; @@ -656,8 +652,7 @@ __lcs_resume_channel(struct lcs_channel *channel) /** * Make a buffer ready for processing. */ -static inline void -__lcs_ready_buffer_bits(struct lcs_channel *channel, int index) +static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index) { int prev, next; @@ -1169,8 +1164,8 @@ lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) /** * function called by net device to handle multicast address relevant things */ -static inline void -lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) +static void lcs_remove_mc_addresses(struct lcs_card *card, + struct in_device *in4_dev) { struct ip_mc_list *im4; struct list_head *l; @@ -1196,8 +1191,9 @@ lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) spin_unlock_irqrestore(&card->ipm_lock, flags); } -static inline struct lcs_ipm_list * -lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf) +static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card, + struct ip_mc_list *im4, + char *buf) { struct lcs_ipm_list *tmp, *ipm = NULL; struct list_head *l; @@ -1218,8 +1214,8 @@ lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf) return ipm; } -static inline void -lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) +static void lcs_set_mc_addresses(struct lcs_card *card, + struct in_device *in4_dev) { struct ip_mc_list *im4; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 7e0e6a4019f3..b9c7c1e61da2 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -249,14 +249,14 @@ struct ll_header { * Compatibility macros for busy handling * of network devices. 
*/ -static inline void netiucv_clear_busy(struct net_device *dev) +static void netiucv_clear_busy(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); clear_bit(0, &priv->tbusy); netif_wake_queue(dev); } -static inline int netiucv_test_and_set_busy(struct net_device *dev) +static int netiucv_test_and_set_busy(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); netif_stop_queue(dev); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 6286a8e35924..415424e618ad 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -101,7 +101,7 @@ void qeth_close_dev(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_close_dev); -static inline const char *qeth_get_cardname(struct qeth_card *card) +static const char *qeth_get_cardname(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { @@ -330,7 +330,7 @@ static struct qeth_qdio_q *qeth_alloc_qdio_queue(void) return q; } -static inline int qeth_cq_init(struct qeth_card *card) +static int qeth_cq_init(struct qeth_card *card) { int rc; @@ -352,7 +352,7 @@ out: return rc; } -static inline int qeth_alloc_cq(struct qeth_card *card) +static int qeth_alloc_cq(struct qeth_card *card) { int rc; @@ -397,7 +397,7 @@ kmsg_out: goto out; } -static inline void qeth_free_cq(struct qeth_card *card) +static void qeth_free_cq(struct qeth_card *card) { if (card->qdio.c_q) { --card->qdio.no_in_queues; @@ -408,8 +408,9 @@ static inline void qeth_free_cq(struct qeth_card *card) card->qdio.out_bufstates = NULL; } -static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, - int delayed) { +static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, + int delayed) +{ enum iucv_tx_notify n; switch (sbalf15) { @@ -432,8 +433,8 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, return n; } -static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, - int bidx, int forced_cleanup) +static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, + int forced_cleanup) { if (q->card->options.cq != QETH_CQ_ENABLED) return; @@ -475,8 +476,9 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, } -static inline void qeth_qdio_handle_aob(struct qeth_card *card, - unsigned long phys_aob_addr) { +static void qeth_qdio_handle_aob(struct qeth_card *card, + unsigned long phys_aob_addr) +{ struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; @@ -2228,7 +2230,7 @@ static int qeth_cm_setup(struct qeth_card *card) } -static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card) +static int qeth_get_initial_mtu_for_card(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_UNKNOWN: @@ -2251,7 +2253,7 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card) } } -static inline int qeth_get_mtu_outof_framesize(int framesize) +static int qeth_get_mtu_outof_framesize(int framesize) { switch (framesize) { case 0x4000: @@ -2267,7 +2269,7 @@ static inline int qeth_get_mtu_outof_framesize(int framesize) } } -static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu) +static int qeth_mtu_is_valid(struct qeth_card *card, int mtu) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: @@ -2738,8 +2740,8 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card) } } -static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( - struct qeth_card *card) 
+static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( + struct qeth_card *card) { struct list_head *plh; struct qeth_buffer_pool_entry *entry; @@ -2870,7 +2872,7 @@ int qeth_init_qdio_queues(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_init_qdio_queues); -static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) +static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) { switch (link_type) { case QETH_LINK_TYPE_HSTR: @@ -3888,9 +3890,9 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) } EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); -static inline void __qeth_fill_buffer(struct sk_buff *skb, - struct qeth_qdio_out_buffer *buf, - bool is_first_elem, unsigned int offset) +static void __qeth_fill_buffer(struct sk_buff *skb, + struct qeth_qdio_out_buffer *buf, + bool is_first_elem, unsigned int offset) { struct qdio_buffer *buffer = buf->buffer; int element = buf->next_element_to_fill; @@ -3951,10 +3953,10 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, buf->next_element_to_fill = element; } -static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, - struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int offset, int hd_len) +static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, int hd_len) { struct qdio_buffer *buffer; int flush_cnt = 0, hdr_len; @@ -4821,7 +4823,7 @@ out: } EXPORT_SYMBOL_GPL(qeth_vm_request_mac); -static inline int qeth_get_qdio_q_format(struct qeth_card *card) +static int qeth_get_qdio_q_format(struct qeth_card *card) { if (card->info.type == QETH_CARD_TYPE_IQD) return QDIO_IQDIO_QFMT; @@ -4886,9 +4888,12 @@ out: return; } -static inline void qeth_qdio_establish_cq(struct qeth_card *card, - struct qdio_buffer **in_sbal_ptrs, - void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) { +static void qeth_qdio_establish_cq(struct qeth_card *card, + struct qdio_buffer **in_sbal_ptrs, + void (**queue_start_poll) + (struct ccw_device *, int, + unsigned long)) +{ int i; if (card->options.cq == QETH_CQ_ENABLED) { @@ -5180,9 +5185,10 @@ out: } EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); -static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, - struct qdio_buffer_element *element, - struct sk_buff **pskb, int offset, int *pfrag, int data_len) +static int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, + struct qdio_buffer_element *element, + struct sk_buff **pskb, int offset, int *pfrag, + int data_len) { struct page *page = virt_to_page(element->addr); if (*pskb == NULL) { diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 6d255c22656d..d1ee9e30c68b 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -78,7 +78,7 @@ static ssize_t qeth_dev_card_type_show(struct device *dev, static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL); -static inline const char *qeth_get_bufsize_str(struct qeth_card *card) +static const char *qeth_get_bufsize_str(struct qeth_card *card) { if (card->qdio.in_buf_size == 16384) return "16k"; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 9c789ad6831a..368fb85d8851 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -231,13 +231,7 @@ static void qeth_l2_del_all_macs(struct qeth_card *card) spin_unlock_bh(&card->mclock); } 
-static inline u32 qeth_l2_mac_hash(const u8 *addr) -{ - return get_unaligned((u32 *)(&addr[2])); -} - -static inline int qeth_l2_get_cast_type(struct qeth_card *card, - struct sk_buff *skb) +static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { if (card->info.type == QETH_CARD_TYPE_OSN) return RTN_UNSPEC; @@ -248,8 +242,8 @@ static inline int qeth_l2_get_cast_type(struct qeth_card *card, return RTN_UNSPEC; } -static inline void qeth_l2_hdr_csum(struct qeth_card *card, - struct qeth_hdr *hdr, struct sk_buff *skb) +static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); @@ -606,13 +600,13 @@ static void qeth_promisc_to_bridge(struct qeth_card *card) * only if there is not in the hash table storage already * */ -static void -qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc) +static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, + u8 is_uc) { + u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2])); struct qeth_mac *mac; - hash_for_each_possible(card->mac_htable, mac, hnode, - qeth_l2_mac_hash(ha->addr)) { + hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) { if (is_uc == mac->is_uc && !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) { mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING; @@ -629,9 +623,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc) mac->is_uc = is_uc; mac->disp_flag = QETH_DISP_ADDR_ADD; - hash_add(card->mac_htable, &mac->hnode, - qeth_l2_mac_hash(mac->mac_addr)); - + hash_add(card->mac_htable, &mac->hnode, mac_hash); } static void qeth_l2_set_rx_mode(struct net_device *dev) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 97ca8a6cbb21..140ed124d2f0 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -867,7 +867,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, return rc; } -static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) +static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) { if (cast_type == RTN_MULTICAST) return QETH_CAST_MULTICAST; @@ -876,7 +876,7 @@ static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) return QETH_CAST_UNICAST; } -static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) +static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) { u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6; if (cast_type == RTN_MULTICAST) @@ -1644,9 +1644,8 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, return 0; } -static inline int qeth_l3_rebuild_skb(struct qeth_card *card, - struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned short *vlan_id) +static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned short *vlan_id) { __u16 prot; struct iphdr *ip_hdr; @@ -2396,7 +2395,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return rc; } -inline int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) +static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { int cast_type = RTN_UNSPEC; struct neighbour *n = NULL; @@ -2534,8 +2533,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, rcu_read_unlock(); } -static inline void qeth_l3_hdr_csum(struct qeth_card *card, - struct qeth_hdr *hdr, struct sk_buff *skb) +static void qeth_l3_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb) { struct iphdr *iph = 
ip_hdr(skb); -- cgit v1.2.3-55-g7522 From d65626ad7c999e6e2ab98ea2160e71cf210c366d Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Tue, 15 Aug 2017 17:02:47 +0200 Subject: s390/qeth: extract bridgeport cmd builder Consolidation of duplicated code, no functional change. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l2_main.c | 62 +++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 36 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 368fb85d8851..438a7f29e99f 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1724,11 +1724,26 @@ static int qeth_bridgeport_makerc(struct qeth_card *card, return rc; } -static inline int ipa_cmd_sbp(struct qeth_card *card) +static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card, + enum qeth_ipa_sbp_cmd sbp_cmd, + unsigned int cmd_length) { - return (card->info.type == QETH_CARD_TYPE_IQD) ? - IPA_CMD_SETBRIDGEPORT_IQD : - IPA_CMD_SETBRIDGEPORT_OSA; + enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ? + IPA_CMD_SETBRIDGEPORT_IQD : + IPA_CMD_SETBRIDGEPORT_OSA; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + iob = qeth_get_ipacmd_buffer(card, ipa_cmd, 0); + if (!iob) + return iob; + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + + cmd_length; + cmd->data.sbp.hdr.command_code = sbp_cmd; + cmd->data.sbp.hdr.used_total = 1; + cmd->data.sbp.hdr.seq_no = 1; + return iob; } static int qeth_bridgeport_query_support_cb(struct qeth_card *card, @@ -1758,21 +1773,13 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card, static void qeth_bridgeport_query_support(struct qeth_card *card) { struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; struct _qeth_sbp_cbctl cbctl; QETH_CARD_TEXT(card, 2, "brqsuppo"); - iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0); + iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED, + sizeof(struct qeth_sbp_query_cmds_supp)); if (!iob) return; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.sbp.hdr.cmdlength = - sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_query_cmds_supp); - cmd->data.sbp.hdr.command_code = - IPA_SBP_QUERY_COMMANDS_SUPPORTED; - cmd->data.sbp.hdr.used_total = 1; - cmd->data.sbp.hdr.seq_no = 1; if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb, (void *)&cbctl) || qeth_bridgeport_makerc(card, &cbctl, @@ -1826,7 +1833,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, { int rc = 0; struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; struct _qeth_sbp_cbctl cbctl = { .data = { .qports = { @@ -1839,16 +1845,9 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "brqports"); if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) return -EOPNOTSUPP; - iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0); + iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0); if (!iob) return -ENOMEM; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.sbp.hdr.cmdlength = - sizeof(struct qeth_ipacmd_sbp_hdr); - cmd->data.sbp.hdr.command_code = - IPA_SBP_QUERY_BRIDGE_PORTS; - cmd->data.sbp.hdr.used_total = 1; - cmd->data.sbp.hdr.seq_no = 1; rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, (void *)&cbctl); if (rc < 0) @@ -1880,7 +1879,6 @@ int 
qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) int rc = 0; int cmdlength; struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; struct _qeth_sbp_cbctl cbctl; enum qeth_ipa_sbp_cmd setcmd; @@ -1888,32 +1886,24 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) switch (role) { case QETH_SBP_ROLE_NONE: setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE; - cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_reset_role); + cmdlength = sizeof(struct qeth_sbp_reset_role); break; case QETH_SBP_ROLE_PRIMARY: setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT; - cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_set_primary); + cmdlength = sizeof(struct qeth_sbp_set_primary); break; case QETH_SBP_ROLE_SECONDARY: setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT; - cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_set_secondary); + cmdlength = sizeof(struct qeth_sbp_set_secondary); break; default: return -EINVAL; } if (!(card->options.sbp.supported_funcs & setcmd)) return -EOPNOTSUPP; - iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0); + iob = qeth_sbp_build_cmd(card, setcmd, cmdlength); if (!iob) return -ENOMEM; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.sbp.hdr.cmdlength = cmdlength; - cmd->data.sbp.hdr.command_code = setcmd; - cmd->data.sbp.hdr.used_total = 1; - cmd->data.sbp.hdr.seq_no = 1; rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, (void *)&cbctl); if (rc < 0) -- cgit v1.2.3-55-g7522 From aa9bea0b081c015ad5d54a2cced0977917775ed7 Mon Sep 17 00:00:00 2001 From: Kittipon Meesompop Date: Tue, 15 Aug 2017 17:02:48 +0200 Subject: s390/qeth: reject multicast rxip addresses There exist different commands to add unicast and multicast addresses on the OSA card. rxip addresses are always set as unicast addresses and thus just unicast addresses should be allowed. Adding a multicast address now fails and a grace message is generated. Signed-off-by: Kittipon Meesompop Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l3_sys.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index f2f94f59e0fa..2000ef190e94 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -895,9 +895,26 @@ static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev, static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto, u8 *addr) { + __be32 ipv4_addr; + struct in6_addr ipv6_addr; + if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { return -EINVAL; } + if (proto == QETH_PROT_IPV4) { + memcpy(&ipv4_addr, addr, sizeof(ipv4_addr)); + if (ipv4_is_multicast(ipv4_addr)) { + QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n"); + return -EINVAL; + } + } else if (proto == QETH_PROT_IPV6) { + memcpy(&ipv6_addr, addr, sizeof(ipv6_addr)); + if (ipv6_addr_is_multicast(&ipv6_addr)) { + QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n"); + return -EINVAL; + } + } + return 0; } -- cgit v1.2.3-55-g7522 From ea4ae35a829cc53b4df131e845216b2409ed706f Mon Sep 17 00:00:00 2001 From: Kittipon Meesompop Date: Tue, 15 Aug 2017 17:02:49 +0200 Subject: s390/qeth: fix trace-messages for deleting rxip addresses change trace-messages: - from addrxip4 to delrxip4 - from addrxip6 to delrxip6 Signed-off-by: Kittipon Meesompop Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_l3_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 140ed124d2f0..41bd00454d0f 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -784,11 +784,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_CARD_TEXT(card, 2, "addrxip4"); + QETH_CARD_TEXT(card, 2, "delrxip4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto == QETH_PROT_IPV6) { - QETH_CARD_TEXT(card, 2, "addrxip6"); + QETH_CARD_TEXT(card, 2, "delrxip6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } -- cgit v1.2.3-55-g7522 From cb816192d986f7596009dedcf2201fe2e5bc2aa7 Mon Sep 17 00:00:00 2001 From: Kittipon Meesompop Date: Tue, 15 Aug 2017 17:02:50 +0200 Subject: s390/qeth: fix using of ref counter for rxip addresses IP-address setting and removal are delayed when the device is not yet in state SOFTSETUP or UP. ref_counter has been implemented only for ip-address with type normal. In this patch ref_counter logic is also used for ip-address with type rxip to allow appropriate handling of multiple postponed rxip add and del calls. Signed-off-by: Kittipon Meesompop Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l3_main.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 41bd00454d0f..0a3dc14a1381 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -247,7 +247,8 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) return -ENOENT; addr->ref_counter--; - if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) + if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL || + addr->type == QETH_IP_TYPE_RXIP)) return rc; if (addr->in_progress) return -EINPROGRESS; @@ -329,8 +330,9 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) kfree(addr); } } else { - if (addr->type == QETH_IP_TYPE_NORMAL) - addr->ref_counter++; + if (addr->type == QETH_IP_TYPE_NORMAL || + addr->type == QETH_IP_TYPE_RXIP) + addr->ref_counter++; } return rc; -- cgit v1.2.3-55-g7522 From dc503a8ad98474ea0073a1c5c4d9f18cb8dd0dbf Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 15 Aug 2017 20:34:35 +0100 Subject: bpf/verifier: track liveness for pruning State of a register doesn't matter if it wasn't read in reaching an exit; a write screens off all reads downstream of it from all explored_states upstream of it. This allows us to prune many more branches; here are some processed insn counts for some Cilium programs: Program before after bpf_lb_opt_-DLB_L3.o 6515 3361 bpf_lb_opt_-DLB_L4.o 8976 5176 bpf_lb_opt_-DUNKNOWN.o 2960 1137 bpf_lxc_opt_-DDROP_ALL.o 95412 48537 bpf_lxc_opt_-DUNKNOWN.o 141706 78718 bpf_netdev.o 24251 17995 bpf_overlay.o 10999 9385 The runtime is also improved; here are 'time' results in ms: Program before after bpf_lb_opt_-DLB_L3.o 24 6 bpf_lb_opt_-DLB_L4.o 26 11 bpf_lb_opt_-DUNKNOWN.o 11 2 bpf_lxc_opt_-DDROP_ALL.o 1288 139 bpf_lxc_opt_-DUNKNOWN.o 1768 234 bpf_netdev.o 62 31 bpf_overlay.o 15 13 Signed-off-by: Edward Cree Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/linux/bpf_verifier.h | 11 ++- kernel/bpf/verifier.c | 189 +++++++++++++++++++++++++++++++++---------- 2 files changed, 156 insertions(+), 44 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index c61c3033522e..91d07efed2ba 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -21,6 +21,12 @@ */ #define BPF_MAX_VAR_SIZ INT_MAX +enum bpf_reg_liveness { + REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ + REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */ + REG_LIVE_WRITTEN, /* reg was written first, screening off later reads */ +}; + struct bpf_reg_state { enum bpf_reg_type type; union { @@ -40,7 +46,7 @@ struct bpf_reg_state { * came from, when one is tested for != NULL. */ u32 id; - /* These five fields must be last. See states_equal() */ + /* Ordering of fields matters. See states_equal() */ /* For scalar types (SCALAR_VALUE), this represents our knowledge of * the actual value. * For pointer types, this represents the variable part of the offset @@ -57,6 +63,8 @@ struct bpf_reg_state { s64 smax_value; /* maximum possible (s64)value */ u64 umin_value; /* minimum possible (u64)value */ u64 umax_value; /* maximum possible (u64)value */ + /* This field must be last, for states_equal() reasons. */ + enum bpf_reg_liveness live; }; enum bpf_stack_slot_type { @@ -74,6 +82,7 @@ struct bpf_verifier_state { struct bpf_reg_state regs[MAX_BPF_REG]; u8 stack_slot_type[MAX_BPF_STACK]; struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE]; + struct bpf_verifier_state *parent; }; /* linked list of verifier states used to prune search */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index ecc590e01a1d..7dd96d064be1 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -629,8 +629,10 @@ static void init_reg_state(struct bpf_reg_state *regs) { int i; - for (i = 0; i < MAX_BPF_REG; i++) + for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(regs, i); + regs[i].live = REG_LIVE_NONE; + } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; @@ -647,9 +649,26 @@ enum reg_arg_type { DST_OP_NO_MARK /* same as above, check only, don't mark */ }; -static int check_reg_arg(struct bpf_reg_state *regs, u32 regno, +static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) +{ + struct bpf_verifier_state *parent = state->parent; + + while (parent) { + /* if read wasn't screened by an earlier write ... */ + if (state->regs[regno].live & REG_LIVE_WRITTEN) + break; + /* ... 
then we depend on parent's value */ + parent->regs[regno].live |= REG_LIVE_READ; + state = parent; + parent = state->parent; + } +} + +static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { + struct bpf_reg_state *regs = env->cur_state.regs; + if (regno >= MAX_BPF_REG) { verbose("R%d is invalid\n", regno); return -EINVAL; @@ -661,12 +680,14 @@ static int check_reg_arg(struct bpf_reg_state *regs, u32 regno, verbose("R%d !read_ok\n", regno); return -EACCES; } + mark_reg_read(&env->cur_state, regno); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose("frame pointer is read only\n"); return -EACCES; } + regs[regno].live |= REG_LIVE_WRITTEN; if (t == DST_OP) mark_reg_unknown(regs, regno); } @@ -695,7 +716,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type) static int check_stack_write(struct bpf_verifier_state *state, int off, int size, int value_regno) { - int i; + int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ @@ -710,15 +731,14 @@ static int check_stack_write(struct bpf_verifier_state *state, int off, } /* save register state */ - state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = - state->regs[value_regno]; + state->spilled_regs[spi] = state->regs[value_regno]; + state->spilled_regs[spi].live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; } else { /* regular write of data into stack */ - state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = - (struct bpf_reg_state) {}; + state->spilled_regs[spi] = (struct bpf_reg_state) {}; for (i = 0; i < size; i++) state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; @@ -726,11 +746,26 @@ static int check_stack_write(struct bpf_verifier_state *state, int off, return 0; } +static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot) +{ + struct bpf_verifier_state *parent = state->parent; + + while (parent) { + /* if read wasn't screened by an earlier write ... */ + if (state->spilled_regs[slot].live & REG_LIVE_WRITTEN) + break; + /* ... 
then we depend on parent's value */ + parent->spilled_regs[slot].live |= REG_LIVE_READ; + state = parent; + parent = state->parent; + } +} + static int check_stack_read(struct bpf_verifier_state *state, int off, int size, int value_regno) { u8 *slot_type; - int i; + int i, spi; slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; @@ -746,10 +781,13 @@ static int check_stack_read(struct bpf_verifier_state *state, int off, int size, } } - if (value_regno >= 0) + spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE; + + if (value_regno >= 0) { /* restore register state from stack */ - state->regs[value_regno] = - state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE]; + state->regs[value_regno] = state->spilled_regs[spi]; + mark_stack_slot_read(state, spi); + } return 0; } else { for (i = 0; i < size; i++) { @@ -1167,7 +1205,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { - struct bpf_reg_state *regs = env->cur_state.regs; int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || @@ -1177,12 +1214,12 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins } /* check src1 operand */ - err = check_reg_arg(regs, insn->src_reg, SRC_OP); + err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ - err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; @@ -1297,10 +1334,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, if (arg_type == ARG_DONTCARE) return 0; - if (type == NOT_INIT) { - verbose("R%d !read_ok\n", regno); - return -EACCES; - } + err = check_reg_arg(env, regno, SRC_OP); + if (err) + return err; if (arg_type == ARG_ANYTHING) { if (is_pointer_value(env, regno)) { @@ -1639,10 +1675,12 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) } /* reset caller saved regs */ - for (i = 0; i < CALLER_SAVED_REGS; i++) + for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(regs, caller_saved[i]); + check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); + } - /* update return register */ + /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(regs, BPF_REG_0); @@ -2250,7 +2288,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } /* check src operand */ - err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; @@ -2261,7 +2299,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } /* check dest operand */ - err = check_reg_arg(regs, insn->dst_reg, DST_OP); + err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; @@ -2274,7 +2312,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } /* check src operand */ - err = check_reg_arg(regs, insn->src_reg, SRC_OP); + err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { @@ -2285,7 +2323,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } /* check dest operand */ - err = check_reg_arg(regs, insn->dst_reg, DST_OP); + err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; @@ -2328,7 +2366,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EINVAL; } /* check 
src1 operand */ - err = check_reg_arg(regs, insn->src_reg, SRC_OP); + err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { @@ -2339,7 +2377,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } /* check src2 operand */ - err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; @@ -2360,7 +2398,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) } /* check dest operand */ - err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); + err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; @@ -2717,7 +2755,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, } /* check src1 operand */ - err = check_reg_arg(regs, insn->src_reg, SRC_OP); + err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; @@ -2734,7 +2772,7 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, } /* check src2 operand */ - err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; @@ -2851,7 +2889,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EINVAL; } - err = check_reg_arg(regs, insn->dst_reg, DST_OP); + err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; @@ -2917,7 +2955,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) } /* check whether implicit source operand (register R6) is readable */ - err = check_reg_arg(regs, BPF_REG_6, SRC_OP); + err = check_reg_arg(env, BPF_REG_6, SRC_OP); if (err) return err; @@ -2928,17 +2966,20 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) if (mode == BPF_IND) { /* check explicit source operand */ - err = check_reg_arg(regs, insn->src_reg, SRC_OP); + err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } /* reset caller saved regs to unreadable */ - for (i = 0; i < CALLER_SAVED_REGS; i++) + for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(regs, caller_saved[i]); + check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); + } /* mark destination R0 register as readable, since it contains - * the value fetched from the packet + * the value fetched from the packet. + * Already marked as written above. */ mark_reg_unknown(regs, BPF_REG_0); return 0; @@ -3194,7 +3235,11 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, bool varlen_map_access, struct idpair *idmap) { - if (memcmp(rold, rcur, sizeof(*rold)) == 0) + if (!(rold->live & REG_LIVE_READ)) + /* explored state didn't use this */ + return true; + + if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0) return true; if (rold->type == NOT_INIT) @@ -3372,10 +3417,56 @@ out_free: return ret; } +static bool do_propagate_liveness(const struct bpf_verifier_state *state, + struct bpf_verifier_state *parent) +{ + bool touched = false; /* any changes made? */ + int i; + + if (!parent) + return touched; + /* Propagate read liveness of registers... */ + BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); + /* We don't need to worry about FP liveness because it's read-only */ + for (i = 0; i < BPF_REG_FP; i++) { + if (parent->regs[i].live & REG_LIVE_READ) + continue; + if (state->regs[i].live == REG_LIVE_READ) { + parent->regs[i].live |= REG_LIVE_READ; + touched = true; + } + } + /* ... 
and stack slots */ + for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) { + if (parent->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL) + continue; + if (state->stack_slot_type[i * BPF_REG_SIZE] != STACK_SPILL) + continue; + if (parent->spilled_regs[i].live & REG_LIVE_READ) + continue; + if (state->spilled_regs[i].live == REG_LIVE_READ) { + parent->regs[i].live |= REG_LIVE_READ; + touched = true; + } + } + return touched; +} + +static void propagate_liveness(const struct bpf_verifier_state *state, + struct bpf_verifier_state *parent) +{ + while (do_propagate_liveness(state, parent)) { + /* Something changed, so we need to feed those changes onward */ + state = parent; + parent = state->parent; + } +} + static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; + int i; sl = env->explored_states[insn_idx]; if (!sl) @@ -3385,11 +3476,14 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) return 0; while (sl != STATE_LIST_MARK) { - if (states_equal(env, &sl->state, &env->cur_state)) + if (states_equal(env, &sl->state, &env->cur_state)) { /* reached equivalent register/stack state, - * prune the search + * prune the search. + * Registers read by the continuation are read by us. */ + propagate_liveness(&sl->state, &env->cur_state); return 1; + } sl = sl->next; } @@ -3407,6 +3501,14 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state)); new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; + /* connect new state to parentage chain */ + env->cur_state.parent = &new_sl->state; + /* clear liveness marks in current state */ + for (i = 0; i < BPF_REG_FP; i++) + env->cur_state.regs[i].live = REG_LIVE_NONE; + for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) + if (env->cur_state.stack_slot_type[i * BPF_REG_SIZE] == STACK_SPILL) + env->cur_state.spilled_regs[i].live = REG_LIVE_NONE; return 0; } @@ -3430,6 +3532,7 @@ static int do_check(struct bpf_verifier_env *env) bool do_print_state = false; init_reg_state(regs); + state->parent = NULL; insn_idx = 0; env->varlen_map_value_access = false; for (;;) { @@ -3500,11 +3603,11 @@ static int do_check(struct bpf_verifier_env *env) /* check for reserved fields is already done */ /* check src operand */ - err = check_reg_arg(regs, insn->src_reg, SRC_OP); + err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; - err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); + err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; @@ -3554,11 +3657,11 @@ static int do_check(struct bpf_verifier_env *env) } /* check src1 operand */ - err = check_reg_arg(regs, insn->src_reg, SRC_OP); + err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ - err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; @@ -3589,7 +3692,7 @@ static int do_check(struct bpf_verifier_env *env) return -EINVAL; } /* check src operand */ - err = check_reg_arg(regs, insn->dst_reg, SRC_OP); + err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; @@ -3643,7 +3746,7 @@ static int do_check(struct bpf_verifier_env *env) * of bpf_exit, which means that program wrote * something into it earlier */ - err = check_reg_arg(regs, BPF_REG_0, SRC_OP); + err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; -- cgit 
v1.2.3-55-g7522 From e084a8b89c173d7b37e886b277b6d3e05669259a Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Tue, 15 Aug 2017 02:33:05 -0400 Subject: mlx4: remove unnecessary pci_set_drvdata() The driver core clears the driver data to NULL after device_release or on probe failure. Thus, it is not necessary to manually clear the device driver data to NULL. Cc: Joe Jin Cc: Junxiao Bi Signed-off-by: Zhu Yanjun Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 09b9bc17bce9..df9b0efb5ab1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3782,7 +3782,6 @@ err_release_regions: err_disable_pdev: mlx4_pci_disable_device(&priv->dev); - pci_set_drvdata(pdev, NULL); return err; } @@ -3997,7 +3996,6 @@ static void mlx4_remove_one(struct pci_dev *pdev) devlink_unregister(devlink); kfree(dev->persist); devlink_free(devlink); - pci_set_drvdata(pdev, NULL); } static int restore_current_port_types(struct mlx4_dev *dev, -- cgit v1.2.3-55-g7522 From 26d159482a0283568557eb606d7f869db7fa03d4 Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Tue, 15 Aug 2017 02:33:06 -0400 Subject: mlx5: remove unnecessary pci_set_drvdata() The driver core clears the driver data to NULL after device_release or on probe failure. Thus, it is not necessary to manually clear the device driver data to NULL. Cc: Joe Jin Cc: Junxiao Bi Signed-off-by: Zhu Yanjun Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 6dbd637b4e66..7e6e24398926 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1400,7 +1400,6 @@ clean_srcu: cleanup_srcu_struct(&priv->pfault_srcu); clean_dev: #endif - pci_set_drvdata(pdev, NULL); devlink_free(devlink); return err; @@ -1427,7 +1426,6 @@ static void remove_one(struct pci_dev *pdev) #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING cleanup_srcu_struct(&priv->pfault_srcu); #endif - pci_set_drvdata(pdev, NULL); devlink_free(devlink); } -- cgit v1.2.3-55-g7522 From fe4007999599c02598c17b643e8de43e487d48e8 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 15 Aug 2017 09:09:49 +0200 Subject: ipv6: fib: Provide offload indication using nexthop flags IPv6 routes currently lack nexthop flags as in IPv4. This has several implications. In the forwarding path, it requires us to check the carrier state of the nexthop device and potentially ignore a linkdown route, instead of checking for RTNH_F_LINKDOWN. It also requires capable drivers to use the user facing IPv6-specific route flags to provide offload indication, instead of using the nexthop flags as in IPv4. Add nexthop flags to IPv6 routes in the 40 bytes hole and use it to provide offload indication instead of the RTF_OFFLOAD flag, which is removed while it's still not part of any official kernel release. In the near future we would like to use the field for the RTNH_F_{LINKDOWN,DEAD} flags, but this change is more involved and might not be ready in time for the current cycle. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Acked-by: David Ahern Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 8 ++++---- include/net/ip6_fib.h | 2 ++ include/uapi/linux/ipv6_route.h | 1 - net/ipv6/route.c | 7 +------ 4 files changed, 7 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 16676fffbf70..4895d5b8942b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -2397,7 +2397,7 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) { list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6, - list)->rt->rt6i_flags |= RTF_OFFLOAD; + list)->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD; return; } @@ -2407,9 +2407,9 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry) nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6); if (nh && nh->offloaded) - mlxsw_sp_rt6->rt->rt6i_flags |= RTF_OFFLOAD; + mlxsw_sp_rt6->rt->rt6i_nh_flags |= RTNH_F_OFFLOAD; else - mlxsw_sp_rt6->rt->rt6i_flags &= ~RTF_OFFLOAD; + mlxsw_sp_rt6->rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD; } } @@ -2424,7 +2424,7 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry) list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) { struct rt6_info *rt = mlxsw_sp_rt6->rt; - rt->rt6i_flags &= ~RTF_OFFLOAD; + rt->rt6i_nh_flags &= ~RTNH_F_OFFLOAD; } } diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 1d790ea40ea7..71c1646298ae 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -120,6 +120,8 @@ struct rt6_info { atomic_t rt6i_ref; + unsigned int rt6i_nh_flags; + /* These are in a separate cache line. */ struct rt6key rt6i_dst ____cacheline_aligned_in_smp; u32 rt6i_flags; diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h index 33e2a5732bd1..d496c02e14bc 100644 --- a/include/uapi/linux/ipv6_route.h +++ b/include/uapi/linux/ipv6_route.h @@ -35,7 +35,6 @@ #define RTF_PREF(pref) ((pref) << 27) #define RTF_PREF_MASK 0x18000000 -#define RTF_OFFLOAD 0x20000000 /* offloaded route */ #define RTF_PCPU 0x40000000 /* read-only: can not be set by user */ #define RTF_LOCAL 0x80000000 diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 035762fed07d..6793135d49db 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1820,11 +1820,6 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg, goto out; } - if (cfg->fc_flags & RTF_OFFLOAD) { - NL_SET_ERR_MSG(extack, "Userspace can not set RTF_OFFLOAD"); - goto out; - } - if (cfg->fc_dst_len > 128) { NL_SET_ERR_MSG(extack, "Invalid prefix length"); goto out; @@ -3335,7 +3330,7 @@ static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, goto nla_put_failure; } - if (rt->rt6i_flags & RTF_OFFLOAD) + if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD) *flags |= RTNH_F_OFFLOAD; /* not needed for multipath encoding b/c it has a rtnexthop struct */ -- cgit v1.2.3-55-g7522 From df9a21f11f14e393ca48689e1d4b596c4d9200e5 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 15 Aug 2017 09:10:33 +0200 Subject: mlxsw: spectrum_router: Use correct config option I made an embarrassing mistake and used 'IPV6' instead of 'CONFIG_IPV6' around the function that updates the kernel about IPv6 neighbours activity. This can be a problem if the kernel has more neighbours than a certain threshold and it starts deleting those that are supposedly inactive. 
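For context, a minimal self-contained sketch (a simplified, builtin-only variant of the IS_ENABLED() preprocessor trick from include/linux/kconfig.h; not part of the patch) showing why the bare 'IPV6' guard always evaluates to 0 and silently compiles the neighbour-activity path out, while 'CONFIG_IPV6' evaluates to 1 when IPv6 is built in:

#include <stdio.h>

#define CONFIG_IPV6 1	/* what the kernel config would provide */

/* simplified copy of the kconfig.h trick (builtin case only) */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)			___is_defined(x)
#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option)		__is_defined(option)

int main(void)
{
	/* defined to 1, so the placeholder expands and the guard is true */
	printf("IS_ENABLED(CONFIG_IPV6) = %d\n", IS_ENABLED(CONFIG_IPV6));	/* 1 */
	/* undefined symbol: no placeholder expansion, guard is always false */
	printf("IS_ENABLED(IPV6)        = %d\n", IS_ENABLED(IPV6));		/* 0 */
	return 0;
}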
Fixes: b5f3e0d43012 ("mlxsw: spectrum_router: Fix build when IPv6 isn't enabled") Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 4895d5b8942b..a0a9728af989 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1042,7 +1042,7 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp, neigh_release(n); } -#if IS_ENABLED(IPV6) +#if IS_ENABLED(CONFIG_IPV6) static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp, char *rauhtd_pl, int rec_index) -- cgit v1.2.3-55-g7522 From 7324157b8af19ff797b550b7b1543aa1c9c07b48 Mon Sep 17 00:00:00 2001 From: Craig Gallek Date: Tue, 15 Aug 2017 09:43:40 -0400 Subject: dsa: fix flow disector null pointer A recent change to fix up DSA device behavior made the assumption that all skbs passing through the flow disector will be associated with a device. This does not appear to be a safe assumption. Syzkaller found the crash below by attaching a BPF socket filter that tries to find the payload offset of a packet passing between two unix sockets. kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP KASAN Dumping ftrace buffer: (ftrace buffer empty) Modules linked in: CPU: 0 PID: 2940 Comm: syzkaller872007 Not tainted 4.13.0-rc4-next-20170811 #1 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 task: ffff8801d1b425c0 task.stack: ffff8801d0bc0000 RIP: 0010:__skb_flow_dissect+0xdcd/0x3ae0 net/core/flow_dissector.c:445 RSP: 0018:ffff8801d0bc7340 EFLAGS: 00010206 RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000000 RDX: 0000000000000060 RSI: ffffffff856dc080 RDI: 0000000000000300 RBP: ffff8801d0bc7870 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000008 R11: ffffed003a178f1e R12: 0000000000000000 R13: 0000000000000000 R14: ffffffff856dc080 R15: ffff8801ce223140 FS: 00000000016ed880(0000) GS:ffff8801dc000000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000020008000 CR3: 00000001ce22d000 CR4: 00000000001406f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: skb_flow_dissect_flow_keys include/linux/skbuff.h:1176 [inline] skb_get_poff+0x9a/0x1a0 net/core/flow_dissector.c:1079 ______skb_get_pay_offset net/core/filter.c:114 [inline] __skb_get_pay_offset+0x15/0x20 net/core/filter.c:112 Code: 80 3c 02 00 44 89 6d 10 0f 85 44 2b 00 00 4d 8b 67 20 48 b8 00 00 00 00 00 fc ff df 49 8d bc 24 00 03 00 00 48 89 fa 48 c1 ea 03 <80> 3c 02 00 0f 85 13 2b 00 00 4d 8b a4 24 00 03 00 00 4d 85 e4 RIP: __skb_flow_dissect+0xdcd/0x3ae0 net/core/flow_dissector.c:445 RSP: ffff8801d0bc7340 Fixes: 43e665287f93 ("net-next: dsa: fix flow dissection") Reported-by: Dmitry Vyukov Signed-off-by: Craig Gallek Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- net/core/flow_dissector.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 79b9c06c83ad..e2eaa1ff948d 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -442,7 +442,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, nhoff = skb_network_offset(skb); hlen = skb_headlen(skb); #if IS_ENABLED(CONFIG_NET_DSA) - if (unlikely(netdev_uses_dsa(skb->dev))) { + if (unlikely(skb->dev && netdev_uses_dsa(skb->dev))) { const struct dsa_device_ops *ops; int offset; -- cgit v1.2.3-55-g7522 From 2cc7659545bbf5e87795726cb15d09827c6f0fa6 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 15 Aug 2017 16:34:41 +0200 Subject: selftests: add 'ip get' to rtnetlink.sh exercise ip/ip6 RTM_GETROUTE doit() callpath. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- tools/testing/selftests/net/rtnetlink.sh | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index 5b04ad912525..84b4acf5baa9 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -164,6 +164,37 @@ kci_test_polrouting() echo "PASS: policy routing" } +kci_test_route_get() +{ + ret=0 + + ip route get 127.0.0.1 > /dev/null + check_err $? + ip route get 127.0.0.1 dev "$devdummy" > /dev/null + check_err $? + ip route get ::1 > /dev/null + check_err $? + ip route get fe80::1 dev "$devdummy" > /dev/null + check_err $? + ip route get 127.0.0.1 from 127.0.0.1 oif lo tos 0x1 mark 0x1 > /dev/null + check_err $? + ip route get ::1 from ::1 iif lo oif lo tos 0x1 mark 0x1 > /dev/null + check_err $? + ip addr add dev "$devdummy" 10.23.7.11/24 + check_err $? + ip route get 10.23.7.11 from 10.23.7.12 iif "$devdummy" > /dev/null + check_err $? + ip addr del dev "$devdummy" 10.23.7.11/24 + check_err $? + + if [ $ret -ne 0 ];then + echo "FAIL: route get" + return 1 + fi + + echo "PASS: route get" +} + kci_test_rtnl() { kci_add_dummy @@ -173,6 +204,7 @@ kci_test_rtnl() fi kci_test_polrouting + kci_test_route_get kci_test_tc kci_test_gre kci_test_bridge -- cgit v1.2.3-55-g7522 From 121622dba8da9c709b72d801eae7664fa7da7c36 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 15 Aug 2017 16:34:42 +0200 Subject: ipv6: route: make rtm_getroute not assume rtnl is locked __dev_get_by_index assumes RTNL is held, use _rcu version instead. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6793135d49db..60705b4d2c62 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3611,8 +3611,11 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, struct net_device *dev; int flags = 0; - dev = __dev_get_by_index(net, iif); + rcu_read_lock(); + + dev = dev_get_by_index_rcu(net, iif); if (!dev) { + rcu_read_unlock(); err = -ENODEV; goto errout; } @@ -3624,6 +3627,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, if (!fibmatch) dst = ip6_route_input_lookup(net, dev, &fl6, flags); + + rcu_read_unlock(); } else { fl6.flowi6_oif = oif; -- cgit v1.2.3-55-g7522 From e3a22b7f5cfb3b422669fbf3d668315ac7634e5a Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 15 Aug 2017 16:34:43 +0200 Subject: ipv6: route: set ipv6 RTM_GETROUTE to not use rtnl Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/ipv6/route.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 60705b4d2c62..11ff19ba7efd 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -4107,7 +4107,8 @@ int __init ip6_route_init(void) ret = -ENOBUFS; if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) || __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) || - __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, 0)) + __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, + RTNL_FLAG_DOIT_UNLOCKED)) goto out_register_late_subsys; ret = register_netdevice_notifier(&ip6_route_dev_notifier); -- cgit v1.2.3-55-g7522 From 394f51abb3d04f33fb798f04b16ae6b0491ea4ec Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 15 Aug 2017 16:34:44 +0200 Subject: ipv4: route: set ipv4 RTM_GETROUTE to not use rtnl Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- net/ipv4/route.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6810d2076b1b..618bbe1405fc 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3073,7 +3073,8 @@ int __init ip_rt_init(void) xfrm_init(); xfrm4_init(); #endif - rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, 0); + rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, + RTNL_FLAG_DOIT_UNLOCKED); #ifdef CONFIG_SYSCTL register_pernet_subsys(&sysctl_route_ops); -- cgit v1.2.3-55-g7522 From 22cb7a3ac380ecaab6837670963813599b123a53 Mon Sep 17 00:00:00 2001 From: Biju Das Date: Tue, 15 Aug 2017 15:40:20 +0100 Subject: dt-bindings: net: ravb : Add support for r8a7745 SoC Add a new compatible string for the RZ/G1E (R8A7745) SoC. Signed-off-by: Biju Das Acked-by: Sergei Shtylyov Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/renesas,ravb.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt index 4717bc24eada..16723535e1aa 100644 --- a/Documentation/devicetree/bindings/net/renesas,ravb.txt +++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt @@ -6,6 +6,7 @@ interface contains. Required properties: - compatible: Must contain one or more of the following: - "renesas,etheravb-r8a7743" for the R8A7743 SoC. + - "renesas,etheravb-r8a7745" for the R8A7745 SoC. - "renesas,etheravb-r8a7790" for the R8A7790 SoC. - "renesas,etheravb-r8a7791" for the R8A7791 SoC. 
- "renesas,etheravb-r8a7792" for the R8A7792 SoC. -- cgit v1.2.3-55-g7522 From 1ff392689f2eee806fad57977488181130c05830 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Tue, 15 Aug 2017 12:46:05 -0700 Subject: liquidio: moved liquidio_msix_intr_handler to lio_core.c Moving common liquidio_msix_intr_handler to lio_core.c Signed-off-by: Intiyaz Basha Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 41 ++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 43 --------------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 45 ---------------------- drivers/net/ethernet/cavium/liquidio/octeon_main.h | 6 +++ .../net/ethernet/cavium/liquidio/octeon_network.h | 3 ++ 5 files changed, 50 insertions(+), 88 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index d20d0eb45048..03746d8f2778 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -788,3 +788,44 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx) return 0; } + +static +int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) +{ + struct octeon_device *oct = droq->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + + if (droq->ops.poll_mode) { + droq->ops.napi_fn(droq); + } else { + if (ret & MSIX_PO_INT) { + if (OCTEON_CN23XX_VF(oct)) + dev_err(&oct->pci_dev->dev, + "should not come here should not get rx when poll mode = 0 for vf\n"); + tasklet_schedule(&oct_priv->droq_tasklet); + return 1; + } + /* this will be flushed periodically by check iq db */ + if (ret & MSIX_PI_INT) + return 0; + } + + return 0; +} + +irqreturn_t +liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) +{ + struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; + struct octeon_device *oct = ioq_vector->oct_dev; + struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; + u64 ret; + + ret = oct->fn_list.msix_interrupt_handler(ioq_vector); + + if (ret & MSIX_PO_INT || ret & MSIX_PI_INT) + liquidio_schedule_msix_droq_pkt_handler(droq, ret); + + return IRQ_HANDLED; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 247f5de6da57..96ba5ec756ad 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -175,12 +175,6 @@ struct handshake { int started_ok; }; -struct octeon_device_priv { - /** Tasklet structures for this device. 
*/ - struct tasklet_struct droq_tasklet; - unsigned long napi_mask; -}; - #ifdef CONFIG_PCI_IOV static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); #endif @@ -907,27 +901,6 @@ static inline void update_link_status(struct net_device *netdev, } } -static -int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) -{ - struct octeon_device *oct = droq->oct_dev; - struct octeon_device_priv *oct_priv = - (struct octeon_device_priv *)oct->priv; - - if (droq->ops.poll_mode) { - droq->ops.napi_fn(droq); - } else { - if (ret & MSIX_PO_INT) { - tasklet_schedule(&oct_priv->droq_tasklet); - return 1; - } - /* this will be flushed periodically by check iq db */ - if (ret & MSIX_PI_INT) - return 0; - } - return 0; -} - /** * \brief Droq packet processor sceduler * @param oct octeon device @@ -957,22 +930,6 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) } } -static irqreturn_t -liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) -{ - u64 ret; - struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; - struct octeon_device *oct = ioq_vector->oct_dev; - struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; - - ret = oct->fn_list.msix_interrupt_handler(ioq_vector); - - if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT)) - liquidio_schedule_msix_droq_pkt_handler(droq, ret); - - return IRQ_HANDLED; -} - /** * \brief Interrupt handler for octeon * @param irq unused diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 2fc2da3a8018..688b438e3e19 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -107,12 +107,6 @@ struct octnic_gather { dma_addr_t sg_dma_ptr; }; -struct octeon_device_priv { - /* Tasklet structures for this device. 
*/ - struct tasklet_struct droq_tasklet; - unsigned long napi_mask; -}; - static int liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void liquidio_vf_remove(struct pci_dev *pdev); @@ -648,45 +642,6 @@ static void update_link_status(struct net_device *netdev, } } -static -int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) -{ - struct octeon_device *oct = droq->oct_dev; - struct octeon_device_priv *oct_priv = - (struct octeon_device_priv *)oct->priv; - - if (droq->ops.poll_mode) { - droq->ops.napi_fn(droq); - } else { - if (ret & MSIX_PO_INT) { - dev_err(&oct->pci_dev->dev, - "should not come here should not get rx when poll mode = 0 for vf\n"); - tasklet_schedule(&oct_priv->droq_tasklet); - return 1; - } - /* this will be flushed periodically by check iq db */ - if (ret & MSIX_PI_INT) - return 0; - } - return 0; -} - -static irqreturn_t -liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) -{ - struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; - struct octeon_device *oct = ioq_vector->oct_dev; - struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; - u64 ret; - - ret = oct->fn_list.msix_interrupt_handler(ioq_vector); - - if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT)) - liquidio_schedule_msix_droq_pkt_handler(droq, ret); - - return IRQ_HANDLED; -} - /** * \brief Setup interrupt for octeon device * @param oct octeon device diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h index 7ccffbb0019e..32ef3a7d88d8 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -35,6 +35,12 @@ #define DRV_NAME "LiquidIO" +struct octeon_device_priv { + /** Tasklet structures for this device. */ + struct tasklet_struct droq_tasklet; + unsigned long napi_mask; +}; + /** This structure is used by NIC driver to store information required * to free the sk_buff when the packet has been fetched by Octeon. * Bytes offset below assume worst-case of a 64-bit system. diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index d4b39305ad68..7454d711dd86 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -169,6 +169,9 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr); int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx); +irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), + void *dev); + /** * \brief Register ethtool operations * @param netdev pointer to network device -- cgit v1.2.3-55-g7522 From 5ef4ddb3397d95eef2a71b063913dbfef72c6d7b Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Tue, 15 Aug 2017 12:46:11 -0700 Subject: liquidio: moved liquidio_legacy_intr_handler to lio_core.c Moving liquidio_legacy_intr_handler to lio_core.c Signed-off-by: Intiyaz Basha Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 55 +++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 56 ---------------------- .../net/ethernet/cavium/liquidio/octeon_network.h | 2 + 3 files changed, 57 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 03746d8f2778..5c5f957f9d40 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -829,3 +829,58 @@ liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev) return IRQ_HANDLED; } + +/** + * \brief Droq packet processor sceduler + * @param oct octeon device + */ +static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) +{ + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + struct octeon_droq *droq; + u64 oq_no; + + if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); + oq_no++) { + if (!(oct->droq_intr & BIT_ULL(oq_no))) + continue; + + droq = oct->droq[oq_no]; + + if (droq->ops.poll_mode) { + droq->ops.napi_fn(droq); + oct_priv->napi_mask |= (1 << oq_no); + } else { + tasklet_schedule(&oct_priv->droq_tasklet); + } + } + } +} + +/** + * \brief Interrupt handler for octeon + * @param irq unused + * @param dev octeon device + */ +irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)), + void *dev) +{ + struct octeon_device *oct = (struct octeon_device *)dev; + irqreturn_t ret; + + /* Disable our interrupts for the duration of ISR */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + ret = oct->fn_list.process_interrupt_regs(oct); + + if (ret == IRQ_HANDLED) + liquidio_schedule_droq_pkt_handlers(oct); + + /* Re-enable our interrupts */ + if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET)) + oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); + + return ret; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 96ba5ec756ad..478144ddebe2 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -901,62 +901,6 @@ static inline void update_link_status(struct net_device *netdev, } } -/** - * \brief Droq packet processor sceduler - * @param oct octeon device - */ -static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) -{ - struct octeon_device_priv *oct_priv = - (struct octeon_device_priv *)oct->priv; - u64 oq_no; - struct octeon_droq *droq; - - if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { - for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); - oq_no++) { - if (!(oct->droq_intr & BIT_ULL(oq_no))) - continue; - - droq = oct->droq[oq_no]; - - if (droq->ops.poll_mode) { - droq->ops.napi_fn(droq); - oct_priv->napi_mask |= (1 << oq_no); - } else { - tasklet_schedule(&oct_priv->droq_tasklet); - } - } - } -} - -/** - * \brief Interrupt handler for octeon - * @param irq unused - * @param dev octeon device - */ -static -irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)), - void *dev) -{ - struct octeon_device *oct = (struct octeon_device *)dev; - irqreturn_t ret; - - /* Disable our interrupts for the duration of ISR */ - oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); - - ret = oct->fn_list.process_interrupt_regs(oct); - - if (ret == IRQ_HANDLED) - liquidio_schedule_droq_pkt_handlers(oct); - - /* Re-enable our interrupts */ - if 
(!(atomic_read(&oct->status) == OCT_DEV_IN_RESET)) - oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); - - return ret; -} - /** * \brief Setup interrupt for octeon device * @param oct octeon device diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index 7454d711dd86..de5eecb56a15 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -172,6 +172,8 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx); irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev); +irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)), + void *dev); /** * \brief Register ethtool operations * @param netdev pointer to network device -- cgit v1.2.3-55-g7522 From 14aec73aabcedb6be30f6a541f81295f5df44fbf Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Tue, 15 Aug 2017 12:46:15 -0700 Subject: liquidio: moved octeon_setup_interrupt to lio_core.c Moving common octeon_setup_interrupt to lio_core.c Signed-off-by: Intiyaz Basha Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 200 +++++++++++++++++++++ drivers/net/ethernet/cavium/liquidio/lio_main.c | 163 ----------------- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 98 ---------- .../net/ethernet/cavium/liquidio/octeon_network.h | 4 +- 4 files changed, 202 insertions(+), 263 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 5c5f957f9d40..217200ceecc7 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -864,6 +864,7 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) * @param irq unused * @param dev octeon device */ +static irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)), void *dev) { @@ -884,3 +885,202 @@ irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)), return ret; } + +/** + * \brief Setup interrupt for octeon device + * @param oct octeon device + * + * Enable interrupt in Octeon device as given in the PCI interrupt mask. 
+ */ +int octeon_setup_interrupt(struct octeon_device *oct) +{ + struct msix_entry *msix_entries; + char *queue_irq_names = NULL; + int i, num_interrupts = 0; + int num_alloc_ioq_vectors; + char *aux_irq_name = NULL; + int num_ioq_vectors; + int irqret, err; + + if (oct->msix_on) { + if (OCTEON_CN23XX_PF(oct)) { + oct->num_msix_irqs = oct->sriov_info.num_pf_rings; + num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1; + + /* one non ioq interrupt for handling + * sli_mac_pf_int_sum + */ + oct->num_msix_irqs += 1; + } else if (OCTEON_CN23XX_VF(oct)) { + oct->num_msix_irqs = oct->sriov_info.rings_per_vf; + num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF; + } + + /* allocate storage for the names assigned to each irq */ + oct->irq_name_storage = + kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL); + if (!oct->irq_name_storage) { + dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); + return -ENOMEM; + } + + queue_irq_names = oct->irq_name_storage; + + if (OCTEON_CN23XX_PF(oct)) + aux_irq_name = &queue_irq_names + [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)]; + + oct->msix_entries = kcalloc(oct->num_msix_irqs, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!oct->msix_entries) { + dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return -ENOMEM; + } + + msix_entries = (struct msix_entry *)oct->msix_entries; + + /*Assumption is that pf msix vectors start from pf srn to pf to + * trs and not from 0. if not change this code + */ + if (OCTEON_CN23XX_PF(oct)) { + for (i = 0; i < oct->num_msix_irqs - 1; i++) + msix_entries[i].entry = + oct->sriov_info.pf_srn + i; + + msix_entries[oct->num_msix_irqs - 1].entry = + oct->sriov_info.trs; + } else if (OCTEON_CN23XX_VF(oct)) { + for (i = 0; i < oct->num_msix_irqs; i++) + msix_entries[i].entry = i; + } + num_alloc_ioq_vectors = pci_enable_msix_range( + oct->pci_dev, msix_entries, + oct->num_msix_irqs, + oct->num_msix_irqs); + if (num_alloc_ioq_vectors < 0) { + dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return num_alloc_ioq_vectors; + } + + dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); + + num_ioq_vectors = oct->num_msix_irqs; + /** For PF, there is one non-ioq interrupt handler */ + if (OCTEON_CN23XX_PF(oct)) { + num_ioq_vectors -= 1; + + snprintf(aux_irq_name, INTRNAMSIZ, + "LiquidIO%u-pf%u-aux", oct->octeon_id, + oct->pf_num); + irqret = request_irq( + msix_entries[num_ioq_vectors].vector, + liquidio_legacy_intr_handler, 0, + aux_irq_name, oct); + if (irqret) { + dev_err(&oct->pci_dev->dev, + "Request_irq failed for MSIX interrupt Error: %d\n", + irqret); + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + oct->msix_entries = NULL; + return irqret; + } + } + for (i = 0 ; i < num_ioq_vectors ; i++) { + if (OCTEON_CN23XX_PF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], + INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u", + oct->octeon_id, oct->pf_num, i); + + if (OCTEON_CN23XX_VF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], + INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u", + oct->octeon_id, oct->vf_num, i); + + irqret = request_irq(msix_entries[i].vector, + liquidio_msix_intr_handler, 0, + &queue_irq_names[IRQ_NAME_OFF(i)], + &oct->ioq_vector[i]); + + if (irqret) { + dev_err(&oct->pci_dev->dev, + "Request_irq failed for MSIX 
interrupt Error: %d\n", + irqret); + /** Freeing the non-ioq irq vector here . */ + free_irq(msix_entries[num_ioq_vectors].vector, + oct); + + while (i) { + i--; + /** clearing affinity mask. */ + irq_set_affinity_hint( + msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + } + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + oct->msix_entries = NULL; + return irqret; + } + oct->ioq_vector[i].vector = msix_entries[i].vector; + /* assign the cpu mask for this msix interrupt vector */ + irq_set_affinity_hint(msix_entries[i].vector, + &oct->ioq_vector[i].affinity_mask + ); + } + dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n", + oct->octeon_id); + } else { + err = pci_enable_msi(oct->pci_dev); + if (err) + dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n", + err); + else + oct->flags |= LIO_FLAG_MSI_ENABLED; + + /* allocate storage for the names assigned to the irq */ + oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL); + if (!oct->irq_name_storage) + return -ENOMEM; + + queue_irq_names = oct->irq_name_storage; + + if (OCTEON_CN23XX_PF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ, + "LiquidIO%u-pf%u-rxtx-%u", + oct->octeon_id, oct->pf_num, 0); + + if (OCTEON_CN23XX_VF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ, + "LiquidIO%u-vf%u-rxtx-%u", + oct->octeon_id, oct->vf_num, 0); + + irqret = request_irq(oct->pci_dev->irq, + liquidio_legacy_intr_handler, + IRQF_SHARED, + &queue_irq_names[IRQ_NAME_OFF(0)], oct); + if (irqret) { + if (oct->flags & LIO_FLAG_MSI_ENABLED) + pci_disable_msi(oct->pci_dev); + dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", + irqret); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return irqret; + } + } + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 478144ddebe2..82ed201e7a30 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -901,169 +901,6 @@ static inline void update_link_status(struct net_device *netdev, } } -/** - * \brief Setup interrupt for octeon device - * @param oct octeon device - * - * Enable interrupt in Octeon device as given in the PCI interrupt mask. 
- */ -static int octeon_setup_interrupt(struct octeon_device *oct) -{ - int irqret, err; - struct msix_entry *msix_entries; - int i; - int num_ioq_vectors; - int num_alloc_ioq_vectors; - char *queue_irq_names = NULL; - char *aux_irq_name = NULL; - - if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { - oct->num_msix_irqs = oct->sriov_info.num_pf_rings; - /* one non ioq interrupt for handling sli_mac_pf_int_sum */ - oct->num_msix_irqs += 1; - - /* allocate storage for the names assigned to each irq */ - oct->irq_name_storage = - kcalloc((MAX_IOQ_INTERRUPTS_PER_PF + 1), INTRNAMSIZ, - GFP_KERNEL); - if (!oct->irq_name_storage) { - dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); - return -ENOMEM; - } - - queue_irq_names = oct->irq_name_storage; - aux_irq_name = &queue_irq_names - [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)]; - - oct->msix_entries = kcalloc( - oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); - if (!oct->msix_entries) { - dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return -ENOMEM; - } - - msix_entries = (struct msix_entry *)oct->msix_entries; - /*Assumption is that pf msix vectors start from pf srn to pf to - * trs and not from 0. if not change this code - */ - for (i = 0; i < oct->num_msix_irqs - 1; i++) - msix_entries[i].entry = oct->sriov_info.pf_srn + i; - msix_entries[oct->num_msix_irqs - 1].entry = - oct->sriov_info.trs; - num_alloc_ioq_vectors = pci_enable_msix_range( - oct->pci_dev, msix_entries, - oct->num_msix_irqs, - oct->num_msix_irqs); - if (num_alloc_ioq_vectors < 0) { - dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); - kfree(oct->msix_entries); - oct->msix_entries = NULL; - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return num_alloc_ioq_vectors; - } - dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); - - num_ioq_vectors = oct->num_msix_irqs; - - /** For PF, there is one non-ioq interrupt handler */ - num_ioq_vectors -= 1; - - snprintf(aux_irq_name, INTRNAMSIZ, - "LiquidIO%u-pf%u-aux", oct->octeon_id, oct->pf_num); - irqret = request_irq(msix_entries[num_ioq_vectors].vector, - liquidio_legacy_intr_handler, 0, - aux_irq_name, oct); - if (irqret) { - dev_err(&oct->pci_dev->dev, - "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", - irqret); - pci_disable_msix(oct->pci_dev); - kfree(oct->msix_entries); - oct->msix_entries = NULL; - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return irqret; - } - - for (i = 0; i < num_ioq_vectors; i++) { - snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ, - "LiquidIO%u-pf%u-rxtx-%u", - oct->octeon_id, oct->pf_num, i); - - irqret = request_irq(msix_entries[i].vector, - liquidio_msix_intr_handler, 0, - &queue_irq_names[IRQ_NAME_OFF(i)], - &oct->ioq_vector[i]); - if (irqret) { - dev_err(&oct->pci_dev->dev, - "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", - irqret); - /** Freeing the non-ioq irq vector here . */ - free_irq(msix_entries[num_ioq_vectors].vector, - oct); - - while (i) { - i--; - /** clearing affinity mask. 
*/ - irq_set_affinity_hint( - msix_entries[i].vector, NULL); - free_irq(msix_entries[i].vector, - &oct->ioq_vector[i]); - } - pci_disable_msix(oct->pci_dev); - kfree(oct->msix_entries); - oct->msix_entries = NULL; - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return irqret; - } - oct->ioq_vector[i].vector = msix_entries[i].vector; - /* assign the cpu mask for this msix interrupt vector */ - irq_set_affinity_hint( - msix_entries[i].vector, - (&oct->ioq_vector[i].affinity_mask)); - } - dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n", - oct->octeon_id); - } else { - err = pci_enable_msi(oct->pci_dev); - if (err) - dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n", - err); - else - oct->flags |= LIO_FLAG_MSI_ENABLED; - - /* allocate storage for the names assigned to the irq */ - oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL); - if (!oct->irq_name_storage) - return -ENOMEM; - - queue_irq_names = oct->irq_name_storage; - - snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ, - "LiquidIO%u-pf%u-rxtx-%u", - oct->octeon_id, oct->pf_num, 0); - - irqret = request_irq(oct->pci_dev->irq, - liquidio_legacy_intr_handler, - IRQF_SHARED, - &queue_irq_names[IRQ_NAME_OFF(0)], oct); - if (irqret) { - if (oct->flags & LIO_FLAG_MSI_ENABLED) - pci_disable_msi(oct->pci_dev); - dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", - irqret); - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return irqret; - } - } - return 0; -} - static struct octeon_device *get_other_octeon_device(struct octeon_device *oct) { struct octeon_device *other_oct; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 688b438e3e19..a2f0d628958d 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -642,104 +642,6 @@ static void update_link_status(struct net_device *netdev, } } -/** - * \brief Setup interrupt for octeon device - * @param oct octeon device - * - * Enable interrupt in Octeon device as given in the PCI interrupt mask. 
- */ -static int octeon_setup_interrupt(struct octeon_device *oct) -{ - struct msix_entry *msix_entries; - char *queue_irq_names = NULL; - int num_alloc_ioq_vectors; - int num_ioq_vectors; - int irqret; - int i; - - if (oct->msix_on) { - oct->num_msix_irqs = oct->sriov_info.rings_per_vf; - - /* allocate storage for the names assigned to each irq */ - oct->irq_name_storage = - kcalloc(MAX_IOQ_INTERRUPTS_PER_VF, INTRNAMSIZ, - GFP_KERNEL); - if (!oct->irq_name_storage) { - dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); - return -ENOMEM; - } - - queue_irq_names = oct->irq_name_storage; - - oct->msix_entries = kcalloc( - oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL); - if (!oct->msix_entries) { - dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return -ENOMEM; - } - - msix_entries = (struct msix_entry *)oct->msix_entries; - - for (i = 0; i < oct->num_msix_irqs; i++) - msix_entries[i].entry = i; - num_alloc_ioq_vectors = pci_enable_msix_range( - oct->pci_dev, msix_entries, - oct->num_msix_irqs, - oct->num_msix_irqs); - if (num_alloc_ioq_vectors < 0) { - dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); - kfree(oct->msix_entries); - oct->msix_entries = NULL; - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return num_alloc_ioq_vectors; - } - dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); - - num_ioq_vectors = oct->num_msix_irqs; - - for (i = 0; i < num_ioq_vectors; i++) { - snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ, - "LiquidIO%u-vf%u-rxtx-%u", - oct->octeon_id, oct->vf_num, i); - - irqret = request_irq(msix_entries[i].vector, - liquidio_msix_intr_handler, 0, - &queue_irq_names[IRQ_NAME_OFF(i)], - &oct->ioq_vector[i]); - if (irqret) { - dev_err(&oct->pci_dev->dev, - "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n", - irqret); - - while (i) { - i--; - irq_set_affinity_hint( - msix_entries[i].vector, NULL); - free_irq(msix_entries[i].vector, - &oct->ioq_vector[i]); - } - pci_disable_msix(oct->pci_dev); - kfree(oct->msix_entries); - oct->msix_entries = NULL; - kfree(oct->irq_name_storage); - oct->irq_name_storage = NULL; - return irqret; - } - oct->ioq_vector[i].vector = msix_entries[i].vector; - /* assign the cpu mask for this msix interrupt vector */ - irq_set_affinity_hint( - msix_entries[i].vector, - (&oct->ioq_vector[i].affinity_mask)); - } - dev_dbg(&oct->pci_dev->dev, - "OCTEON[%d]: MSI-X enabled\n", oct->octeon_id); - } - return 0; -} - /** * \brief PCI probe handler * @param pdev PCI device structure diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index de5eecb56a15..ad29550c91f7 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -172,8 +172,8 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx); irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev); -irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)), - void *dev); +int octeon_setup_interrupt(struct octeon_device *oct); + /** * \brief Register ethtool operations * @param netdev pointer to network device -- cgit v1.2.3-55-g7522 From a82457f1b4bd37ab20be956f14bb18e679fde124 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Tue, 15 Aug 2017 12:46:18 -0700 Subject: liquidio: added support for ethtool --set-channels 
feature adding support for ethtool --set-channels feature Signed-off-by: Intiyaz Basha Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 17 +- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 190 +++++++++++++++++++-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 33 ++-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 28 +-- .../net/ethernet/cavium/liquidio/liquidio_common.h | 3 + .../net/ethernet/cavium/liquidio/octeon_network.h | 5 +- 6 files changed, 226 insertions(+), 50 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 217200ceecc7..8b8e78f04f94 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -275,6 +275,11 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n"); break; + case OCTNET_CMD_QUEUE_COUNT_CTL: + netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n", + nctrl->ncmd.s.param1); + break; + default: dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__, nctrl->ncmd.s.cmd); @@ -689,7 +694,8 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) * an input queue is for egress packets, and output queues * are for ingress packets. */ -int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx) +int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx, + u32 num_iqs, u32 num_oqs) { struct octeon_droq_ops droq_ops; struct net_device *netdev; @@ -717,7 +723,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx) cpu_id_modulus = num_present_cpus(); /* set up DROQs. */ - for (q = 0; q < lio->linfo.num_rxpciq; q++) { + for (q = 0; q < num_oqs; q++) { q_no = lio->linfo.rxpciq[q].s.q_no; dev_dbg(&octeon_dev->pci_dev->dev, "%s index:%d linfo.rxpciq.s.q_no:%d\n", @@ -761,7 +767,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx) } /* set up IQs. */ - for (q = 0; q < lio->linfo.num_txpciq; q++) { + for (q = 0; q < num_iqs; q++) { num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF( octeon_get_conf(octeon_dev), lio->ifidx); retval = octeon_setup_iq(octeon_dev, ifidx, q, @@ -892,7 +898,7 @@ irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)), * * Enable interrupt in Octeon device as given in the PCI interrupt mask. 
*/ -int octeon_setup_interrupt(struct octeon_device *oct) +int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs) { struct msix_entry *msix_entries; char *queue_irq_names = NULL; @@ -902,9 +908,9 @@ int octeon_setup_interrupt(struct octeon_device *oct) int num_ioq_vectors; int irqret, err; + oct->num_msix_irqs = num_ioqs; if (oct->msix_on) { if (OCTEON_CN23XX_PF(oct)) { - oct->num_msix_irqs = oct->sriov_info.num_pf_rings; num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1; /* one non ioq interrupt for handling @@ -912,7 +918,6 @@ int octeon_setup_interrupt(struct octeon_device *oct) */ oct->num_msix_irqs += 1; } else if (OCTEON_CN23XX_VF(oct)) { - oct->num_msix_irqs = oct->sriov_info.rings_per_vf; num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF; } diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index a59c8ccebd10..08aa06c90d46 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -31,6 +31,7 @@ #include "cn23xx_pf_device.h" #include "cn23xx_vf_device.h" +static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); static int octnet_get_link_stats(struct net_device *netdev); struct oct_intrmod_context { @@ -300,6 +301,35 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); } +static int +lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL; + nctrl.ncmd.s.param1 = num_queues; + nctrl.ncmd.s.param2 = num_queues; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n", + ret); + return -1; + } + + return 0; +} + static void lio_ethtool_get_channels(struct net_device *dev, struct ethtool_channels *channel) @@ -307,6 +337,7 @@ lio_ethtool_get_channels(struct net_device *dev, struct lio *lio = GET_LIO(dev); struct octeon_device *oct = lio->oct_dev; u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; + u32 combined_count = 0, max_combined = 0; if (OCTEON_CN6XXX(oct)) { struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); @@ -316,22 +347,137 @@ lio_ethtool_get_channels(struct net_device *dev, rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); } else if (OCTEON_CN23XX_PF(oct)) { - - max_rx = oct->sriov_info.num_pf_rings; - max_tx = oct->sriov_info.num_pf_rings; - rx_count = lio->linfo.num_rxpciq; - tx_count = lio->linfo.num_txpciq; + max_combined = lio->linfo.num_txpciq; + combined_count = oct->num_iqs; } else if (OCTEON_CN23XX_VF(oct)) { - max_tx = oct->sriov_info.rings_per_vf; - max_rx = oct->sriov_info.rings_per_vf; - rx_count = lio->linfo.num_rxpciq; - tx_count = lio->linfo.num_txpciq; + u64 reg_val = 0ULL; + u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); + + reg_val = octeon_read_csr64(oct, ctrl); + reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; + max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; + combined_count = oct->num_iqs; } channel->max_rx = max_rx; 
channel->max_tx = max_tx; + channel->max_combined = max_combined; channel->rx_count = rx_count; channel->tx_count = tx_count; + channel->combined_count = combined_count; +} + +static int +lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) +{ + struct msix_entry *msix_entries; + int num_msix_irqs = 0; + int i; + + if (!oct->msix_on) + return 0; + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon. + */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + if (oct->msix_on) { + if (OCTEON_CN23XX_PF(oct)) + num_msix_irqs = oct->num_msix_irqs - 1; + else if (OCTEON_CN23XX_VF(oct)) + num_msix_irqs = oct->num_msix_irqs; + + msix_entries = (struct msix_entry *)oct->msix_entries; + for (i = 0; i < num_msix_irqs; i++) { + if (oct->ioq_vector[i].vector) { + /* clear the affinity_cpumask */ + irq_set_affinity_hint(msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + oct->ioq_vector[i].vector = 0; + } + } + + /* non-iov vector's argument is oct struct */ + if (OCTEON_CN23XX_PF(oct)) + free_irq(msix_entries[i].vector, oct); + + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + } + + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + if (octeon_setup_interrupt(oct, num_ioqs)) { + dev_info(&oct->pci_dev->dev, "Setup interuupt failed\n"); + return 1; + } + + /* Enable Octeon device interrupts */ + oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); + + return 0; +} + +static int +lio_ethtool_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + u32 combined_count, max_combined; + struct lio *lio = GET_LIO(dev); + struct octeon_device *oct = lio->oct_dev; + int stopped = 0; + + if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) { + dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n"); + return -EINVAL; + } + + if (!channel->combined_count || channel->other_count || + channel->rx_count || channel->tx_count) + return -EINVAL; + + combined_count = channel->combined_count; + + if (OCTEON_CN23XX_PF(oct)) { + max_combined = channel->max_combined; + } else if (OCTEON_CN23XX_VF(oct)) { + u64 reg_val = 0ULL; + u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); + + reg_val = octeon_read_csr64(oct, ctrl); + reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; + max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; + } else { + return -EINVAL; + } + + if (combined_count > max_combined || combined_count < 1) + return -EINVAL; + + if (combined_count == oct->num_iqs) + return 0; + + ifstate_set(lio, LIO_IFSTATE_RESETTING); + + if (netif_running(dev)) { + dev->netdev_ops->ndo_stop(dev); + stopped = 1; + } + + if (lio_reset_queues(dev, combined_count)) + return -EINVAL; + + lio_irq_reallocate_irqs(oct, combined_count); + if (stopped) + dev->netdev_ops->ndo_open(dev); + + ifstate_reset(lio, LIO_IFSTATE_RESETTING); + + return 0; } static int lio_get_eeprom_len(struct net_device *netdev) @@ -664,15 +810,12 @@ lio_ethtool_get_ringparam(struct net_device *netdev, ering->rx_jumbo_max_pending = 0; } -static int lio_reset_queues(struct net_device *netdev) +static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; struct napi_struct *napi, *n; - int i; - - dev_dbg(&oct->pci_dev->dev, "%s:%d ifidx %d\n", - __func__, __LINE__, lio->ifidx); + int i, update = 0; if (wait_for_pending_requests(oct)) dev_err(&oct->pci_dev->dev, "There 
were pending requests\n"); @@ -693,6 +836,12 @@ static int lio_reset_queues(struct net_device *netdev) list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) netif_napi_del(napi); + if (num_qs != oct->num_iqs) { + netif_set_real_num_rx_queues(netdev, num_qs); + netif_set_real_num_tx_queues(netdev, num_qs); + update = 1; + } + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; @@ -710,7 +859,7 @@ static int lio_reset_queues(struct net_device *netdev) return -1; } - if (liquidio_setup_io_queues(oct, 0)) { + if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { dev_err(&oct->pci_dev->dev, "IO queues initialization failed\n"); return -1; } @@ -721,6 +870,9 @@ static int lio_reset_queues(struct net_device *netdev) return -1; } + if (update && lio_send_queue_count_update(netdev, num_qs)) + return -1; + return 0; } @@ -764,7 +916,7 @@ static int lio_ethtool_set_ringparam(struct net_device *netdev, CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, rx_count); - if (lio_reset_queues(netdev)) + if (lio_reset_queues(netdev, lio->linfo.num_txpciq)) goto err_lio_reset_queues; if (stopped) @@ -1194,7 +1346,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, /* lio->link_changes */ data[i++] = CVM_CAST64(lio->link_changes); - for (vj = 0; vj < lio->linfo.num_txpciq; vj++) { + for (vj = 0; vj < oct_dev->num_iqs; vj++) { j = lio->linfo.txpciq[vj].s.q_no; /* packets to network port */ @@ -1236,7 +1388,7 @@ static void lio_vf_get_ethtool_stats(struct net_device *netdev, } /* RX */ - for (vj = 0; vj < lio->linfo.num_rxpciq; vj++) { + for (vj = 0; vj < oct_dev->num_oqs; vj++) { j = lio->linfo.rxpciq[vj].s.q_no; /* packets send to TCP/IP network stack */ @@ -2705,6 +2857,7 @@ static const struct ethtool_ops lio_ethtool_ops = { .get_ringparam = lio_ethtool_get_ringparam, .set_ringparam = lio_ethtool_set_ringparam, .get_channels = lio_ethtool_get_channels, + .set_channels = lio_ethtool_set_channels, .set_phys_id = lio_set_phys_id, .get_eeprom_len = lio_get_eeprom_len, .get_eeprom = lio_get_eeprom, @@ -2731,6 +2884,7 @@ static const struct ethtool_ops lio_vf_ethtool_ops = { .get_ringparam = lio_ethtool_get_ringparam, .set_ringparam = lio_ethtool_set_ringparam, .get_channels = lio_ethtool_get_channels, + .set_channels = lio_ethtool_set_channels, .get_strings = lio_vf_get_strings, .get_ethtool_stats = lio_vf_get_ethtool_stats, .get_regs_len = lio_get_regs_len, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 82ed201e7a30..0eea6a2d0200 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -560,7 +560,7 @@ static inline void txqs_wake(struct net_device *netdev) for (i = 0; i < netdev->num_tx_queues; i++) { int qno = lio->linfo.txpciq[i % - (lio->linfo.num_txpciq)].s.q_no; + lio->oct_dev->num_iqs].s.q_no; if (__netif_subqueue_stopped(netdev, i)) { INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, @@ -640,7 +640,7 @@ static inline int check_txq_status(struct lio *lio) /* check each sub-queue state */ for (q = 0; q < numqs; q++) { iq = lio->linfo.txpciq[q % - (lio->linfo.num_txpciq)].s.q_no; + lio->oct_dev->num_iqs].s.q_no; if (octnet_iq_is_full(lio->oct_dev, iq)) continue; if (__netif_subqueue_stopped(lio->netdev, q)) { @@ -1181,11 +1181,15 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (oct->msix_on) { msix_entries = (struct msix_entry *)oct->msix_entries; for (i = 0; i < 
oct->num_msix_irqs - 1; i++) { - /* clear the affinity_cpumask */ - irq_set_affinity_hint(msix_entries[i].vector, - NULL); - free_irq(msix_entries[i].vector, - &oct->ioq_vector[i]); + if (oct->ioq_vector[i].vector) { + /* clear the affinity_cpumask */ + irq_set_affinity_hint( + msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + oct->ioq_vector[i].vector = 0; + } } /* non-iov vector's argument is oct struct */ free_irq(msix_entries[i].vector, oct); @@ -1465,7 +1469,7 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) for (i = 0; i < oct->ifcount; i++) { lio = GET_LIO(oct->props[i].netdev); - for (j = 0; j < lio->linfo.num_rxpciq; j++) + for (j = 0; j < oct->num_oqs; j++) octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j].s.q_no); } @@ -1605,7 +1609,7 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb) if (netif_is_multiqueue(lio->netdev)) { q = skb->queue_mapping; - iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; + iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no; } else { iq = lio->txq; q = iq; @@ -2262,7 +2266,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) return stats; - for (i = 0; i < lio->linfo.num_txpciq; i++) { + for (i = 0; i < oct->num_iqs; i++) { iq_no = lio->linfo.txpciq[i].s.q_no; iq_stats = &oct->instr_queue[iq_no]->stats; pkts += iq_stats->tx_done; @@ -2278,7 +2282,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) drop = 0; bytes = 0; - for (i = 0; i < lio->linfo.num_rxpciq; i++) { + for (i = 0; i < oct->num_oqs; i++) { oq_no = lio->linfo.rxpciq[i].s.q_no; oq_stats = &oct->droq[oq_no]->stats; pkts += oq_stats->rx_pkts_received; @@ -3533,7 +3537,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) */ lio->txq = lio->linfo.txpciq[0].s.q_no; lio->rxq = lio->linfo.rxpciq[0].s.q_no; - if (liquidio_setup_io_queues(octeon_dev, i)) { + if (liquidio_setup_io_queues(octeon_dev, i, + lio->linfo.num_txpciq, + lio->linfo.num_rxpciq)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } @@ -4012,7 +4018,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev) /* Setup the interrupt handler and record the INT SUM register address */ - if (octeon_setup_interrupt(octeon_dev)) + if (octeon_setup_interrupt(octeon_dev, + octeon_dev->sriov_info.num_pf_rings)) return 1; /* Enable Octeon device interrupts */ diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index a2f0d628958d..35a977abc7c4 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -342,7 +342,7 @@ static void txqs_wake(struct net_device *netdev) int i; for (i = 0; i < netdev->num_tx_queues; i++) { - int qno = lio->linfo.txpciq[i % (lio->linfo.num_txpciq)] + int qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs] .s.q_no; if (__netif_subqueue_stopped(netdev, i)) { INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, @@ -750,10 +750,14 @@ static void octeon_destroy_resources(struct octeon_device *oct) if (oct->msix_on) { msix_entries = (struct msix_entry *)oct->msix_entries; for (i = 0; i < oct->num_msix_irqs; i++) { - irq_set_affinity_hint(msix_entries[i].vector, - NULL); - free_irq(msix_entries[i].vector, - &oct->ioq_vector[i]); + if (oct->ioq_vector[i].vector) { + irq_set_affinity_hint( + msix_entries[i].vector, + NULL); 
+ free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + oct->ioq_vector[i].vector = 0; + } } pci_disable_msix(oct->pci_dev); kfree(oct->msix_entries); @@ -986,7 +990,7 @@ static int liquidio_stop_nic_module(struct octeon_device *oct) for (i = 0; i < oct->ifcount; i++) { lio = GET_LIO(oct->props[i].netdev); - for (j = 0; j < lio->linfo.num_rxpciq; j++) + for (j = 0; j < oct->num_oqs; j++) octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j].s.q_no); } @@ -1074,7 +1078,7 @@ static int check_txq_state(struct lio *lio, struct sk_buff *skb) if (netif_is_multiqueue(lio->netdev)) { q = skb->queue_mapping; - iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no; + iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no; } else { iq = lio->txq; q = iq; @@ -1494,7 +1498,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) return stats; - for (i = 0; i < lio->linfo.num_txpciq; i++) { + for (i = 0; i < oct->num_iqs; i++) { iq_no = lio->linfo.txpciq[i].s.q_no; iq_stats = &oct->instr_queue[iq_no]->stats; pkts += iq_stats->tx_done; @@ -1510,7 +1514,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) drop = 0; bytes = 0; - for (i = 0; i < lio->linfo.num_rxpciq; i++) { + for (i = 0; i < oct->num_oqs; i++) { oq_no = lio->linfo.rxpciq[i].s.q_no; oq_stats = &oct->droq[oq_no]->stats; pkts += oq_stats->rx_pkts_received; @@ -2465,7 +2469,9 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) /* Copy MAC Address to OS network device structure */ ether_addr_copy(netdev->dev_addr, mac); - if (liquidio_setup_io_queues(octeon_dev, i)) { + if (liquidio_setup_io_queues(octeon_dev, i, + lio->linfo.num_txpciq, + lio->linfo.num_rxpciq)) { dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } @@ -2688,7 +2694,7 @@ static int octeon_device_init(struct octeon_device *oct) LIQUIDIO_VERSION, oct->sriov_info.rings_per_vf); /* Setup the interrupt handler and record the INT SUM register address*/ - if (octeon_setup_interrupt(oct)) + if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf)) return 1; atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE); diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 3b9e3646b971..18d29550e2f8 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -226,6 +226,9 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_SET_UC_LIST 0x1b #define OCTNET_CMD_SET_VF_LINKSTATE 0x1c + +#define OCTNET_CMD_QUEUE_COUNT_CTL 0x1f + #define OCTNET_CMD_VXLAN_PORT_ADD 0x0 #define OCTNET_CMD_VXLAN_PORT_DEL 0x1 #define OCTNET_CMD_RXCSUM_ENABLE 0x0 diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h index ad29550c91f7..9e36319cead6 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -167,12 +167,13 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev); */ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr); -int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx); +int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx, + u32 num_iqs, u32 num_oqs); irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev); -int octeon_setup_interrupt(struct 
octeon_device *oct); +int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs); /** * \brief Register ethtool operations -- cgit v1.2.3-55-g7522 From a0085f2510e8976614ad8f766b209448b385492f Mon Sep 17 00:00:00 2001 From: Sukumar Ghorai Date: Wed, 16 Aug 2017 14:46:55 -0700 Subject: Bluetooth: btusb: driver to enable the usb-wakeup feature BT-Controller connected as platform non-root-hub device and usb-driver initialize such device with wakeup disabled, Ref. usb_new_device(). At present wakeup-capability get enabled by hid-input device from usb function driver(e.g. BT HID device) at runtime. Again some functional driver does not set usb-wakeup capability(e.g LE HID device implement as HID-over-GATT), and can't wakeup the host on USB. Most of the device operation (such as mass storage) initiated from host (except HID) and USB wakeup aligned with host resume procedure. For BT device, usb-wakeup capability need to enable form btusc driver as a generic solution for multiple profile use case and required for USB remote wakeup (in-bus wakeup) while host is suspended. Also usb-wakeup feature need to enable/disable with HCI interface up and down. Signed-off-by: Sukumar Ghorai Signed-off-by: Amit K Bag Acked-by: Oliver Neukum Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btusb.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index e8d8a3f61f5b..7a5c06aaa181 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -1088,6 +1088,10 @@ static int btusb_open(struct hci_dev *hdev) } data->intf->needs_remote_wakeup = 1; + /* device specific wakeup source enabled and required for USB + * remote wakeup while host is suspended + */ + device_wakeup_enable(&data->udev->dev); if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) goto done; @@ -1151,6 +1155,7 @@ static int btusb_close(struct hci_dev *hdev) goto failed; data->intf->needs_remote_wakeup = 0; + device_wakeup_disable(&data->udev->dev); usb_autopm_put_interface(data->intf); failed: -- cgit v1.2.3-55-g7522 From 74183a1c50a3c61e62d2e585107ea3c0b942a3ff Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Wed, 16 Aug 2017 09:53:30 +0200 Subject: Bluetooth: hci_bcm: Use operation speed of 4Mbps only for ACPI devices Not all Broadcom controller support the 4Mbps operational speed on UART devices. This is because the UART clock setting changes might not be supported. < HCI Command: Broadcom Write UART Clock Setting (0x3f|0x0045) plen 1 01 . > HCI Event: Command Complete (0x0e) plen 4 Broadcom Write UART Clock Setting (0x3f|0x0045) ncmd 1 Status: Unknown HCI Command (0x01) To support any operational speed higher than 3Mbps, support for this command is required. With that respect it is better to not enforce any operational speed by default. Only when its support is known, then allow for higher operational speed. This patch assigns the 4Mbps opertional speed only for devices discovered through ACPI and leave all others at the default 115200. 
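For context on what "support for this command is required" means in practice, here is a rough sketch of probing the clock command before raising the operational speed. It assumes the standard __hci_cmd_sync() kernel API and uses the opcode and parameter shown in the trace above (0x3f|0x0045, payload 0x01); it is not code from this patch, and hci_bcm's real speed handling may differ.

static int bcm_request_high_speed_clock(struct hci_dev *hdev)
{
	const u8 clock = 0x01;	/* parameter value taken from the trace above */
	struct sk_buff *skb;

	/* Broadcom Write UART Clock Setting: OGF 0x3f, OCF 0x0045 */
	skb = __hci_cmd_sync(hdev, 0xfc45, sizeof(clock), &clock,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* command not supported: stay at or below 3 Mbps */

	kfree_skb(skb);
	return 0;			/* controller can run the UART at 4 Mbps */
}
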
Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- drivers/bluetooth/hci_bcm.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 6b42372c53ef..1eb286ade48a 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -59,6 +59,7 @@ struct bcm_device { bool clk_enabled; u32 init_speed; + u32 oper_speed; int irq; u8 irq_polarity; @@ -303,6 +304,7 @@ static int bcm_open(struct hci_uart *hu) if (hu->tty->dev->parent == dev->pdev->dev.parent) { bcm->dev = dev; hu->init_speed = dev->init_speed; + hu->oper_speed = dev->oper_speed; #ifdef CONFIG_PM dev->hu = hu; #endif @@ -699,8 +701,10 @@ static int bcm_resource(struct acpi_resource *ares, void *data) case ACPI_RESOURCE_TYPE_SERIAL_BUS: sb = &ares->data.uart_serial_bus; - if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) + if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) { dev->init_speed = sb->default_baud_rate; + dev->oper_speed = 4000000; + } break; default: @@ -853,7 +857,6 @@ static const struct hci_uart_proto bcm_proto = { .name = "Broadcom", .manufacturer = 15, .init_speed = 115200, - .oper_speed = 4000000, .open = bcm_open, .close = bcm_close, .flush = bcm_flush, -- cgit v1.2.3-55-g7522 From 959466588aa7f84ccf79ae36a1d89542eaf9aaec Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Tue, 15 Aug 2017 16:39:59 +0300 Subject: net_sched: call qlen_notify only if child qdisc is empty This callback is used for deactivating class in parent qdisc. This is cheaper to test queue length right here. Also this allows to catch draining screwed backlog and prevent second deactivation of already inactive parent class which will crash kernel for sure. Kernel with print warning at destruction of child qdisc where no packets but backlog is not zero. Signed-off-by: Konstantin Khlebnikov Signed-off-by: David S. Miller --- net/sched/sch_api.c | 10 +++++++++- net/sched/sch_cbq.c | 3 +-- net/sched/sch_drr.c | 3 +-- net/sched/sch_hfsc.c | 6 ++---- net/sched/sch_htb.c | 3 +-- net/sched/sch_qfq.c | 3 +-- 6 files changed, 15 insertions(+), 13 deletions(-) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 361377fbd780..0fea0c50b763 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -749,6 +749,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n, const struct Qdisc_class_ops *cops; unsigned long cl; u32 parentid; + bool notify; int drops; if (n == 0 && len == 0) @@ -761,6 +762,13 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n, if (sch->flags & TCQ_F_NOPARENT) break; + /* Notify parent qdisc only if child qdisc becomes empty. + * + * If child was empty even before update then backlog + * counter is screwed and we skip notification because + * parent class is already passive. 
+ */ + notify = !sch->q.qlen && !WARN_ON_ONCE(!n); /* TODO: perform the search on a per txq basis */ sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid)); if (sch == NULL) { @@ -768,7 +776,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n, break; } cops = sch->ops->cl_ops; - if (cops->qlen_notify) { + if (notify && cops->qlen_notify) { cl = cops->get(sch, parentid); cops->qlen_notify(sch, cl); cops->put(sch, cl); diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 780db43300b1..1bdb0106f342 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -1385,8 +1385,7 @@ static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg) { struct cbq_class *cl = (struct cbq_class *)arg; - if (cl->q->q.qlen == 0) - cbq_deactivate_class(cl); + cbq_deactivate_class(cl); } static unsigned long cbq_get(struct Qdisc *sch, u32 classid) diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index a413dc1c2098..1d2f6235dfcf 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -246,8 +246,7 @@ static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg) { struct drr_class *cl = (struct drr_class *)arg; - if (cl->qdisc->q.qlen == 0) - list_del(&cl->alist); + list_del(&cl->alist); } static int drr_dump_class(struct Qdisc *sch, unsigned long arg, diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index fd15200f8627..14c99870cdb6 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -1221,10 +1221,8 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg) { struct hfsc_class *cl = (struct hfsc_class *)arg; - if (cl->qdisc->q.qlen == 0) { - update_vf(cl, 0, 0); - set_passive(cl); - } + update_vf(cl, 0, 0); + set_passive(cl); } static unsigned long diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 5d65ec5207e9..dcf3c85e1f4f 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -1186,8 +1186,7 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) { struct htb_class *cl = (struct htb_class *)arg; - if (cl->un.leaf.q->q.qlen == 0) - htb_deactivate(qdisc_priv(sch), cl); + htb_deactivate(qdisc_priv(sch), cl); } static unsigned long htb_get(struct Qdisc *sch, u32 classid) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 0e16dfda0bd7..9caa959f91e1 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -1428,8 +1428,7 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl = (struct qfq_class *)arg; - if (cl->qdisc->q.qlen == 0) - qfq_deactivate_class(q, cl); + qfq_deactivate_class(q, cl); } static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt) -- cgit v1.2.3-55-g7522 From 6b0355f4a9a5cb1346f2c1e30bfee49f2b2c8631 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Tue, 15 Aug 2017 16:40:03 +0300 Subject: net_sched/hfsc: opencode trivial set_active() and set_passive() Any move comment abount update_vf() into right place. Signed-off-by: Konstantin Khlebnikov Signed-off-by: David S. 
Miller --- net/sched/sch_hfsc.c | 45 ++++++++++++++++----------------------------- 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 14c99870cdb6..15f09cb9f1ff 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -829,28 +829,6 @@ update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time) } } -static void -set_active(struct hfsc_class *cl, unsigned int len) -{ - if (cl->cl_flags & HFSC_RSC) - init_ed(cl, len); - if (cl->cl_flags & HFSC_FSC) - init_vf(cl, len); - -} - -static void -set_passive(struct hfsc_class *cl) -{ - if (cl->cl_flags & HFSC_RSC) - eltree_remove(cl); - - /* - * vttree is now handled in update_vf() so that update_vf(cl, 0, 0) - * needs to be called explicitly to remove a class from vttree. - */ -} - static unsigned int qdisc_peek_len(struct Qdisc *sch) { @@ -1221,8 +1199,12 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg) { struct hfsc_class *cl = (struct hfsc_class *)arg; + /* vttree is now handled in update_vf() so that update_vf(cl, 0, 0) + * needs to be called explicitly to remove a class from vttree. + */ update_vf(cl, 0, 0); - set_passive(cl); + if (cl->cl_flags & HFSC_RSC) + eltree_remove(cl); } static unsigned long @@ -1583,7 +1565,12 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) } if (cl->qdisc->q.qlen == 1) { - set_active(cl, qdisc_pkt_len(skb)); + unsigned int len = qdisc_pkt_len(skb); + + if (cl->cl_flags & HFSC_RSC) + init_ed(cl, len); + if (cl->cl_flags & HFSC_FSC) + init_vf(cl, len); /* * If this is the first packet, isolate the head so an eventual * head drop before the first dequeue operation has no chance @@ -1647,18 +1634,18 @@ hfsc_dequeue(struct Qdisc *sch) if (realtime) cl->cl_cumul += qdisc_pkt_len(skb); - if (cl->qdisc->q.qlen != 0) { - if (cl->cl_flags & HFSC_RSC) { + if (cl->cl_flags & HFSC_RSC) { + if (cl->qdisc->q.qlen != 0) { /* update ed */ next_len = qdisc_peek_len(cl->qdisc); if (realtime) update_ed(cl, next_len); else update_d(cl, next_len); + } else { + /* the class becomes passive */ + eltree_remove(cl); } - } else { - /* the class becomes passive */ - set_passive(cl); } qdisc_bstats_update(sch, skb); -- cgit v1.2.3-55-g7522 From 120390468b38f04373e67dbc9f361e2bb2996691 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 15 Aug 2017 10:29:16 -0700 Subject: tun/tap: use paren's with sizeof Although sizeof is an operator in C. The kernel coding style convention is to always use it like a function and add parenthesis. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/tap.c | 2 +- drivers/net/tun.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 0d039411e64c..21b71ae947fd 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1215,7 +1215,7 @@ int tap_queue_resize(struct tap_dev *tap) int n = tap->numqueues; int ret, i = 0; - arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL); + arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); if (!arrays) return -ENOMEM; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 5892284eb8d0..f5017121cd57 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2737,7 +2737,7 @@ static int tun_queue_resize(struct tun_struct *tun) int n = tun->numqueues + tun->numdisabled; int ret, i; - arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL); + arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL); if (!arrays) return -ENOMEM; -- cgit v1.2.3-55-g7522 From a4a765031d007191c36a25edb9df2fa23c8e6496 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 15 Aug 2017 10:29:17 -0700 Subject: virtio: put paren around sizeof Kernel coding style is to put paren around operand of sizeof. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index a3f3c66b4530..4302f313d9a7 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -319,7 +319,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, hdr_len = vi->hdr_len; if (vi->mergeable_rx_bufs) - hdr_padded_len = sizeof *hdr; + hdr_padded_len = sizeof(*hdr); else hdr_padded_len = sizeof(struct padded_vnet_hdr); -- cgit v1.2.3-55-g7522 From 9d2ee98dafd439a768f4cef3757bcce328b86386 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 15 Aug 2017 10:29:18 -0700 Subject: skge: add paren around sizeof arg Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/skge.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 5d7d94de4e00..8a835e82256a 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3516,7 +3516,7 @@ static const char *skge_board_name(const struct skge_hw *hw) if (skge_chips[i].id == hw->chip_id) return skge_chips[i].name; - snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id); + snprintf(buf, sizeof(buf), "chipid 0x%x", hw->chip_id); return buf; } -- cgit v1.2.3-55-g7522 From 31975e27a4b5ca3ff1ca42d7d12bc936d3166d4c Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 15 Aug 2017 10:29:19 -0700 Subject: mlx4: sizeof style usage The kernel coding style is to treat sizeof as a function (ie. with parenthesis) not as an operator. Also use kcalloc and kmalloc_array Signed-off-by: Stephen Hemminger Reviewed-by: Leon Romanovsky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/alloc.c | 2 +- drivers/net/ethernet/mellanox/mlx4/cmd.c | 4 ++-- drivers/net/ethernet/mellanox/mlx4/en_resources.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2 +- drivers/net/ethernet/mellanox/mlx4/eq.c | 20 +++++++++--------- drivers/net/ethernet/mellanox/mlx4/fw.c | 2 +- drivers/net/ethernet/mellanox/mlx4/icm.c | 2 +- drivers/net/ethernet/mellanox/mlx4/icm.h | 4 ++-- drivers/net/ethernet/mellanox/mlx4/intf.c | 2 +- drivers/net/ethernet/mellanox/mlx4/main.c | 12 +++++------ drivers/net/ethernet/mellanox/mlx4/mcg.c | 12 +++++------ drivers/net/ethernet/mellanox/mlx4/mr.c | 10 ++++----- drivers/net/ethernet/mellanox/mlx4/qp.c | 12 +++++------ .../net/ethernet/mellanox/mlx4/resource_tracker.c | 24 +++++++++++----------- 15 files changed, 56 insertions(+), 56 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index b651c1210555..6dabd983e7e0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -186,7 +186,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, bitmap->effective_len = bitmap->avail; spin_lock_init(&bitmap->lock); bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) * - sizeof (long), GFP_KERNEL); + sizeof(long), GFP_KERNEL); if (!bitmap->table) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 674773b28b2e..97aed30ead21 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -2637,7 +2637,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev) int err = 0; priv->cmd.context = kmalloc(priv->cmd.max_cmds * - sizeof (struct mlx4_cmd_context), + sizeof(struct mlx4_cmd_context), GFP_KERNEL); if (!priv->cmd.context) return -ENOMEM; @@ -2695,7 +2695,7 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev) { struct mlx4_cmd_mailbox *mailbox; - mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL); + mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL); if (!mailbox) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 86d2d42d658d..5a47f9669621 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; - memset(context, 0, sizeof *context); + memset(context, 0, sizeof(*context)); context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET); context->pd = cpu_to_be32(mdev->priv_pdn); context->mtu_msgmax = 0xff; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index bf1638044a7a..dcb8f8f84a97 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -1056,7 +1056,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, } qp->event = mlx4_en_sqp_event; - memset(context, 0, sizeof *context); + memset(context, 0, sizeof(*context)); mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, qpn, ring->cqn, -1, context); context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 
73faa3d77921..bcf422efd3b8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -643,7 +643,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, void *fragptr) { struct mlx4_wqe_inline_seg *inl = &tx_desc->inl; - int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; + int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof(*inl); unsigned int hlen = skb_headlen(skb); if (skb->len <= spc) { diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c index 07406cf2eacd..b98698bf75dd 100644 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -259,7 +259,7 @@ int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) if (!s_slave->active) return 0; - memset(&eqe, 0, sizeof eqe); + memset(&eqe, 0, sizeof(eqe)); eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE; @@ -276,7 +276,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) /*don't send if we don't have the that slave */ if (dev->persist->num_vfs < slave) return 0; - memset(&eqe, 0, sizeof eqe); + memset(&eqe, 0, sizeof(eqe)); eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO; @@ -295,7 +295,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, /*don't send if we don't have the that slave */ if (dev->persist->num_vfs < slave) return 0; - memset(&eqe, 0, sizeof eqe); + memset(&eqe, 0, sizeof(eqe)); eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE; eqe.subtype = port_subtype_change; @@ -432,7 +432,7 @@ int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr) { struct mlx4_eqe eqe; - memset(&eqe, 0, sizeof eqe); + memset(&eqe, 0, sizeof(eqe)); eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO; @@ -726,7 +726,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) } memcpy(&priv->mfunc.master.comm_arm_bit_vector, eqe->event.comm_channel_arm.bit_vec, - sizeof eqe->event.comm_channel_arm.bit_vec); + sizeof(eqe)->event.comm_channel_arm.bit_vec); queue_work(priv->mfunc.master.comm_wq, &priv->mfunc.master.comm_work); break; @@ -984,15 +984,15 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent, */ npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; - eq->page_list = kmalloc(npages * sizeof *eq->page_list, - GFP_KERNEL); + eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list), + GFP_KERNEL); if (!eq->page_list) goto err_out; for (i = 0; i < npages; ++i) eq->page_list[i].buf = NULL; - dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL); if (!dma_list) goto err_out_free; @@ -1161,7 +1161,7 @@ int mlx4_alloc_eq_table(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, - sizeof *priv->eq_table.eq, GFP_KERNEL); + sizeof(*priv->eq_table.eq), GFP_KERNEL); if (!priv->eq_table.eq) return -ENOMEM; @@ -1180,7 +1180,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev) int i; priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev), - sizeof *priv->eq_table.uar_map, + sizeof(*priv->eq_table.uar_map), GFP_KERNEL); if (!priv->eq_table.uar_map) { err = -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 041c0ed65929..042707623922 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -57,7 +57,7 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)"); do { \ void *__p = (char *) (source) + (offset); \ u64 val; \ - switch (sizeof (dest)) { \ + switch (sizeof(dest)) { \ case 1: (dest) = *(u8 *) __p; break; \ case 2: (dest) = be16_to_cpup(__p); break; \ case 4: (dest) = be32_to_cpup(__p); break; \ diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index 5a7816e7c7b4..a822f7a56bc5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -400,7 +400,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; - table->icm = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL); + table->icm = kcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL); if (!table->icm) return -ENOMEM; table->virt = virt; diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index dee67fa39107..c9169a490557 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -39,8 +39,8 @@ #include #define MLX4_ICM_CHUNK_LEN \ - ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \ - (sizeof (struct scatterlist))) + ((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \ + (sizeof(struct scatterlist))) enum { MLX4_ICM_PAGE_SHIFT = 12, diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index e00f627331cb..2edcce98ab2d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -53,7 +53,7 @@ static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv) { struct mlx4_device_context *dev_ctx; - dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL); + dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL); if (!dev_ctx) return; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index df9b0efb5ab1..3797491f4b6b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -925,10 +925,10 @@ static int mlx4_slave_cap(struct mlx4_dev *dev) mlx4_replace_zero_macs(dev); dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); - dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); + dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); + dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); + dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy || @@ -2399,7 +2399,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; } priv->eq_table.inta_pin = adapter.inta_pin; - memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); + memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id)); return 0; @@ -2869,7 +2869,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) 
dev->caps.num_eqs - dev->caps.reserved_eqs, MAX_MSIX); - entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); + entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL); if (!entries) goto no_msi; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 0710b3677464..4c5306dbcf11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -162,7 +162,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port, return -EINVAL; s_steer = &mlx4_priv(dev)->steer[port - 1]; - new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL); + new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); if (!new_entry) return -ENOMEM; @@ -175,7 +175,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port, */ pqp = get_promisc_qp(dev, port, steer, qpn); if (pqp) { - dqp = kmalloc(sizeof *dqp, GFP_KERNEL); + dqp = kmalloc(sizeof(*dqp), GFP_KERNEL); if (!dqp) { err = -ENOMEM; goto out_alloc; @@ -274,7 +274,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port, } /* add the qp as a duplicate on this index */ - dqp = kmalloc(sizeof *dqp, GFP_KERNEL); + dqp = kmalloc(sizeof(*dqp), GFP_KERNEL); if (!dqp) return -ENOMEM; dqp->qpn = qpn; @@ -443,7 +443,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, goto out_mutex; } - pqp = kmalloc(sizeof *pqp, GFP_KERNEL); + pqp = kmalloc(sizeof(*pqp), GFP_KERNEL); if (!pqp) { err = -ENOMEM; goto out_mutex; @@ -514,7 +514,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, /* add the new qpn to list of promisc qps */ list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]); /* now need to add all the promisc qps to default entry */ - memset(mgm, 0, sizeof *mgm); + memset(mgm, 0, sizeof(*mgm)); members_count = 0; list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) { if (members_count == dev->caps.num_qp_per_mgm) { @@ -1144,7 +1144,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], index += dev->caps.num_mgms; new_entry = 1; - memset(mgm, 0, sizeof *mgm); + memset(mgm, 0, sizeof(*mgm)); memcpy(mgm->gid, gid, 16); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index 24282cd017d3..c7c0764991c9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -106,9 +106,9 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) buddy->max_order = max_order; spin_lock_init(&buddy->lock); - buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *), + buddy->bits = kcalloc(buddy->max_order + 1, sizeof(long *), GFP_KERNEL); - buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, + buddy->num_free = kcalloc(buddy->max_order + 1, sizeof(*buddy->num_free), GFP_KERNEL); if (!buddy->bits || !buddy->num_free) goto err_out; @@ -703,13 +703,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt, return -ENOMEM; dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle, - npages * sizeof (u64), DMA_TO_DEVICE); + npages * sizeof(u64), DMA_TO_DEVICE); for (i = 0; i < npages; ++i) mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT); dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle, - npages * sizeof (u64), DMA_TO_DEVICE); + npages * sizeof(u64), DMA_TO_DEVICE); return 0; } @@ -1052,7 +1052,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, return -EINVAL; /* All MTTs must fit in the same page */ - if (max_pages * sizeof 
*fmr->mtts > PAGE_SIZE) + if (max_pages * sizeof(*fmr->mtts) > PAGE_SIZE) return -EINVAL; fmr->page_shift = page_shift; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 26747212526b..2b067763a6bc 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -174,7 +174,7 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn)); *(__be32 *) mailbox->buf = cpu_to_be32(optpar); - memcpy(mailbox->buf + 8, context, sizeof *context); + memcpy(mailbox->buf + 8, context, sizeof(*context)); ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn = cpu_to_be32(qp->qpn); @@ -844,10 +844,10 @@ int mlx4_init_qp_table(struct mlx4_dev *dev) /* In mfunc, calculate proxy and tunnel qp offsets for the PF here, * since the PF does not call mlx4_slave_caps */ - dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); - dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL); + dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); + dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); + dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); + dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL); if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy || !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) { @@ -907,7 +907,7 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); if (!err) - memcpy(context, mailbox->buf + 8, sizeof *context); + memcpy(context, mailbox->buf + 8, sizeof(*context)); mlx4_free_cmd_mailbox(dev, mailbox); return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 215e21c3dc8a..fabb53379727 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -1040,7 +1040,7 @@ static struct res_common *alloc_qp_tr(int id) { struct res_qp *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1058,7 +1058,7 @@ static struct res_common *alloc_mtt_tr(int id, int order) { struct res_mtt *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1074,7 +1074,7 @@ static struct res_common *alloc_mpt_tr(int id, int key) { struct res_mpt *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1089,7 +1089,7 @@ static struct res_common *alloc_eq_tr(int id) { struct res_eq *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1103,7 +1103,7 @@ static struct res_common *alloc_cq_tr(int id) { struct res_cq *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1118,7 +1118,7 @@ static struct res_common *alloc_srq_tr(int id) { struct res_srq *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1133,7 +1133,7 @@ static struct res_common *alloc_counter_tr(int id, int port) { struct 
res_counter *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1148,7 +1148,7 @@ static struct res_common *alloc_xrcdn_tr(int id) { struct res_xrcdn *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1162,7 +1162,7 @@ static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) { struct res_fs_rule *ret; - ret = kzalloc(sizeof *ret, GFP_KERNEL); + ret = kzalloc(sizeof(*ret), GFP_KERNEL); if (!ret) return NULL; @@ -1274,7 +1274,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; struct rb_root *root = &tracker->res_tree[type]; - res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); + res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL); if (!res_arr) return -ENOMEM; @@ -2027,7 +2027,7 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) return -EINVAL; - res = kzalloc(sizeof *res, GFP_KERNEL); + res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) { mlx4_release_resource(dev, slave, RES_MAC, 1, port); return -ENOMEM; @@ -4020,7 +4020,7 @@ static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp, struct res_gid *res; int err; - res = kzalloc(sizeof *res, GFP_KERNEL); + res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) return -ENOMEM; -- cgit v1.2.3-55-g7522 From 251564f601a26d01b3b0e5a40889b4efb6823403 Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Tue, 15 Aug 2017 16:26:22 -0700 Subject: liquidio: update VF's netdev->max_mtu if there's a change in PF's MTU A VF's MTU is capped at the parent PF's MTU. So if there's a change in the PF's MTU, then update the VF's netdev->max_mtu. Also remove duplicate log messages for MTU change. Signed-off-by: Veerasenareddy Burru Signed-off-by: Raghu Vatsavayi Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 3 --- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 9 ++++++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index 8b8e78f04f94..d4f0646084b7 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -165,9 +165,6 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) /* If command is successful, change the MTU. 
*/ netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n", netdev->mtu, nctrl->ncmd.s.param1); - dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", - netdev->name, netdev->mtu, - nctrl->ncmd.s.param1); netdev->mtu = nctrl->ncmd.s.param1; queue_delayed_work(lio->link_status_wq.wq, &lio->link_status_wq.wk.work, 0); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 35a977abc7c4..0402b18d4689 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -630,6 +630,12 @@ static void update_link_status(struct net_device *netdev, txqs_stop(netdev); } + if (lio->linfo.link.s.mtu != netdev->max_mtu) { + dev_info(&oct->pci_dev->dev, "Max MTU Changed from %d to %d\n", + netdev->max_mtu, lio->linfo.link.s.mtu); + netdev->max_mtu = lio->linfo.link.s.mtu; + } + if (lio->linfo.link.s.mtu < netdev->mtu) { dev_warn(&oct->pci_dev->dev, "PF has changed the MTU for gmx port. Reducing the mtu from %d to %d\n", @@ -1539,14 +1545,11 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) { struct lio *lio = GET_LIO(netdev); - struct octeon_device *oct = lio->oct_dev; lio->mtu = new_mtu; netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n", netdev->mtu, new_mtu); - dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n", - netdev->name, netdev->mtu, new_mtu); netdev->mtu = new_mtu; -- cgit v1.2.3-55-g7522 From d369bcaf7dcbe1a5d09ed2e519f35e4841303003 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 16 Aug 2017 10:25:59 +0530 Subject: net: 3c509: constify pnp_device_id pnp_device_id are not supposed to change at runtime. All functions working with pnp_device_id provided by work with const pnp_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/ethernet/3com/3c509.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index f66c9710cb81..077d01d9f141 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -392,7 +392,7 @@ static struct isa_driver el3_isa_driver = { static int isa_registered; #ifdef CONFIG_PNP -static struct pnp_device_id el3_pnp_ids[] = { +static const struct pnp_device_id el3_pnp_ids[] = { { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */ { .id = "TCM5091" }, /* 3Com Etherlink III */ { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */ -- cgit v1.2.3-55-g7522 From f26de110f4f1d8d2490d1f9f003d5abbde030f03 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 22:30:47 -0700 Subject: net: early init support for strparser It is useful to allow strparser to init sockets before the read_sock callback has been established. Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- net/strparser/strparser.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 0d18fbc6f870..434aa6637a52 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -373,6 +373,9 @@ static int strp_read_sock(struct strparser *strp) struct socket *sock = strp->sk->sk_socket; read_descriptor_t desc; + if (unlikely(!sock || !sock->ops || !sock->ops->read_sock)) + return -EBUSY; + desc.arg.data = strp; desc.error = 0; desc.count = 1; /* give more than one skb per call */ @@ -486,12 +489,7 @@ int strp_init(struct strparser *strp, struct sock *sk, * The upper layer calls strp_process for each skb to be parsed. */ - if (sk) { - struct socket *sock = sk->sk_socket; - - if (!sock->ops->read_sock || !sock->ops->peek_len) - return -EAFNOSUPPORT; - } else { + if (!sk) { if (!cb->lock || !cb->unlock) return -EINVAL; } -- cgit v1.2.3-55-g7522 From 45f91bdcd5c5ba559a4bb7c3a0e0709476cf570f Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 22:31:10 -0700 Subject: net: add sendmsg_locked and sendpage_locked to af_inet6 To complete the sendmsg_locked and sendpage_locked implementation add the hooks for af_inet6 as well. Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- net/ipv6/af_inet6.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 0a7c74049a0c..3b58ee709f33 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -554,6 +554,8 @@ const struct proto_ops inet6_stream_ops = { .recvmsg = inet_recvmsg, /* ok */ .mmap = sock_no_mmap, .sendpage = inet_sendpage, + .sendmsg_locked = tcp_sendmsg_locked, + .sendpage_locked = tcp_sendpage_locked, .splice_read = tcp_splice_read, .read_sock = tcp_read_sock, .peek_len = tcp_peek_len, -- cgit v1.2.3-55-g7522 From db5980d804d7158917ad4b9186c78b2a3f1db4ef Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 22:31:34 -0700 Subject: net: fixes for skb_send_sock A couple fixes to new skb_send_sock infrastructure. However, no users currently exist for this code (adding user in next handful of patches) so it should not be possible to trigger a panic with existing in-kernel code. Fixes: 306b13eb3cf9 ("proto_ops: Add locked held versions of sendmsg and sendpage") Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- net/core/skbuff.c | 2 +- net/socket.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index cb123590c674..917da73d3ab3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2285,7 +2285,7 @@ do_frag_list: slen = min_t(int, len, skb_headlen(skb) - offset); kv.iov_base = skb->data + offset; - kv.iov_len = len; + kv.iov_len = slen; memset(&msg, 0, sizeof(msg)); ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen); diff --git a/net/socket.c b/net/socket.c index b332d1e8e4e4..c729625eb5d3 100644 --- a/net/socket.c +++ b/net/socket.c @@ -658,7 +658,7 @@ int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, struct socket *sock = sk->sk_socket; if (!sock->ops->sendmsg_locked) - sock_no_sendmsg_locked(sk, msg, size); + return sock_no_sendmsg_locked(sk, msg, size); iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size); -- cgit v1.2.3-55-g7522 From b005fd189cec9407b700599e1e80e0552446ee79 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 22:31:58 -0700 Subject: bpf: introduce new program type for skbs on sockets A class of programs, run from strparser and soon from a new map type called sock map, are used with skb as the context but on established sockets. By creating a specific program type for these we can use bpf helpers that expect full sockets and get the verifier to ensure these helpers are not used out of context. The new type is BPF_PROG_TYPE_SK_SKB. This patch introduces the infrastructure and type. Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- include/linux/bpf_types.h | 1 + include/uapi/linux/bpf.h | 1 + net/core/filter.c | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 38 insertions(+) diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index b1e1035ca24b..4b72db30dacf 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -11,6 +11,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops) BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops) BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops_prog_ops) +BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb_prog_ops) #endif #ifdef CONFIG_BPF_EVENTS BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 91da8371a2d0..2e796e384aeb 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -127,6 +127,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_LWT_OUT, BPF_PROG_TYPE_LWT_XMIT, BPF_PROG_TYPE_SOCK_OPS, + BPF_PROG_TYPE_SK_SKB, }; enum bpf_attach_type { diff --git a/net/core/filter.c b/net/core/filter.c index e0688a855c47..46321033ae0e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3234,6 +3234,20 @@ static const struct bpf_func_proto * } } +static const struct bpf_func_proto *sk_skb_func_proto(enum bpf_func_id func_id) +{ + switch (func_id) { + case BPF_FUNC_skb_load_bytes: + return &bpf_skb_load_bytes_proto; + case BPF_FUNC_get_socket_cookie: + return &bpf_get_socket_cookie_proto; + case BPF_FUNC_get_socket_uid: + return &bpf_get_socket_uid_proto; + default: + return bpf_base_func_proto(func_id); + } +} + static const struct bpf_func_proto * lwt_xmit_func_proto(enum bpf_func_id func_id) { @@ -3525,6 +3539,22 @@ static bool sock_ops_is_valid_access(int off, int size, return __is_valid_sock_ops_access(off, size); } +static bool sk_skb_is_valid_access(int off, int size, + enum bpf_access_type type, + struct bpf_insn_access_aux *info) +{ + 
switch (off) { + case bpf_ctx_range(struct __sk_buff, data): + info->reg_type = PTR_TO_PACKET; + break; + case bpf_ctx_range(struct __sk_buff, data_end): + info->reg_type = PTR_TO_PACKET_END; + break; + } + + return bpf_skb_is_valid_access(off, size, type, info); +} + static u32 bpf_convert_ctx_access(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, @@ -3994,6 +4024,12 @@ const struct bpf_verifier_ops sock_ops_prog_ops = { .convert_ctx_access = sock_ops_convert_ctx_access, }; +const struct bpf_verifier_ops sk_skb_prog_ops = { + .get_func_proto = sk_skb_func_proto, + .is_valid_access = sk_skb_is_valid_access, + .convert_ctx_access = bpf_convert_ctx_access, +}; + int sk_detach_filter(struct sock *sk) { int ret = -ENOENT; -- cgit v1.2.3-55-g7522 From a6f6df69c48b86cd84f36c70593eb4968fceb34a Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 22:32:22 -0700 Subject: bpf: export bpf_prog_inc_not_zero bpf_prog_inc_not_zero will be used by upcoming sockmap patches this patch simply exports it so we can pull it in. Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- include/linux/bpf.h | 7 +++++++ kernel/bpf/syscall.c | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 39229c455cba..d6e1de8ce0fc 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -252,6 +252,7 @@ struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); void bpf_prog_sub(struct bpf_prog *prog, int i); struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); +struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); void bpf_prog_put(struct bpf_prog *prog); int __bpf_prog_charge(struct user_struct *user, u32 pages); void __bpf_prog_uncharge(struct user_struct *user, u32 pages); @@ -344,6 +345,12 @@ static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog) return ERR_PTR(-EOPNOTSUPP); } +static inline struct bpf_prog *__must_check +bpf_prog_inc_not_zero(struct bpf_prog *prog) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) { return 0; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index fbe09a0cccf4..17e29f596de1 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -911,7 +911,7 @@ struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) EXPORT_SYMBOL_GPL(bpf_prog_inc); /* prog_idr_lock should have been held */ -static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) +struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) { int refold; @@ -927,6 +927,7 @@ static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) return prog; } +EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type) { -- cgit v1.2.3-55-g7522 From 174a79ff9515f400b9a6115643dafd62a635b7e6 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 22:32:47 -0700 Subject: bpf: sockmap with sk redirect support Recently we added a new map type called dev map used to forward XDP packets between ports (6093ec2dc313). This patches introduces a similar notion for sockets. A sockmap allows users to add participating sockets to a map. 
When sockets are added to the map enough context is stored with the map entry to use the entry with a new helper bpf_sk_redirect_map(map, key, flags) This helper (analogous to bpf_redirect_map in XDP) is given the map and an entry in the map. When called from a sockmap program, discussed below, the skb will be sent on the socket using skb_send_sock(). With the above we need a bpf program to call the helper from that will then implement the send logic. The initial site implemented in this series is the recv_sock hook. For this to work we implemented a map attach command to add attributes to a map. In sockmap we add two programs a parse program and a verdict program. The parse program uses strparser to build messages and pass them to the verdict program. The parse programs use the normal strparser semantics. The verdict program is of type SK_SKB. The verdict program returns a verdict SK_DROP, or SK_REDIRECT for now. Additional actions may be added later. When SK_REDIRECT is returned, expected when bpf program uses bpf_sk_redirect_map(), the sockmap logic will consult per cpu variables set by the helper routine and pull the sock entry out of the sock map. This pattern follows the existing redirect logic in cls and xdp programs. This gives the flow, recv_sock -> str_parser (parse_prog) -> verdict_prog -> skb_send_sock \ -> kfree_skb As an example use case a message based load balancer may use specific logic in the verdict program to select the sock to send on. Sample programs are provided in future patches that hopefully illustrate the user interfaces. Also selftests are in follow-on patches. Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- include/linux/bpf.h | 7 +- include/linux/bpf_types.h | 1 + include/linux/filter.h | 2 + include/uapi/linux/bpf.h | 33 +- kernel/bpf/Makefile | 2 +- kernel/bpf/sockmap.c | 792 ++++++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/syscall.c | 51 ++- kernel/bpf/verifier.c | 14 + net/core/filter.c | 43 +++ 9 files changed, 940 insertions(+), 5 deletions(-) create mode 100644 kernel/bpf/sockmap.c diff --git a/include/linux/bpf.h b/include/linux/bpf.h index d6e1de8ce0fc..a4145e9c74b5 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -16,6 +16,7 @@ #include struct perf_event; +struct bpf_prog; struct bpf_map; /* map is generic key/value storage optionally accesible by eBPF programs */ @@ -37,6 +38,8 @@ struct bpf_map_ops { void (*map_fd_put_ptr)(void *ptr); u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); u32 (*map_fd_sys_lookup_elem)(void *ptr); + int (*map_attach)(struct bpf_map *map, + struct bpf_prog *p1, struct bpf_prog *p2); }; struct bpf_map { @@ -138,8 +141,6 @@ enum bpf_reg_type { PTR_TO_PACKET_END, /* skb->data + headlen */ }; -struct bpf_prog; - /* The information passed from prog-specific *_is_valid_access * back to the verifier. 
*/ @@ -312,6 +313,7 @@ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); /* Map specifics */ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); +struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); void __dev_map_insert_ctx(struct bpf_map *map, u32 index); void __dev_map_flush(struct bpf_map *map); @@ -391,6 +393,7 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_skb_vlan_push_proto; extern const struct bpf_func_proto bpf_skb_vlan_pop_proto; extern const struct bpf_func_proto bpf_get_stackid_proto; +extern const struct bpf_func_proto bpf_sock_map_update_proto; /* Shared helpers among cBPF and eBPF. */ void bpf_user_rnd_init_once(void); diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 4b72db30dacf..fa805074d168 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -38,4 +38,5 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) #endif diff --git a/include/linux/filter.h b/include/linux/filter.h index d19ed3c15e1e..7015116331af 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -727,6 +727,8 @@ void xdp_do_flush_map(void); void bpf_warn_invalid_xdp_action(u32 act); void bpf_warn_invalid_xdp_redirect(u32 ifindex); +struct sock *do_sk_redirect_map(void); + #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; extern int bpf_jit_harden; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 2e796e384aeb..7f774769e3f5 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -110,6 +110,7 @@ enum bpf_map_type { BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_DEVMAP, + BPF_MAP_TYPE_SOCKMAP, }; enum bpf_prog_type { @@ -135,11 +136,15 @@ enum bpf_attach_type { BPF_CGROUP_INET_EGRESS, BPF_CGROUP_INET_SOCK_CREATE, BPF_CGROUP_SOCK_OPS, + BPF_CGROUP_SMAP_INGRESS, __MAX_BPF_ATTACH_TYPE }; #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE +/* If BPF_SOCKMAP_STRPARSER is used sockmap will use strparser on receive */ +#define BPF_SOCKMAP_STRPARSER (1U << 0) + /* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command * to the given target_fd cgroup the descendent cgroup will be able to * override effective bpf program that was inherited from this cgroup @@ -211,6 +216,7 @@ union bpf_attr { __u32 attach_bpf_fd; /* eBPF program to attach */ __u32 attach_type; __u32 attach_flags; + __u32 attach_bpf_fd2; }; struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ @@ -557,6 +563,23 @@ union bpf_attr { * @mode: operation mode (enum bpf_adj_room_mode) * @flags: reserved for future use * Return: 0 on success or negative error code + * + * int bpf_sk_redirect_map(map, key, flags) + * Redirect skb to a sock in map using key as a lookup key for the + * sock in map. 
+ * @map: pointer to sockmap + * @key: key to lookup sock in map + * @flags: reserved for future use + * Return: SK_REDIRECT + * + * int bpf_sock_map_update(skops, map, key, flags, map_flags) + * @skops: pointer to bpf_sock_ops + * @map: pointer to sockmap to update + * @key: key to insert/update sock in map + * @flags: same flags as map update elem + * @map_flags: sock map specific flags + * bit 1: Enable strparser + * other bits: reserved */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -610,7 +633,9 @@ union bpf_attr { FN(set_hash), \ FN(setsockopt), \ FN(skb_adjust_room), \ - FN(redirect_map), + FN(redirect_map), \ + FN(sk_redirect_map), \ + FN(sock_map_update), \ /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -747,6 +772,12 @@ struct xdp_md { __u32 data_end; }; +enum sk_action { + SK_ABORTED = 0, + SK_DROP, + SK_REDIRECT, +}; + #define BPF_TAG_SIZE 8 struct bpf_prog_info { diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 2f0bcda40e90..aa24287db888 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -3,7 +3,7 @@ obj-y := core.o obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o ifeq ($(CONFIG_NET),y) -obj-$(CONFIG_BPF_SYSCALL) += devmap.o +obj-$(CONFIG_BPF_SYSCALL) += devmap.o sockmap.o endif ifeq ($(CONFIG_PERF_EVENTS),y) obj-$(CONFIG_BPF_SYSCALL) += stackmap.o diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c new file mode 100644 index 000000000000..792f0addfafa --- /dev/null +++ b/kernel/bpf/sockmap.c @@ -0,0 +1,792 @@ +/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +/* A BPF sock_map is used to store sock objects. This is primarly used + * for doing socket redirect with BPF helper routines. + * + * A sock map may have two BPF programs attached to it, a program used + * to parse packets and a program to provide a verdict and redirect + * decision on the packet. If no BPF parse program is provided it is + * assumed that every skb is a "message" (skb->len). Otherwise the + * parse program is attached to strparser and used to build messages + * that may span multiple skbs. The verdict program will either select + * a socket to send/receive the skb on or provide the drop code indicating + * the skb should be dropped. More actions may be added later as needed. + * The default program will drop packets. + * + * For reference this program is similar to devmap used in XDP context + * reviewing these together may be useful. For an example please review + * ./samples/bpf/sockmap/. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct bpf_stab { + struct bpf_map map; + struct sock **sock_map; + struct bpf_prog *bpf_parse; + struct bpf_prog *bpf_verdict; + refcount_t refcnt; +}; + +enum smap_psock_state { + SMAP_TX_RUNNING, +}; + +struct smap_psock { + struct rcu_head rcu; + + /* datapath variables */ + struct sk_buff_head rxqueue; + bool strp_enabled; + + /* datapath error path cache across tx work invocations */ + int save_rem; + int save_off; + struct sk_buff *save_skb; + + struct strparser strp; + struct bpf_prog *bpf_parse; + struct bpf_prog *bpf_verdict; + struct bpf_stab *stab; + + /* Back reference used when sock callback trigger sockmap operations */ + int key; + struct sock *sock; + unsigned long state; + + struct work_struct tx_work; + struct work_struct gc_work; + + void (*save_data_ready)(struct sock *sk); + void (*save_write_space)(struct sock *sk); + void (*save_state_change)(struct sock *sk); +}; + +static inline struct smap_psock *smap_psock_sk(const struct sock *sk) +{ + return (struct smap_psock *)rcu_dereference_sk_user_data(sk); +} + +static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) +{ + struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict); + int rc; + + if (unlikely(!prog)) + return SK_DROP; + + skb_orphan(skb); + skb->sk = psock->sock; + bpf_compute_data_end(skb); + rc = (*prog->bpf_func)(skb, prog->insnsi); + skb->sk = NULL; + + return rc; +} + +static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb) +{ + struct sock *sock; + int rc; + + /* Because we use per cpu values to feed input from sock redirect + * in BPF program to do_sk_redirect_map() call we need to ensure we + * are not preempted. RCU read lock is not sufficient in this case + * with CONFIG_PREEMPT_RCU enabled so we must be explicit here. + */ + preempt_disable(); + rc = smap_verdict_func(psock, skb); + switch (rc) { + case SK_REDIRECT: + sock = do_sk_redirect_map(); + preempt_enable(); + if (likely(sock)) { + struct smap_psock *peer = smap_psock_sk(sock); + + if (likely(peer && + test_bit(SMAP_TX_RUNNING, &peer->state) && + sk_stream_memory_free(peer->sock))) { + peer->sock->sk_wmem_queued += skb->truesize; + sk_mem_charge(peer->sock, skb->truesize); + skb_queue_tail(&peer->rxqueue, skb); + schedule_work(&peer->tx_work); + break; + } + } + /* Fall through and free skb otherwise */ + case SK_DROP: + default: + preempt_enable(); + kfree_skb(skb); + } +} + +static void smap_report_sk_error(struct smap_psock *psock, int err) +{ + struct sock *sk = psock->sock; + + sk->sk_err = err; + sk->sk_error_report(sk); +} + +static void smap_release_sock(struct sock *sock); + +/* Called with lock_sock(sk) held */ +static void smap_state_change(struct sock *sk) +{ + struct smap_psock *psock; + struct sock *osk; + + rcu_read_lock(); + + /* Allowing transitions into an established syn_recv states allows + * for early binding sockets to a smap object before the connection + * is established. + */ + switch (sk->sk_state) { + case TCP_SYN_RECV: + case TCP_ESTABLISHED: + break; + case TCP_CLOSE_WAIT: + case TCP_CLOSING: + case TCP_LAST_ACK: + case TCP_FIN_WAIT1: + case TCP_FIN_WAIT2: + case TCP_LISTEN: + break; + case TCP_CLOSE: + /* Only release if the map entry is in fact the sock in + * question. There is a case where the operator deletes + * the sock from the map, but the TCP sock is closed before + * the psock is detached. Use cmpxchg to verify correct + * sock is removed. 
+ */ + psock = smap_psock_sk(sk); + if (unlikely(!psock)) + break; + osk = cmpxchg(&psock->stab->sock_map[psock->key], sk, NULL); + if (osk == sk) + smap_release_sock(sk); + break; + default: + smap_report_sk_error(psock, EPIPE); + break; + } + rcu_read_unlock(); +} + +static void smap_read_sock_strparser(struct strparser *strp, + struct sk_buff *skb) +{ + struct smap_psock *psock; + + rcu_read_lock(); + psock = container_of(strp, struct smap_psock, strp); + smap_do_verdict(psock, skb); + rcu_read_unlock(); +} + +/* Called with lock held on socket */ +static void smap_data_ready(struct sock *sk) +{ + struct smap_psock *psock; + + write_lock_bh(&sk->sk_callback_lock); + psock = smap_psock_sk(sk); + if (likely(psock)) + strp_data_ready(&psock->strp); + write_unlock_bh(&sk->sk_callback_lock); +} + +static void smap_tx_work(struct work_struct *w) +{ + struct smap_psock *psock; + struct sk_buff *skb; + int rem, off, n; + + psock = container_of(w, struct smap_psock, tx_work); + + /* lock sock to avoid losing sk_socket at some point during loop */ + lock_sock(psock->sock); + if (psock->save_skb) { + skb = psock->save_skb; + rem = psock->save_rem; + off = psock->save_off; + psock->save_skb = NULL; + goto start; + } + + while ((skb = skb_dequeue(&psock->rxqueue))) { + rem = skb->len; + off = 0; +start: + do { + if (likely(psock->sock->sk_socket)) + n = skb_send_sock_locked(psock->sock, + skb, off, rem); + else + n = -EINVAL; + if (n <= 0) { + if (n == -EAGAIN) { + /* Retry when space is available */ + psock->save_skb = skb; + psock->save_rem = rem; + psock->save_off = off; + goto out; + } + /* Hard errors break pipe and stop xmit */ + smap_report_sk_error(psock, n ? -n : EPIPE); + clear_bit(SMAP_TX_RUNNING, &psock->state); + sk_mem_uncharge(psock->sock, skb->truesize); + psock->sock->sk_wmem_queued -= skb->truesize; + kfree_skb(skb); + goto out; + } + rem -= n; + off += n; + } while (rem); + sk_mem_uncharge(psock->sock, skb->truesize); + psock->sock->sk_wmem_queued -= skb->truesize; + kfree_skb(skb); + } +out: + release_sock(psock->sock); +} + +static void smap_write_space(struct sock *sk) +{ + struct smap_psock *psock; + + rcu_read_lock(); + psock = smap_psock_sk(sk); + if (likely(psock && test_bit(SMAP_TX_RUNNING, &psock->state))) + schedule_work(&psock->tx_work); + rcu_read_unlock(); +} + +static void smap_stop_sock(struct smap_psock *psock, struct sock *sk) +{ + write_lock_bh(&sk->sk_callback_lock); + if (!psock->strp_enabled) + goto out; + sk->sk_data_ready = psock->save_data_ready; + sk->sk_write_space = psock->save_write_space; + sk->sk_state_change = psock->save_state_change; + psock->save_data_ready = NULL; + psock->save_write_space = NULL; + psock->save_state_change = NULL; + strp_stop(&psock->strp); + psock->strp_enabled = false; +out: + write_unlock_bh(&sk->sk_callback_lock); +} + +static void smap_destroy_psock(struct rcu_head *rcu) +{ + struct smap_psock *psock = container_of(rcu, + struct smap_psock, rcu); + + /* Now that a grace period has passed there is no longer + * any reference to this sock in the sockmap so we can + * destroy the psock, strparser, and bpf programs. 
But, + * because we use workqueue sync operations we can not + * do it in rcu context + */ + schedule_work(&psock->gc_work); +} + +static void smap_release_sock(struct sock *sock) +{ + struct smap_psock *psock = smap_psock_sk(sock); + + smap_stop_sock(psock, sock); + clear_bit(SMAP_TX_RUNNING, &psock->state); + rcu_assign_sk_user_data(sock, NULL); + call_rcu_sched(&psock->rcu, smap_destroy_psock); +} + +static int smap_parse_func_strparser(struct strparser *strp, + struct sk_buff *skb) +{ + struct smap_psock *psock; + struct bpf_prog *prog; + int rc; + + rcu_read_lock(); + psock = container_of(strp, struct smap_psock, strp); + prog = READ_ONCE(psock->bpf_parse); + + if (unlikely(!prog)) { + rcu_read_unlock(); + return skb->len; + } + + /* Attach socket for bpf program to use if needed we can do this + * because strparser clones the skb before handing it to a upper + * layer, meaning skb_orphan has been called. We NULL sk on the + * way out to ensure we don't trigger a BUG_ON in skb/sk operations + * later and because we are not charging the memory of this skb to + * any socket yet. + */ + skb->sk = psock->sock; + bpf_compute_data_end(skb); + rc = (*prog->bpf_func)(skb, prog->insnsi); + skb->sk = NULL; + rcu_read_unlock(); + return rc; +} + + +static int smap_read_sock_done(struct strparser *strp, int err) +{ + return err; +} + +static int smap_init_sock(struct smap_psock *psock, + struct sock *sk) +{ + struct strp_callbacks cb; + + memset(&cb, 0, sizeof(cb)); + cb.rcv_msg = smap_read_sock_strparser; + cb.parse_msg = smap_parse_func_strparser; + cb.read_sock_done = smap_read_sock_done; + return strp_init(&psock->strp, sk, &cb); +} + +static void smap_init_progs(struct smap_psock *psock, + struct bpf_stab *stab, + struct bpf_prog *verdict, + struct bpf_prog *parse) +{ + struct bpf_prog *orig_parse, *orig_verdict; + + orig_parse = xchg(&psock->bpf_parse, parse); + orig_verdict = xchg(&psock->bpf_verdict, verdict); + + if (orig_verdict) + bpf_prog_put(orig_verdict); + if (orig_parse) + bpf_prog_put(orig_parse); +} + +static void smap_start_sock(struct smap_psock *psock, struct sock *sk) +{ + if (sk->sk_data_ready == smap_data_ready) + return; + psock->save_data_ready = sk->sk_data_ready; + psock->save_write_space = sk->sk_write_space; + psock->save_state_change = sk->sk_state_change; + sk->sk_data_ready = smap_data_ready; + sk->sk_write_space = smap_write_space; + sk->sk_state_change = smap_state_change; + psock->strp_enabled = true; +} + +static void sock_map_remove_complete(struct bpf_stab *stab) +{ + bpf_map_area_free(stab->sock_map); + kfree(stab); +} + +static void smap_gc_work(struct work_struct *w) +{ + struct smap_psock *psock; + + psock = container_of(w, struct smap_psock, gc_work); + + /* no callback lock needed because we already detached sockmap ops */ + if (psock->strp_enabled) + strp_done(&psock->strp); + + cancel_work_sync(&psock->tx_work); + __skb_queue_purge(&psock->rxqueue); + + /* At this point all strparser and xmit work must be complete */ + if (psock->bpf_parse) + bpf_prog_put(psock->bpf_parse); + if (psock->bpf_verdict) + bpf_prog_put(psock->bpf_verdict); + + if (refcount_dec_and_test(&psock->stab->refcnt)) + sock_map_remove_complete(psock->stab); + + sock_put(psock->sock); + kfree(psock); +} + +static struct smap_psock *smap_init_psock(struct sock *sock, + struct bpf_stab *stab) +{ + struct smap_psock *psock; + + psock = kzalloc(sizeof(struct smap_psock), GFP_ATOMIC | __GFP_NOWARN); + if (!psock) + return ERR_PTR(-ENOMEM); + + psock->sock = sock; + 
skb_queue_head_init(&psock->rxqueue); + INIT_WORK(&psock->tx_work, smap_tx_work); + INIT_WORK(&psock->gc_work, smap_gc_work); + + rcu_assign_sk_user_data(sock, psock); + sock_hold(sock); + return psock; +} + +static struct bpf_map *sock_map_alloc(union bpf_attr *attr) +{ + struct bpf_stab *stab; + int err = -EINVAL; + u64 cost; + + /* check sanity of attributes */ + if (attr->max_entries == 0 || attr->key_size != 4 || + attr->value_size != 4 || attr->map_flags) + return ERR_PTR(-EINVAL); + + if (attr->value_size > KMALLOC_MAX_SIZE) + return ERR_PTR(-E2BIG); + + stab = kzalloc(sizeof(*stab), GFP_USER); + if (!stab) + return ERR_PTR(-ENOMEM); + + /* mandatory map attributes */ + stab->map.map_type = attr->map_type; + stab->map.key_size = attr->key_size; + stab->map.value_size = attr->value_size; + stab->map.max_entries = attr->max_entries; + stab->map.map_flags = attr->map_flags; + + /* make sure page count doesn't overflow */ + cost = (u64) stab->map.max_entries * sizeof(struct sock *); + if (cost >= U32_MAX - PAGE_SIZE) + goto free_stab; + + stab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + + /* if map size is larger than memlock limit, reject it early */ + err = bpf_map_precharge_memlock(stab->map.pages); + if (err) + goto free_stab; + + stab->sock_map = bpf_map_area_alloc(stab->map.max_entries * + sizeof(struct sock *)); + if (!stab->sock_map) + goto free_stab; + + refcount_set(&stab->refcnt, 1); + return &stab->map; +free_stab: + kfree(stab); + return ERR_PTR(err); +} + +static void sock_map_free(struct bpf_map *map) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + int i; + + synchronize_rcu(); + + /* At this point no update, lookup or delete operations can happen. + * However, be aware we can still get a socket state event updates, + * and data ready callabacks that reference the psock from sk_user_data + * Also psock worker threads are still in-flight. So smap_release_sock + * will only free the psock after cancel_sync on the worker threads + * and a grace period expire to ensure psock is really safe to remove. + */ + rcu_read_lock(); + for (i = 0; i < stab->map.max_entries; i++) { + struct sock *sock; + + sock = xchg(&stab->sock_map[i], NULL); + if (!sock) + continue; + + smap_release_sock(sock); + } + rcu_read_unlock(); + + if (stab->bpf_verdict) + bpf_prog_put(stab->bpf_verdict); + if (stab->bpf_parse) + bpf_prog_put(stab->bpf_parse); + + if (refcount_dec_and_test(&stab->refcnt)) + sock_map_remove_complete(stab); +} + +static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next_key) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + u32 i = key ? 
*(u32 *)key : U32_MAX; + u32 *next = (u32 *)next_key; + + if (i >= stab->map.max_entries) { + *next = 0; + return 0; + } + + if (i == stab->map.max_entries - 1) + return -ENOENT; + + *next = i + 1; + return 0; +} + +struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + + if (key >= map->max_entries) + return NULL; + + return READ_ONCE(stab->sock_map[key]); +} + +static int sock_map_delete_elem(struct bpf_map *map, void *key) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + int k = *(u32 *)key; + struct sock *sock; + + if (k >= map->max_entries) + return -EINVAL; + + sock = xchg(&stab->sock_map[k], NULL); + if (!sock) + return -EINVAL; + + smap_release_sock(sock); + return 0; +} + +/* Locking notes: Concurrent updates, deletes, and lookups are allowed and are + * done inside rcu critical sections. This ensures on updates that the psock + * will not be released via smap_release_sock() until concurrent updates/deletes + * complete. All operations operate on sock_map using cmpxchg and xchg + * operations to ensure we do not get stale references. Any reads into the + * map must be done with READ_ONCE() because of this. + * + * A psock is destroyed via call_rcu and after any worker threads are cancelled + * and syncd so we are certain all references from the update/lookup/delete + * operations as well as references in the data path are no longer in use. + * + * A psock object holds a refcnt on the sockmap it is attached to and this is + * not decremented until after a RCU grace period and garbage collection occurs. + * This ensures the map is not free'd until psocks linked to it are removed. The + * map link is used when the independent sock events trigger map deletion. + * + * Psocks may only participate in one sockmap at a time. Users that try to + * join a single sock to multiple maps will get an error. + * + * Last, but not least, it is possible the socket is closed while running + * an update on an existing psock. This will release the psock, but again + * not until the update has completed due to rcu grace period rules. + */ +static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops, + struct bpf_map *map, + void *key, u64 flags, u64 map_flags) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + struct bpf_prog *verdict, *parse; + struct smap_psock *psock = NULL; + struct sock *old_sock, *sock; + u32 i = *(u32 *)key; + bool update = false; + int err = 0; + + if (unlikely(flags > BPF_EXIST)) + return -EINVAL; + + if (unlikely(i >= stab->map.max_entries)) + return -E2BIG; + + if (unlikely(map_flags > BPF_SOCKMAP_STRPARSER)) + return -EINVAL; + + verdict = parse = NULL; + sock = READ_ONCE(stab->sock_map[i]); + + if (flags == BPF_EXIST || flags == BPF_ANY) { + if (!sock && flags == BPF_EXIST) { + return -ENOENT; + } else if (sock && sock != skops->sk) { + return -EINVAL; + } else if (sock) { + psock = smap_psock_sk(sock); + if (unlikely(!psock)) + return -EBUSY; + update = true; + } + } else if (sock && BPF_NOEXIST) { + return -EEXIST; + } + + /* reserve BPF programs early so can abort easily on failures */ + if (map_flags & BPF_SOCKMAP_STRPARSER) { + verdict = READ_ONCE(stab->bpf_verdict); + parse = READ_ONCE(stab->bpf_parse); + + if (!verdict || !parse) + return -ENOENT; + + /* bpf prog refcnt may be zero if a concurrent attach operation + * removes the program after the above READ_ONCE() but before + * we increment the refcnt. 
If this is the case abort with an + * error. + */ + verdict = bpf_prog_inc_not_zero(stab->bpf_verdict); + if (IS_ERR(verdict)) + return PTR_ERR(verdict); + + parse = bpf_prog_inc_not_zero(stab->bpf_parse); + if (IS_ERR(parse)) { + bpf_prog_put(verdict); + return PTR_ERR(parse); + } + } + + if (!psock) { + sock = skops->sk; + if (rcu_dereference_sk_user_data(sock)) + return -EEXIST; + psock = smap_init_psock(sock, stab); + if (IS_ERR(psock)) { + if (verdict) + bpf_prog_put(verdict); + if (parse) + bpf_prog_put(parse); + return PTR_ERR(psock); + } + psock->key = i; + psock->stab = stab; + refcount_inc(&stab->refcnt); + set_bit(SMAP_TX_RUNNING, &psock->state); + } + + if (map_flags & BPF_SOCKMAP_STRPARSER) { + write_lock_bh(&sock->sk_callback_lock); + if (psock->strp_enabled) + goto start_done; + err = smap_init_sock(psock, sock); + if (err) + goto out; + smap_init_progs(psock, stab, verdict, parse); + smap_start_sock(psock, sock); +start_done: + write_unlock_bh(&sock->sk_callback_lock); + } else if (update) { + smap_stop_sock(psock, sock); + } + + if (!update) { + old_sock = xchg(&stab->sock_map[i], skops->sk); + if (old_sock) + smap_release_sock(old_sock); + } + + return 0; +out: + write_unlock_bh(&sock->sk_callback_lock); + if (!update) + smap_release_sock(sock); + return err; +} + +static int sock_map_attach_prog(struct bpf_map *map, + struct bpf_prog *parse, + struct bpf_prog *verdict) +{ + struct bpf_stab *stab = container_of(map, struct bpf_stab, map); + struct bpf_prog *_parse, *_verdict; + + _parse = xchg(&stab->bpf_parse, parse); + _verdict = xchg(&stab->bpf_verdict, verdict); + + if (_parse) + bpf_prog_put(_parse); + if (_verdict) + bpf_prog_put(_verdict); + + return 0; +} + +static void *sock_map_lookup(struct bpf_map *map, void *key) +{ + return NULL; +} + +static int sock_map_update_elem(struct bpf_map *map, + void *key, void *value, u64 flags) +{ + struct bpf_sock_ops_kern skops; + u32 fd = *(u32 *)value; + struct socket *socket; + int err; + + socket = sockfd_lookup(fd, &err); + if (!socket) + return err; + + skops.sk = socket->sk; + if (!skops.sk) { + fput(socket->file); + return -EINVAL; + } + + err = sock_map_ctx_update_elem(&skops, map, key, + flags, BPF_SOCKMAP_STRPARSER); + fput(socket->file); + return err; +} + +const struct bpf_map_ops sock_map_ops = { + .map_alloc = sock_map_alloc, + .map_free = sock_map_free, + .map_lookup_elem = sock_map_lookup, + .map_get_next_key = sock_map_get_next_key, + .map_update_elem = sock_map_update_elem, + .map_delete_elem = sock_map_delete_elem, + .map_attach = sock_map_attach_prog, +}; + +BPF_CALL_5(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock, + struct bpf_map *, map, void *, key, u64, flags, u64, map_flags) +{ + WARN_ON_ONCE(!rcu_read_lock_held()); + return sock_map_ctx_update_elem(bpf_sock, map, key, flags, map_flags); +} + +const struct bpf_func_proto bpf_sock_map_update_proto = { + .func = bpf_sock_map_update, + .gpl_only = false, + .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_CONST_MAP_PTR, + .arg3_type = ARG_PTR_TO_MAP_KEY, + .arg4_type = ARG_ANYTHING, + .arg5_type = ARG_ANYTHING, +}; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 17e29f596de1..d2f2bdf71ffa 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1087,7 +1087,50 @@ static int bpf_obj_get(const union bpf_attr *attr) #ifdef CONFIG_CGROUP_BPF -#define BPF_PROG_ATTACH_LAST_FIELD attach_flags +#define BPF_PROG_ATTACH_LAST_FIELD attach_bpf_fd2 + +static int 
sockmap_get_from_fd(const union bpf_attr *attr, int ptype) +{ + struct bpf_prog *prog1, *prog2; + int ufd = attr->target_fd; + struct bpf_map *map; + struct fd f; + int err; + + f = fdget(ufd); + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); + + if (!map->ops->map_attach) { + fdput(f); + return -EOPNOTSUPP; + } + + prog1 = bpf_prog_get_type(attr->attach_bpf_fd, ptype); + if (IS_ERR(prog1)) { + fdput(f); + return PTR_ERR(prog1); + } + + prog2 = bpf_prog_get_type(attr->attach_bpf_fd2, ptype); + if (IS_ERR(prog2)) { + fdput(f); + bpf_prog_put(prog1); + return PTR_ERR(prog2); + } + + err = map->ops->map_attach(map, prog1, prog2); + if (err) { + fdput(f); + bpf_prog_put(prog1); + bpf_prog_put(prog2); + return PTR_ERR(map); + } + + fdput(f); + return err; +} static int bpf_prog_attach(const union bpf_attr *attr) { @@ -1116,10 +1159,16 @@ static int bpf_prog_attach(const union bpf_attr *attr) case BPF_CGROUP_SOCK_OPS: ptype = BPF_PROG_TYPE_SOCK_OPS; break; + case BPF_CGROUP_SMAP_INGRESS: + ptype = BPF_PROG_TYPE_SK_SKB; + break; default: return -EINVAL; } + if (attr->attach_type == BPF_CGROUP_SMAP_INGRESS) + return sockmap_get_from_fd(attr, ptype); + prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); if (IS_ERR(prog)) return PTR_ERR(prog); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7dd96d064be1..a71bc0996572 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1522,6 +1522,12 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; + case BPF_MAP_TYPE_SOCKMAP: + if (func_id != BPF_FUNC_sk_redirect_map && + func_id != BPF_FUNC_sock_map_update && + func_id != BPF_FUNC_map_delete_elem) + goto error; + break; default: break; } @@ -1550,6 +1556,14 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) if (map->map_type != BPF_MAP_TYPE_DEVMAP) goto error; break; + case BPF_FUNC_sk_redirect_map: + if (map->map_type != BPF_MAP_TYPE_SOCKMAP) + goto error; + break; + case BPF_FUNC_sock_map_update: + if (map->map_type != BPF_MAP_TYPE_SOCKMAP) + goto error; + break; default: break; } diff --git a/net/core/filter.c b/net/core/filter.c index 46321033ae0e..8e136578488c 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1858,6 +1858,45 @@ static const struct bpf_func_proto bpf_redirect_map_proto = { .arg3_type = ARG_ANYTHING, }; +BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags) +{ + struct redirect_info *ri = this_cpu_ptr(&redirect_info); + + if (unlikely(flags)) + return SK_ABORTED; + + ri->ifindex = key; + ri->flags = flags; + ri->map = map; + + return SK_REDIRECT; +} + +struct sock *do_sk_redirect_map(void) +{ + struct redirect_info *ri = this_cpu_ptr(&redirect_info); + struct sock *sk = NULL; + + if (ri->map) { + sk = __sock_map_lookup_elem(ri->map, ri->ifindex); + + ri->ifindex = 0; + ri->map = NULL; + /* we do not clear flags for future lookup */ + } + + return sk; +} + +static const struct bpf_func_proto bpf_sk_redirect_map_proto = { + .func = bpf_sk_redirect_map, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) { return task_get_classid(skb); @@ -3229,6 +3268,8 @@ static const struct bpf_func_proto * switch (func_id) { case BPF_FUNC_setsockopt: return &bpf_setsockopt_proto; + case BPF_FUNC_sock_map_update: + return 
&bpf_sock_map_update_proto; default: return bpf_base_func_proto(func_id); } @@ -3243,6 +3284,8 @@ static const struct bpf_func_proto *sk_skb_func_proto(enum bpf_func_id func_id) return &bpf_get_socket_cookie_proto; case BPF_FUNC_get_socket_uid: return &bpf_get_socket_uid_proto; + case BPF_FUNC_sk_redirect_map: + return &bpf_sk_redirect_map_proto; default: return bpf_base_func_proto(func_id); } -- cgit v1.2.3-55-g7522 From 8a31db5615667956c513d205cfb06885c3ec6d0b Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 22:33:09 -0700 Subject: bpf: add access to sock fields and pkt data from sk_skb programs Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- include/uapi/linux/bpf.h | 9 +++ kernel/bpf/verifier.c | 1 + net/core/filter.c | 169 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 179 insertions(+) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 7f774769e3f5..5ecbe812a2cc 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -712,6 +712,15 @@ struct __sk_buff { __u32 data; __u32 data_end; __u32 napi_id; + + /* accessed by BPF_PROG_TYPE_sk_skb types */ + __u32 family; + __u32 remote_ip4; /* Stored in network byte order */ + __u32 local_ip4; /* Stored in network byte order */ + __u32 remote_ip6[4]; /* Stored in network byte order */ + __u32 local_ip6[4]; /* Stored in network byte order */ + __u32 remote_port; /* Stored in network byte order */ + __u32 local_port; /* stored in host byte order */ }; struct bpf_tunnel_key { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index a71bc0996572..958ba84a9995 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -886,6 +886,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: + case BPF_PROG_TYPE_SK_SKB: if (meta) return meta->pkt_access; diff --git a/net/core/filter.c b/net/core/filter.c index 8e136578488c..e9f8dcef6c57 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3278,8 +3278,16 @@ static const struct bpf_func_proto * static const struct bpf_func_proto *sk_skb_func_proto(enum bpf_func_id func_id) { switch (func_id) { + case BPF_FUNC_skb_store_bytes: + return &bpf_skb_store_bytes_proto; case BPF_FUNC_skb_load_bytes: return &bpf_skb_load_bytes_proto; + case BPF_FUNC_skb_pull_data: + return &bpf_skb_pull_data_proto; + case BPF_FUNC_skb_change_tail: + return &bpf_skb_change_tail_proto; + case BPF_FUNC_skb_change_head: + return &bpf_skb_change_head_proto; case BPF_FUNC_get_socket_cookie: return &bpf_get_socket_cookie_proto; case BPF_FUNC_get_socket_uid: @@ -3343,6 +3351,10 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type if (off + size > offsetofend(struct __sk_buff, cb[4])) return false; break; + case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]): + case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]): + case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): + case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_end): if (size != size_default) @@ -3371,6 +3383,7 @@ static bool sk_filter_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_end): + case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; } @@ 
-3392,6 +3405,7 @@ static bool lwt_is_valid_access(int off, int size, { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): + case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; } @@ -3505,6 +3519,8 @@ static bool tc_cls_act_is_valid_access(int off, int size, case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; + case bpf_ctx_range_till(struct __sk_buff, family, local_port): + return false; } return bpf_skb_is_valid_access(off, size, type, info); @@ -3582,11 +3598,63 @@ static bool sock_ops_is_valid_access(int off, int size, return __is_valid_sock_ops_access(off, size); } +static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write, + const struct bpf_prog *prog) +{ + struct bpf_insn *insn = insn_buf; + + if (!direct_write) + return 0; + + /* if (!skb->cloned) + * goto start; + * + * (Fast-path, otherwise approximation that we might be + * a clone, do the rest in helper.) + */ + *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET()); + *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); + *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); + + /* ret = bpf_skb_pull_data(skb, 0); */ + *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); + *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); + *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_skb_pull_data); + /* if (!ret) + * goto restore; + * return SK_DROP; + */ + *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); + *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, SK_DROP); + *insn++ = BPF_EXIT_INSN(); + + /* restore: */ + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); + /* start: */ + *insn++ = prog->insnsi[0]; + + return insn - insn_buf; +} + static bool sk_skb_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { + if (type == BPF_WRITE) { + switch (off) { + case bpf_ctx_range(struct __sk_buff, mark): + case bpf_ctx_range(struct __sk_buff, tc_index): + case bpf_ctx_range(struct __sk_buff, priority): + break; + default: + return false; + } + } + switch (off) { + case bpf_ctx_range(struct __sk_buff, tc_classid): + return false; case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; @@ -3783,6 +3851,106 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type, *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); #endif break; + case offsetof(struct __sk_buff, family): + BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); + + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), + si->dst_reg, si->src_reg, + offsetof(struct sk_buff, sk)); + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, + bpf_target_off(struct sock_common, + skc_family, + 2, target_size)); + break; + case offsetof(struct __sk_buff, remote_ip4): + BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); + + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), + si->dst_reg, si->src_reg, + offsetof(struct sk_buff, sk)); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, + bpf_target_off(struct sock_common, + skc_daddr, + 4, target_size)); + break; + case offsetof(struct __sk_buff, local_ip4): + BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + skc_rcv_saddr) != 4); + + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), + si->dst_reg, si->src_reg, + offsetof(struct sk_buff, sk)); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, + bpf_target_off(struct sock_common, + skc_rcv_saddr, + 4, target_size)); + break; + case offsetof(struct __sk_buff, 
remote_ip6[0]) ... + offsetof(struct __sk_buff, remote_ip6[3]): +#if IS_ENABLED(CONFIG_IPV6) + BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + skc_v6_daddr.s6_addr32[0]) != 4); + + off = si->off; + off -= offsetof(struct __sk_buff, remote_ip6[0]); + + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), + si->dst_reg, si->src_reg, + offsetof(struct sk_buff, sk)); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, + offsetof(struct sock_common, + skc_v6_daddr.s6_addr32[0]) + + off); +#else + *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); +#endif + break; + case offsetof(struct __sk_buff, local_ip6[0]) ... + offsetof(struct __sk_buff, local_ip6[3]): +#if IS_ENABLED(CONFIG_IPV6) + BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, + skc_v6_rcv_saddr.s6_addr32[0]) != 4); + + off = si->off; + off -= offsetof(struct __sk_buff, local_ip6[0]); + + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), + si->dst_reg, si->src_reg, + offsetof(struct sk_buff, sk)); + *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, + offsetof(struct sock_common, + skc_v6_rcv_saddr.s6_addr32[0]) + + off); +#else + *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); +#endif + break; + + case offsetof(struct __sk_buff, remote_port): + BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); + + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), + si->dst_reg, si->src_reg, + offsetof(struct sk_buff, sk)); + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, + bpf_target_off(struct sock_common, + skc_dport, + 2, target_size)); +#ifndef __BIG_ENDIAN_BITFIELD + *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); +#endif + break; + + case offsetof(struct __sk_buff, local_port): + BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); + + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), + si->dst_reg, si->src_reg, + offsetof(struct sk_buff, sk)); + *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, + bpf_target_off(struct sock_common, + skc_num, 2, target_size)); + break; } return insn - insn_buf; @@ -4071,6 +4239,7 @@ const struct bpf_verifier_ops sk_skb_prog_ops = { .get_func_proto = sk_skb_func_proto, .is_valid_access = sk_skb_is_valid_access, .convert_ctx_access = bpf_convert_ctx_access, + .gen_prologue = sk_skb_prologue, }; int sk_detach_filter(struct sock *sk) -- cgit v1.2.3-55-g7522 From 69e8cc134bcbf0ccfcf852c400b8e6788d1d0038 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 22:33:32 -0700 Subject: bpf: sockmap sample program This program binds a program to a cgroup and then matches hard coded IP addresses and adds these to a sockmap. This will receive messages from the backend and send them to the client. client:X <---> frontend:10000 client:X <---> backend:10001 To keep things simple this is only designed for 1:1 connections using hard coded values. A more complete example would allow many backends and clients. To run, # sockmap Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- samples/bpf/bpf_load.c | 8 +- samples/sockmap/Makefile | 78 ++++++++ samples/sockmap/sockmap_kern.c | 110 ++++++++++++ samples/sockmap/sockmap_user.c | 286 ++++++++++++++++++++++++++++++ tools/include/uapi/linux/bpf.h | 46 ++++- tools/lib/bpf/bpf.c | 14 +- tools/lib/bpf/bpf.h | 4 + tools/testing/selftests/bpf/bpf_helpers.h | 7 + 8 files changed, 547 insertions(+), 6 deletions(-) create mode 100644 samples/sockmap/Makefile create mode 100644 samples/sockmap/sockmap_kern.c create mode 100644 samples/sockmap/sockmap_user.c diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index 899f40310bc3..a8552b8a2ab6 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -65,6 +65,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0; bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0; bool is_sockops = strncmp(event, "sockops", 7) == 0; + bool is_sk_skb = strncmp(event, "sk_skb", 6) == 0; size_t insns_cnt = size / sizeof(struct bpf_insn); enum bpf_prog_type prog_type; char buf[256]; @@ -92,6 +93,8 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) prog_type = BPF_PROG_TYPE_CGROUP_SOCK; } else if (is_sockops) { prog_type = BPF_PROG_TYPE_SOCK_OPS; + } else if (is_sk_skb) { + prog_type = BPF_PROG_TYPE_SK_SKB; } else { printf("Unknown event '%s'\n", event); return -1; @@ -109,7 +112,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) if (is_xdp || is_perf_event || is_cgroup_skb || is_cgroup_sk) return 0; - if (is_socket || is_sockops) { + if (is_socket || is_sockops || is_sk_skb) { if (is_socket) event += 6; else @@ -567,7 +570,8 @@ static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map) memcmp(shname, "perf_event", 10) == 0 || memcmp(shname, "socket", 6) == 0 || memcmp(shname, "cgroup/", 7) == 0 || - memcmp(shname, "sockops", 7) == 0) { + memcmp(shname, "sockops", 7) == 0 || + memcmp(shname, "sk_skb", 6) == 0) { ret = load_and_attach(shname, data->d_buf, data->d_size); if (ret != 0) diff --git a/samples/sockmap/Makefile b/samples/sockmap/Makefile new file mode 100644 index 000000000000..9291ab8e0f8c --- /dev/null +++ b/samples/sockmap/Makefile @@ -0,0 +1,78 @@ +# kbuild trick to avoid linker error. Can be omitted if a module is built. 
+obj- := dummy.o + +# List of programs to build +hostprogs-y := sockmap + +# Libbpf dependencies +LIBBPF := ../../tools/lib/bpf/bpf.o + +HOSTCFLAGS += -I$(objtree)/usr/include +HOSTCFLAGS += -I$(srctree)/tools/lib/ +HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/ +HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include +HOSTCFLAGS += -I$(srctree)/tools/perf + +sockmap-objs := ../bpf/bpf_load.o $(LIBBPF) sockmap_user.o + +# Tell kbuild to always build the programs +always := $(hostprogs-y) +always += sockmap_kern.o + +HOSTLOADLIBES_sockmap += -lelf -lpthread + +# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: +# make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang +LLC ?= llc +CLANG ?= clang + +# Trick to allow make to be run from this directory +all: + $(MAKE) -C ../../ $(CURDIR)/ + +clean: + $(MAKE) -C ../../ M=$(CURDIR) clean + @rm -f *~ + +$(obj)/syscall_nrs.s: $(src)/syscall_nrs.c + $(call if_changed_dep,cc_s_c) + +$(obj)/syscall_nrs.h: $(obj)/syscall_nrs.s FORCE + $(call filechk,offsets,__SYSCALL_NRS_H__) + +clean-files += syscall_nrs.h + +FORCE: + + +# Verify LLVM compiler tools are available and bpf target is supported by llc +.PHONY: verify_cmds verify_target_bpf $(CLANG) $(LLC) + +verify_cmds: $(CLANG) $(LLC) + @for TOOL in $^ ; do \ + if ! (which -- "$${TOOL}" > /dev/null 2>&1); then \ + echo "*** ERROR: Cannot find LLVM tool $${TOOL}" ;\ + exit 1; \ + else true; fi; \ + done + +verify_target_bpf: verify_cmds + @if ! (${LLC} -march=bpf -mattr=help > /dev/null 2>&1); then \ + echo "*** ERROR: LLVM (${LLC}) does not support 'bpf' target" ;\ + echo " NOTICE: LLVM version >= 3.7.1 required" ;\ + exit 2; \ + else true; fi + +$(src)/*.c: verify_target_bpf + +# asm/sysreg.h - inline assembly used by it is incompatible with llvm. +# But, there is no easy way to fix it, so just exclude it since it is +# useless for BPF samples. +$(obj)/%.o: $(src)/%.c + $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \ + -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ + -Wno-compare-distinct-pointer-types \ + -Wno-gnu-variable-sized-type-not-at-end \ + -Wno-address-of-packed-member -Wno-tautological-compare \ + -Wno-unknown-warning-option \ + -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ diff --git a/samples/sockmap/sockmap_kern.c b/samples/sockmap/sockmap_kern.c new file mode 100644 index 000000000000..6ff986f7059b --- /dev/null +++ b/samples/sockmap/sockmap_kern.c @@ -0,0 +1,110 @@ +/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ +#include +#include +#include +#include +#include "../../tools/testing/selftests/bpf/bpf_helpers.h" +#include "../../tools/testing/selftests/bpf/bpf_endian.h" + +/* Sockmap sample program connects a client and a backend together + * using cgroups. + * + * client:X <---> frontend:80 client:X <---> backend:80 + * + * For simplicity we hard code values here and bind 1:1. The hard + * coded values are part of the setup in sockmap.sh script that + * is associated with this BPF program. 
+ * + * The bpf_printk is verbose and prints information as connections + * are established and verdicts are decided. + */ + +#define bpf_printk(fmt, ...) \ +({ \ + char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +struct bpf_map_def SEC("maps") sock_map = { + .type = BPF_MAP_TYPE_SOCKMAP, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 20, +}; + +SEC("sk_skb1") +int bpf_prog1(struct __sk_buff *skb) +{ + return skb->len; +} + +SEC("sk_skb2") +int bpf_prog2(struct __sk_buff *skb) +{ + __u32 lport = skb->local_port; + __u32 rport = skb->remote_port; + int ret = 0; + + if (lport == 10000) + ret = 10; + else + ret = 1; + + bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret); + return bpf_sk_redirect_map(&sock_map, ret, 0); +} + +SEC("sockops") +int bpf_sockmap(struct bpf_sock_ops *skops) +{ + __u32 lport, rport; + int op, err = 0, index, key, ret; + + + op = (int) skops->op; + + switch (op) { + case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: + lport = skops->local_port; + rport = skops->remote_port; + + if (lport == 10000) { + ret = 1; + err = bpf_sock_map_update(skops, &sock_map, &ret, + BPF_NOEXIST, + BPF_SOCKMAP_STRPARSER); + bpf_printk("passive(%i -> %i) map ctx update err: %d\n", + lport, bpf_ntohl(rport), err); + } + break; + case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: + lport = skops->local_port; + rport = skops->remote_port; + + if (bpf_ntohl(rport) == 10001) { + ret = 10; + err = bpf_sock_map_update(skops, &sock_map, &ret, + BPF_NOEXIST, + BPF_SOCKMAP_STRPARSER); + bpf_printk("active(%i -> %i) map ctx update err: %d\n", + lport, bpf_ntohl(rport), err); + } + break; + default: + break; + } + + return 0; +} +char _license[] SEC("license") = "GPL"; diff --git a/samples/sockmap/sockmap_user.c b/samples/sockmap/sockmap_user.c new file mode 100644 index 000000000000..fb78f5abefb4 --- /dev/null +++ b/samples/sockmap/sockmap_user.c @@ -0,0 +1,286 @@ +/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "../bpf/bpf_load.h" +#include "../bpf/bpf_util.h" +#include "../bpf/libbpf.h" + +int running; +void running_handler(int a); + +/* randomly selected ports for testing on lo */ +#define S1_PORT 10000 +#define S2_PORT 10001 + +static int sockmap_test_sockets(int rate, int dot) +{ + int i, sc, err, max_fd, one = 1; + int s1, s2, c1, c2, p1, p2; + struct sockaddr_in addr; + struct timeval timeout; + char buf[1024] = {0}; + int *fds[4] = {&s1, &s2, &c1, &c2}; + fd_set w; + + s1 = s2 = p1 = p2 = c1 = c2 = 0; + + /* Init sockets */ + for (i = 0; i < 4; i++) { + *fds[i] = socket(AF_INET, SOCK_STREAM, 0); + if (*fds[i] < 0) { + perror("socket s1 failed()"); + err = *fds[i]; + goto out; + } + } + + /* Allow reuse */ + for (i = 0; i < 2; i++) { + err = setsockopt(*fds[i], SOL_SOCKET, SO_REUSEADDR, + (char *)&one, sizeof(one)); + if (err) { + perror("setsockopt failed()"); + goto out; + } + } + + /* Non-blocking sockets */ + for (i = 0; i < 4; i++) { + err = ioctl(*fds[i], FIONBIO, (char *)&one); + if (err < 0) { + perror("ioctl s1 failed()"); + goto out; + } + } + + /* Bind server sockets */ + memset(&addr, 0, sizeof(struct sockaddr_in)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = inet_addr("127.0.0.1"); + + addr.sin_port = htons(S1_PORT); + err = bind(s1, (struct sockaddr *)&addr, sizeof(addr)); + if (err < 0) { + perror("bind s1 failed()\n"); + goto out; + } + + addr.sin_port = htons(S2_PORT); + err = bind(s2, (struct sockaddr *)&addr, sizeof(addr)); + if (err < 0) { + perror("bind s2 failed()\n"); + goto out; + } + + /* Listen server sockets */ + addr.sin_port = htons(S1_PORT); + err = listen(s1, 32); + if (err < 0) { + perror("listen s1 failed()\n"); + goto out; + } + + addr.sin_port = htons(S2_PORT); + err = listen(s2, 32); + if (err < 0) { + perror("listen s1 failed()\n"); + goto out; + } + + /* Initiate Connect */ + addr.sin_port = htons(S1_PORT); + err = connect(c1, (struct sockaddr *)&addr, sizeof(addr)); + if (err < 0 && errno != EINPROGRESS) { + perror("connect c1 failed()\n"); + goto out; + } + + addr.sin_port = htons(S2_PORT); + err = connect(c2, (struct sockaddr *)&addr, sizeof(addr)); + if (err < 0 && errno != EINPROGRESS) { + perror("connect c2 failed()\n"); + goto out; + } + + /* Accept Connecrtions */ + p1 = accept(s1, NULL, NULL); + if (p1 < 0) { + perror("accept s1 failed()\n"); + goto out; + } + + p2 = accept(s2, NULL, NULL); + if (p2 < 0) { + perror("accept s1 failed()\n"); + goto out; + } + + max_fd = p2; + timeout.tv_sec = 10; + timeout.tv_usec = 0; + + printf("connected sockets: c1 <-> p1, c2 <-> p2\n"); + printf("cgroups binding: c1(%i) <-> s1(%i) - - - c2(%i) <-> s2(%i)\n", + c1, s1, c2, s2); + + /* Ping/Pong data from client to server */ + sc = send(c1, buf, sizeof(buf), 0); + if (sc < 0) { + perror("send failed()\n"); + goto out; + } + + do { + int s, rc, i; + + /* FD sets */ + FD_ZERO(&w); + FD_SET(c1, &w); + FD_SET(c2, &w); + FD_SET(p1, &w); + FD_SET(p2, &w); + + s = select(max_fd + 1, &w, NULL, NULL, &timeout); + if (s == -1) { + perror("select()"); + break; + } else if (!s) { + fprintf(stderr, "unexpected timeout\n"); + break; + } + + for (i = 0; i <= max_fd && s > 0; ++i) { + if (!FD_ISSET(i, &w)) + continue; + + s--; + + rc = recv(i, buf, sizeof(buf), 0); + if (rc < 0) { + if (errno != EWOULDBLOCK) { + 
perror("recv failed()\n"); + break; + } + } + + if (rc == 0) { + close(i); + break; + } + + sc = send(i, buf, rc, 0); + if (sc < 0) { + perror("send failed()\n"); + break; + } + } + sleep(rate); + if (dot) { + printf("."); + fflush(stdout); + + } + } while (running); + +out: + close(s1); + close(s2); + close(p1); + close(p2); + close(c1); + close(c2); + return err; +} + +int main(int argc, char **argv) +{ + int rate = 1, dot = 1; + char filename[256]; + int err, cg_fd; + char *cg_path; + + cg_path = argv[argc - 1]; + snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]); + + running = 1; + + /* catch SIGINT */ + signal(SIGINT, running_handler); + + if (load_bpf_file(filename)) { + fprintf(stderr, "load_bpf_file: (%s) %s\n", + filename, strerror(errno)); + return 1; + } + + /* Cgroup configuration */ + cg_fd = open(cg_path, O_DIRECTORY, O_RDONLY); + if (cg_fd < 0) { + fprintf(stderr, "ERROR: (%i) open cg path failed: %s\n", + cg_fd, cg_path); + return cg_fd; + } + + /* Attach programs to sockmap */ + err = __bpf_prog_attach(prog_fd[0], prog_fd[1], map_fd[0], + BPF_CGROUP_SMAP_INGRESS, 0); + if (err) { + fprintf(stderr, "ERROR: bpf_prog_attach (sockmap): %d (%s)\n", + err, strerror(errno)); + return err; + } + + /* Attach to cgroups */ + err = bpf_prog_attach(prog_fd[2], cg_fd, BPF_CGROUP_SOCK_OPS, 0); + if (err) { + fprintf(stderr, "ERROR: bpf_prog_attach (groups): %d (%s)\n", + err, strerror(errno)); + return err; + } + + err = sockmap_test_sockets(rate, dot); + if (err) { + fprintf(stderr, "ERROR: test socket failed: %d\n", err); + return err; + } + return 0; +} + +void running_handler(int a) +{ + running = 0; +} diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index bf3b2e230455..2d97dd27c8f6 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -110,6 +110,7 @@ enum bpf_map_type { BPF_MAP_TYPE_ARRAY_OF_MAPS, BPF_MAP_TYPE_HASH_OF_MAPS, BPF_MAP_TYPE_DEVMAP, + BPF_MAP_TYPE_SOCKMAP, }; enum bpf_prog_type { @@ -127,6 +128,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_LWT_OUT, BPF_PROG_TYPE_LWT_XMIT, BPF_PROG_TYPE_SOCK_OPS, + BPF_PROG_TYPE_SK_SKB, }; enum bpf_attach_type { @@ -134,11 +136,18 @@ enum bpf_attach_type { BPF_CGROUP_INET_EGRESS, BPF_CGROUP_INET_SOCK_CREATE, BPF_CGROUP_SOCK_OPS, + BPF_CGROUP_SMAP_INGRESS, __MAX_BPF_ATTACH_TYPE }; #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE +enum bpf_sockmap_flags { + BPF_SOCKMAP_UNSPEC, + BPF_SOCKMAP_STRPARSER, + __MAX_BPF_SOCKMAP_FLAG +}; + /* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command * to the given target_fd cgroup the descendent cgroup will be able to * override effective bpf program that was inherited from this cgroup @@ -210,6 +219,7 @@ union bpf_attr { __u32 attach_bpf_fd; /* eBPF program to attach */ __u32 attach_type; __u32 attach_flags; + __u32 attach_bpf_fd2; }; struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ @@ -545,6 +555,23 @@ union bpf_attr { * @mode: operation mode (enum bpf_adj_room_mode) * @flags: reserved for future use * Return: 0 on success or negative error code + * + * int bpf_sk_redirect_map(map, key, flags) + * Redirect skb to a sock in map using key as a lookup key for the + * sock in map. 
+ * @map: pointer to sockmap + * @key: key to lookup sock in map + * @flags: reserved for future use + * Return: SK_REDIRECT + * + * int bpf_sock_map_update(skops, map, key, flags, map_flags) + * @skops: pointer to bpf_sock_ops + * @map: pointer to sockmap to update + * @key: key to insert/update sock in map + * @flags: same flags as map update elem + * @map_flags: sock map specific flags + * bit 1: Enable strparser + * other bits: reserved */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -598,7 +625,9 @@ union bpf_attr { FN(set_hash), \ FN(setsockopt), \ FN(skb_adjust_room), \ - FN(redirect_map), + FN(redirect_map), \ + FN(sk_redirect_map), \ + FN(sock_map_update), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call @@ -675,6 +704,15 @@ struct __sk_buff { __u32 data; __u32 data_end; __u32 napi_id; + + /* accessed by BPF_PROG_TYPE_sk_skb types */ + __u32 family; + __u32 remote_ip4; /* Stored in network byte order */ + __u32 local_ip4; /* Stored in network byte order */ + __u32 remote_ip6[4]; /* Stored in network byte order */ + __u32 local_ip6[4]; /* Stored in network byte order */ + __u32 remote_port; /* Stored in network byte order */ + __u32 local_port; /* stored in host byte order */ }; struct bpf_tunnel_key { @@ -734,6 +772,12 @@ struct xdp_md { __u32 data_end; }; +enum sk_action { + SK_ABORTED = 0, + SK_DROP, + SK_REDIRECT, +}; + #define BPF_TAG_SIZE 8 struct bpf_prog_info { diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index e5bbb090bf88..77660157a684 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -211,20 +211,28 @@ int bpf_obj_get(const char *pathname) return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr)); } -int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, - unsigned int flags) +int __bpf_prog_attach(int prog_fd1, int prog_fd2, int target_fd, + enum bpf_attach_type type, + unsigned int flags) { union bpf_attr attr; bzero(&attr, sizeof(attr)); attr.target_fd = target_fd; - attr.attach_bpf_fd = prog_fd; + attr.attach_bpf_fd = prog_fd1; + attr.attach_bpf_fd2 = prog_fd2; attr.attach_type = type; attr.attach_flags = flags; return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)); } +int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type, + unsigned int flags) +{ + return __bpf_prog_attach(prog_fd, 0, target_fd, type, flags); +} + int bpf_prog_detach(int target_fd, enum bpf_attach_type type) { union bpf_attr attr; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 418c86e69bcb..eaee585c1cea 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -50,6 +50,10 @@ int bpf_obj_pin(int fd, const char *pathname); int bpf_obj_get(const char *pathname); int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type, unsigned int flags); +int __bpf_prog_attach(int prog1, int prog2, + int attachable_fd, + enum bpf_attach_type type, + unsigned int flags); int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type); int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size, void *data_out, __u32 *size_out, __u32 *retval, diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index acbd60519467..73092d4a898e 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -65,6 +65,13 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) = static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, int optlen) = (void 
*) BPF_FUNC_setsockopt; +static int (*bpf_sk_redirect_map)(void *map, int key, int flags) = + (void *) BPF_FUNC_sk_redirect_map; +static int (*bpf_sock_map_update)(void *map, void *key, void *value, + unsigned long long flags, + unsigned long long map_lags) = + (void *) BPF_FUNC_sock_map_update; + /* llvm builtin functions that eBPF C program may use to * emit BPF_LD_ABS and BPF_LD_IND instructions -- cgit v1.2.3-55-g7522 From 41bc94f535ef454e325a6d4db085ec345376de6c Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 22:33:56 -0700 Subject: bpf: selftests: add tests for new __sk_buff members This adds tests to access new __sk_buff members from sk skb program type. Signed-off-by: John Fastabend Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_verifier.c | 152 ++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 1b767127e141..c03542c417db 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -966,6 +966,158 @@ static struct bpf_test tests[] = { .errstr_unpriv = "R1 pointer comparison", .result = REJECT, }, + { + "invalid access __sk_buff family", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, family)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "invalid access __sk_buff remote_ip4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip4)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "invalid access __sk_buff local_ip4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip4)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "invalid access __sk_buff remote_ip6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "invalid access __sk_buff local_ip6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "invalid access __sk_buff remote_port", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_port)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "invalid access __sk_buff remote_port", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_port)), + BPF_EXIT_INSN(), + }, + .errstr = "invalid bpf_context access", + .result = REJECT, + }, + { + "valid access __sk_buff family", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, family)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, + }, + { + "valid access __sk_buff remote_ip4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip4)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, + }, + { + "valid access __sk_buff local_ip4", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip4)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, + }, + { + "valid 
access __sk_buff remote_ip6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_ip6[3])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, + }, + { + "valid access __sk_buff local_ip6", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[0])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[1])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[2])), + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_ip6[3])), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, + }, + { + "valid access __sk_buff remote_port", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, remote_port)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, + }, + { + "valid access __sk_buff remote_port", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, + offsetof(struct __sk_buff, local_port)), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_SK_SKB, + }, { "check skb->mark is not writeable by sockets", .insns = { -- cgit v1.2.3-55-g7522 From 6f6d33f3b3d0f53799d120d28abd13ad90041549 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 22:34:22 -0700 Subject: bpf: selftests add sockmap tests This generates a set of sockets, attaches BPF programs, and sends some simple traffic using basic send/recv pattern. Additionally, we do a bunch of negative tests to ensure adding/removing socks out of the sockmap fail correctly. Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- tools/lib/bpf/libbpf.c | 29 ++ tools/lib/bpf/libbpf.h | 2 + tools/testing/selftests/bpf/Makefile | 2 +- tools/testing/selftests/bpf/sockmap_parse_prog.c | 38 +++ tools/testing/selftests/bpf/sockmap_verdict_prog.c | 48 ++++ tools/testing/selftests/bpf/test_maps.c | 308 +++++++++++++++++++++ tools/testing/selftests/bpf/test_progs.c | 55 ++-- 7 files changed, 443 insertions(+), 39 deletions(-) create mode 100644 tools/testing/selftests/bpf/sockmap_parse_prog.c create mode 100644 tools/testing/selftests/bpf/sockmap_verdict_prog.c diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 1a2c07eb7795..1cc3ea0ffdc3 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1744,3 +1744,32 @@ long libbpf_get_error(const void *ptr) return PTR_ERR(ptr); return 0; } + +int bpf_prog_load(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd) +{ + struct bpf_program *prog; + struct bpf_object *obj; + int err; + + obj = bpf_object__open(file); + if (IS_ERR(obj)) + return -ENOENT; + + prog = bpf_program__next(NULL, obj); + if (!prog) { + bpf_object__close(obj); + return -ENOENT; + } + + bpf_program__set_type(prog, type); + err = bpf_object__load(obj); + if (err) { + bpf_object__close(obj); + return -EINVAL; + } + + *pobj = obj; + *prog_fd = bpf_program__fd(prog); + return 0; +} diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 32c7252f734e..7959086eb9c9 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -243,4 +243,6 @@ int bpf_map__pin(struct bpf_map *map, const char *path); long libbpf_get_error(const void *ptr); +int bpf_prog_load(const char *file, enum bpf_prog_type type, + struct bpf_object **pobj, int *prog_fd); #endif diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 3c2e67da4b41..f4b23d697448 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -15,7 +15,7 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test test_align TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ - test_pkt_md_access.o test_xdp_redirect.o + test_pkt_md_access.o test_xdp_redirect.o sockmap_parse_prog.o sockmap_verdict_prog.o TEST_PROGS := test_kmod.sh test_xdp_redirect.sh diff --git a/tools/testing/selftests/bpf/sockmap_parse_prog.c b/tools/testing/selftests/bpf/sockmap_parse_prog.c new file mode 100644 index 000000000000..8b5453158399 --- /dev/null +++ b/tools/testing/selftests/bpf/sockmap_parse_prog.c @@ -0,0 +1,38 @@ +#include +#include "bpf_helpers.h" +#include "bpf_util.h" +#include "bpf_endian.h" + +int _version SEC("version") = 1; + +#define bpf_printk(fmt, ...) \ +({ \ + char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +SEC("sk_skb1") +int bpf_prog1(struct __sk_buff *skb) +{ + void *data_end = (void *)(long) skb->data_end; + void *data = (void *)(long) skb->data; + __u32 lport = skb->local_port; + __u32 rport = skb->remote_port; + char *d = data; + + if (data + 8 > data_end) + return skb->len; + + /* This write/read is a bit pointless but tests the verifier and + * strparser handler for read/write pkt data and access into sk + * fields. 
+ */ + d[0] = 1; + + bpf_printk("data[0] = (%u): local_port %i remote %i\n", + d[0], lport, bpf_ntohl(rport)); + return skb->len; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/sockmap_verdict_prog.c new file mode 100644 index 000000000000..d5f9447b3808 --- /dev/null +++ b/tools/testing/selftests/bpf/sockmap_verdict_prog.c @@ -0,0 +1,48 @@ +#include +#include "bpf_helpers.h" +#include "bpf_util.h" +#include "bpf_endian.h" + +int _version SEC("version") = 1; + +#define bpf_printk(fmt, ...) \ +({ \ + char ____fmt[] = fmt; \ + bpf_trace_printk(____fmt, sizeof(____fmt), \ + ##__VA_ARGS__); \ +}) + +struct bpf_map_def SEC("maps") sock_map = { + .type = BPF_MAP_TYPE_SOCKMAP, + .key_size = sizeof(int), + .value_size = sizeof(int), + .max_entries = 20, +}; + +SEC("sk_skb2") +int bpf_prog2(struct __sk_buff *skb) +{ + void *data_end = (void *)(long) skb->data_end; + void *data = (void *)(long) skb->data; + __u32 lport = skb->local_port; + __u32 rport = skb->remote_port; + char *d = data; + + if (data + 8 > data_end) + return SK_DROP; + + d[0] = 0xd; + d[1] = 0xe; + d[2] = 0xa; + d[3] = 0xd; + d[4] = 0xb; + d[5] = 0xe; + d[6] = 0xe; + d[7] = 0xf; + + bpf_printk("data[0] = (%u): local_port %i remote %i\n", + d[0], lport, bpf_ntohl(rport)); + return bpf_sk_redirect_map(&sock_map, 5, 0); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index c991ab69a720..40b2d1faf02b 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -22,6 +22,7 @@ #include #include +#include #include "bpf_util.h" static int map_flags; @@ -453,6 +454,312 @@ static void test_devmap(int task, void *data) close(fd); } +#include +#include +#include +#include +#include +#define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o" +#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o" +static void test_sockmap(int task, void *data) +{ + int ports[] = {50200, 50201, 50202, 50204}; + int err, i, fd, sfd[6] = {0xdeadbeef}; + char buf[] = "hello sockmap user\n"; + int one = 1, map_fd, s, sc, rc; + int parse_prog, verdict_prog; + struct bpf_map *bpf_map; + struct sockaddr_in addr; + struct bpf_object *obj; + struct timeval to; + __u32 key, value; + fd_set w; + + /* Create some sockets to use with sockmap */ + for (i = 0; i < 2; i++) { + sfd[i] = socket(AF_INET, SOCK_STREAM, 0); + if (sfd[i] < 0) + goto out; + err = setsockopt(sfd[i], SOL_SOCKET, SO_REUSEADDR, + (char *)&one, sizeof(one)); + if (err) { + printf("failed to setsockopt\n"); + goto out; + } + err = ioctl(sfd[i], FIONBIO, (char *)&one); + if (err < 0) { + printf("failed to ioctl\n"); + goto out; + } + memset(&addr, 0, sizeof(struct sockaddr_in)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = inet_addr("127.0.0.1"); + addr.sin_port = htons(ports[i]); + err = bind(sfd[i], (struct sockaddr *)&addr, sizeof(addr)); + if (err < 0) { + printf("failed to bind: err %i: %i:%i\n", + err, i, sfd[i]); + goto out; + } + err = listen(sfd[i], 32); + if (err < 0) { + printf("failed to listeen\n"); + goto out; + } + } + + for (i = 2; i < 4; i++) { + sfd[i] = socket(AF_INET, SOCK_STREAM, 0); + if (sfd[i] < 0) + goto out; + err = setsockopt(sfd[i], SOL_SOCKET, SO_REUSEADDR, + (char *)&one, sizeof(one)); + if (err) { + printf("set sock opt\n"); + goto out; + } + memset(&addr, 0, sizeof(struct sockaddr_in)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = 
inet_addr("127.0.0.1"); + addr.sin_port = htons(ports[i - 2]); + err = connect(sfd[i], (struct sockaddr *)&addr, sizeof(addr)); + if (err) { + printf("failed to conenct\n"); + goto out; + } + } + + + for (i = 4; i < 6; i++) { + sfd[i] = accept(sfd[i - 4], NULL, NULL); + if (sfd[i] < 0) { + printf("accept failed\n"); + goto out; + } + } + + /* Test sockmap with connected sockets */ + fd = bpf_create_map(BPF_MAP_TYPE_SOCKMAP, + sizeof(key), sizeof(value), + 6, 0); + if (fd < 0) { + printf("Failed to create sockmap %i\n", fd); + goto out_sockmap; + } + + /* Nothing attached so these should fail */ + for (i = 0; i < 6; i++) { + err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); + if (!err) { + printf("Failed invalid update sockmap '%i:%i'\n", + i, sfd[i]); + goto out_sockmap; + } + } + + /* Test attaching bad fds */ + err = __bpf_prog_attach(-1, -2, fd, BPF_CGROUP_SMAP_INGRESS, 0); + if (!err) { + printf("Failed invalid prog attach\n"); + goto out_sockmap; + } + + /* Load SK_SKB program and Attach */ + err = bpf_prog_load(SOCKMAP_PARSE_PROG, + BPF_PROG_TYPE_SK_SKB, &obj, &parse_prog); + if (err) { + printf("Failed to load SK_SKB parse prog\n"); + goto out_sockmap; + } + + err = bpf_prog_load(SOCKMAP_VERDICT_PROG, + BPF_PROG_TYPE_SK_SKB, &obj, &verdict_prog); + if (err) { + printf("Failed to load SK_SKB verdict prog\n"); + goto out_sockmap; + } + + bpf_map = bpf_object__find_map_by_name(obj, "sock_map"); + if (IS_ERR(bpf_map)) { + printf("Failed to load map from verdict prog\n"); + goto out_sockmap; + } + + map_fd = bpf_map__fd(bpf_map); + if (map_fd < 0) { + printf("Failed to get map fd\n"); + goto out_sockmap; + } + + err = __bpf_prog_attach(parse_prog, verdict_prog, map_fd, + BPF_CGROUP_SMAP_INGRESS, 0); + if (err) { + printf("Failed bpf prog attach\n"); + goto out_sockmap; + } + + /* Test map update elem */ + for (i = 0; i < 6; i++) { + err = bpf_map_update_elem(map_fd, &i, &sfd[i], BPF_ANY); + if (err) { + printf("Failed map_fd update sockmap %i '%i:%i'\n", + err, i, sfd[i]); + goto out_sockmap; + } + } + + /* Test map delete elem and remove send/recv sockets */ + for (i = 2; i < 4; i++) { + err = bpf_map_delete_elem(map_fd, &i); + if (err) { + printf("Failed delete sockmap %i '%i:%i'\n", + err, i, sfd[i]); + goto out_sockmap; + } + } + + /* Test map send/recv */ + sc = send(sfd[2], buf, 10, 0); + if (sc < 0) { + printf("Failed sockmap send\n"); + goto out_sockmap; + } + + FD_ZERO(&w); + FD_SET(sfd[3], &w); + to.tv_sec = 1; + to.tv_usec = 0; + s = select(sfd[3] + 1, &w, NULL, NULL, &to); + if (s == -1) { + perror("Failed sockmap select()"); + goto out_sockmap; + } else if (!s) { + printf("Failed sockmap unexpected timeout\n"); + goto out_sockmap; + } + + if (!FD_ISSET(sfd[3], &w)) { + printf("Failed sockmap select/recv\n"); + goto out_sockmap; + } + + rc = recv(sfd[3], buf, sizeof(buf), 0); + if (rc < 0) { + printf("Failed sockmap recv\n"); + goto out_sockmap; + } + + /* Delete the reset of the elems include some NULL elems */ + for (i = 0; i < 6; i++) { + err = bpf_map_delete_elem(map_fd, &i); + if (err && (i == 0 || i == 1 || i >= 4)) { + printf("Failed delete sockmap %i '%i:%i'\n", + err, i, sfd[i]); + goto out_sockmap; + } else if (!err && (i == 2 || i == 3)) { + printf("Failed null delete sockmap %i '%i:%i'\n", + err, i, sfd[i]); + goto out_sockmap; + } + } + + /* Test having multiple SMAPs open and active on same fds */ + err = __bpf_prog_attach(parse_prog, verdict_prog, fd, + BPF_CGROUP_SMAP_INGRESS, 0); + if (err) { + printf("Failed fd bpf prog attach\n"); + goto out_sockmap; + } 
+ + for (i = 0; i < 6; i++) { + err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); + if (err) { + printf("Failed fd update sockmap %i '%i:%i'\n", + err, i, sfd[i]); + goto out_sockmap; + } + } + + /* Test duplicate socket add of NOEXIST, ANY and EXIST */ + i = 0; + err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_NOEXIST); + if (!err) { + printf("Failed BPF_NOEXIST create\n"); + goto out_sockmap; + } + + err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); + if (err) { + printf("Failed sockmap update BPF_ANY\n"); + goto out_sockmap; + } + + err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_EXIST); + if (err) { + printf("Failed sockmap update BPF_EXIST\n"); + goto out_sockmap; + } + + /* The above were pushing fd into same slot try different slot now */ + i = 2; + err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_NOEXIST); + if (!err) { + printf("Failed BPF_NOEXIST create\n"); + goto out_sockmap; + } + + err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); + if (err) { + printf("Failed sockmap update BPF_ANY\n"); + goto out_sockmap; + } + + err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_EXIST); + if (err) { + printf("Failed sockmap update BPF_EXIST\n"); + goto out_sockmap; + } + + /* Try pushing fd into different map, this is not allowed at the + * moment. Which programs would we use? + */ + err = bpf_map_update_elem(map_fd, &i, &sfd[i], BPF_NOEXIST); + if (!err) { + printf("Failed BPF_NOEXIST create\n"); + goto out_sockmap; + } + + err = bpf_map_update_elem(map_fd, &i, &sfd[i], BPF_ANY); + if (!err) { + printf("Failed sockmap update BPF_ANY\n"); + goto out_sockmap; + } + + err = bpf_map_update_elem(map_fd, &i, &sfd[i], BPF_EXIST); + if (!err) { + printf("Failed sockmap update BPF_EXIST\n"); + goto out_sockmap; + } + + /* Test map close sockets */ + for (i = 0; i < 6; i++) + close(sfd[i]); + close(fd); + close(map_fd); + bpf_object__close(obj); + return; +out: + for (i = 0; i < 6; i++) + close(sfd[i]); + printf("Failed to create sockmap '%i:%s'!\n", i, strerror(errno)); + exit(1); +out_sockmap: + for (i = 0; i < 6; i++) + close(sfd[i]); + close(fd); + exit(1); +} + #define MAP_SIZE (32 * 1024) static void test_map_large(void) @@ -621,6 +928,7 @@ static void run_all_tests(void) test_arraymap_percpu_many_keys(); test_devmap(0, NULL); + test_sockmap(0, NULL); test_map_large(); test_map_parallel(); diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 1f7dd35551b9..1cb037803679 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -75,39 +75,6 @@ static struct { __ret; \ }) -static int bpf_prog_load(const char *file, enum bpf_prog_type type, - struct bpf_object **pobj, int *prog_fd) -{ - struct bpf_program *prog; - struct bpf_object *obj; - int err; - - obj = bpf_object__open(file); - if (IS_ERR(obj)) { - error_cnt++; - return -ENOENT; - } - - prog = bpf_program__next(NULL, obj); - if (!prog) { - bpf_object__close(obj); - error_cnt++; - return -ENOENT; - } - - bpf_program__set_type(prog, type); - err = bpf_object__load(obj); - if (err) { - bpf_object__close(obj); - error_cnt++; - return -EINVAL; - } - - *pobj = obj; - *prog_fd = bpf_program__fd(prog); - return 0; -} - static int bpf_find_map(const char *test, struct bpf_object *obj, const char *name) { @@ -130,8 +97,10 @@ static void test_pkt_access(void) int err, prog_fd; err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (err) + if (err) { + error_cnt++; return; + } err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, 
sizeof(pkt_v4), NULL, NULL, &retval, &duration); @@ -162,8 +131,10 @@ static void test_xdp(void) int err, prog_fd, map_fd; err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); - if (err) + if (err) { + error_cnt++; return; + } map_fd = bpf_find_map(__func__, obj, "vip2tnl"); if (map_fd < 0) @@ -223,8 +194,10 @@ static void test_l4lb(void) u32 *magic = (u32 *)buf; err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (err) + if (err) { + error_cnt++; return; + } map_fd = bpf_find_map(__func__, obj, "vip_map"); if (map_fd < 0) @@ -280,8 +253,10 @@ static void test_tcp_estats(void) err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); CHECK(err, "", "err %d errno %d\n", err, errno); - if (err) + if (err) { + error_cnt++; return; + } bpf_object__close(obj); } @@ -336,6 +311,8 @@ static void test_bpf_obj_id(void) /* test_obj_id.o is a dumb prog. It should never fail * to load. */ + if (err) + error_cnt++; assert(!err); /* Check getting prog info */ @@ -496,8 +473,10 @@ static void test_pkt_md_access(void) int err, prog_fd; err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); - if (err) + if (err) { + error_cnt++; return; + } err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), NULL, NULL, &retval, &duration); -- cgit v1.2.3-55-g7522 From cf9d01405925e3f8144c99d7bf7b184449794066 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 15 Aug 2017 23:35:12 -0700 Subject: bpf: devmap: remove unnecessary value size check In the devmap alloc map logic we check to ensure that the sizeof the values are not greater than KMALLOC_MAX_SIZE. But, in the dev map case we ensure the value size is 4bytes earlier in the function because all values should be netdev ifindex values. The second check is harmless but is not needed so remove it. Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- kernel/bpf/devmap.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 7192fb67d4de..18a72a8add43 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -83,12 +83,6 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) attr->value_size != 4 || attr->map_flags) return ERR_PTR(-EINVAL); - /* if value_size is bigger, the user space won't be able to - * access the elements. - */ - if (attr->value_size > KMALLOC_MAX_SIZE) - return ERR_PTR(-E2BIG); - dtab = kzalloc(sizeof(*dtab), GFP_USER); if (!dtab) return ERR_PTR(-ENOMEM); -- cgit v1.2.3-55-g7522 From b985f870a5f08da0a2d6b45a3ea33f5558cf7e4e Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 16 Aug 2017 09:37:43 +0200 Subject: nfp: process control messages in workqueue in flower app Processing of control messages is not time-critical and future processing of some messages will require taking the RTNL which is not possible in a BH handler. It seems simplest to move all control message processing to a workqueue. Signed-off-by: Simon Horman Reviewed-by: Jakub Kicinski Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 24 +++++++++++++++++++++++- drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 1 + drivers/net/ethernet/netronome/nfp/flower/main.c | 14 ++++++++++++-- drivers/net/ethernet/netronome/nfp/flower/main.h | 5 +++++ 4 files changed, 41 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index aa46b23cdfb1..6c8c22491fe7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -34,10 +34,12 @@ #include #include #include +#include #include #include "main.h" #include "../nfpcore/nfp_cpp.h" +#include "../nfp_net.h" #include "../nfp_net_repr.h" #include "./cmsg.h" @@ -155,7 +157,8 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) rcu_read_unlock(); } -void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) +static void +nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb) { struct nfp_flower_cmsg_hdr *cmsg_hdr; enum nfp_flower_cmsg_type_port type; @@ -184,3 +187,22 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) out: dev_kfree_skb_any(skb); } + +void nfp_flower_cmsg_process_rx(struct work_struct *work) +{ + struct nfp_flower_priv *priv; + struct sk_buff *skb; + + priv = container_of(work, struct nfp_flower_priv, cmsg_work); + + while ((skb = skb_dequeue(&priv->cmsg_skbs))) + nfp_flower_cmsg_process_one_rx(priv->nn->app, skb); +} + +void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb) +{ + struct nfp_flower_priv *priv = app->priv; + + skb_queue_tail(&priv->cmsg_skbs, skb); + schedule_work(&priv->cmsg_work); +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index aa92a8711a02..a2ec60344236 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -330,6 +330,7 @@ nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx, unsigned int nbi, unsigned int nbi_port, unsigned int phys_port); int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok); +void nfp_flower_cmsg_process_rx(struct work_struct *work); void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb); struct sk_buff * nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index b905454b30ca..3088e959f2a3 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -332,6 +332,7 @@ err_invalid_port: static int nfp_flower_init(struct nfp_app *app) { const struct nfp_pf *pf = app->pf; + struct nfp_flower_priv *app_priv; u64 version; int err; @@ -362,10 +363,14 @@ static int nfp_flower_init(struct nfp_app *app) return -EINVAL; } - app->priv = vzalloc(sizeof(struct nfp_flower_priv)); - if (!app->priv) + app_priv = vzalloc(sizeof(struct nfp_flower_priv)); + if (!app_priv) return -ENOMEM; + app->priv = app_priv; + skb_queue_head_init(&app_priv->cmsg_skbs); + INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx); + err = nfp_flower_metadata_init(app); if (err) goto err_free_app_priv; @@ -379,6 +384,11 @@ err_free_app_priv: static void nfp_flower_clean(struct nfp_app *app) { + struct nfp_flower_priv *app_priv = app->priv; + + skb_queue_purge(&app_priv->cmsg_skbs); + flush_work(&app_priv->cmsg_work); + 
nfp_flower_metadata_cleanup(app); vfree(app->priv); app->priv = NULL; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 71e4f4f4e9ba..b7043ca9b9fc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -39,6 +39,7 @@ #include #include #include +#include struct net_device; struct nfp_app; @@ -78,6 +79,8 @@ struct nfp_fl_stats_id { * @mask_ids: List of free mask ids * @mask_table: Hash table used to store masks * @flow_table: Hash table used to store flower rules + * @cmsg_work: Workqueue for control messages processing + * @cmsg_skbs: List of skbs for control message processing */ struct nfp_flower_priv { struct nfp_net *nn; @@ -87,6 +90,8 @@ struct nfp_flower_priv { struct nfp_fl_mask_id mask_ids; DECLARE_HASHTABLE(mask_table, NFP_FLOWER_MASK_HASH_BITS); DECLARE_HASHTABLE(flow_table, NFP_FLOWER_HASH_BITS); + struct work_struct cmsg_work; + struct sk_buff_head cmsg_skbs; }; struct nfp_fl_key_ls { -- cgit v1.2.3-55-g7522 From 2dff1962242111cc4517119f690587b87573ffc5 Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 16 Aug 2017 09:37:44 +0200 Subject: nfp: process MTU updates from firmware flower app Now that control message processing occurs in a workqueue rather than a BH handler MTU updates received from the firmware may be safely processed. Signed-off-by: Simon Horman Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c index 6c8c22491fe7..806924b82adc 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c @@ -150,10 +150,17 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb) return; } - if (link) + if (link) { + u16 mtu = be16_to_cpu(msg->mtu); + netif_carrier_on(netdev); - else + + /* An MTU of 0 from the firmware should be ignored */ + if (mtu) + dev_set_mtu(netdev, mtu); + } else { netif_carrier_off(netdev); + } rcu_read_unlock(); } -- cgit v1.2.3-55-g7522 From e543002f77f463501d47fab43acf7ba881e9dcaf Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 15 Aug 2017 21:11:03 +0200 Subject: qdisc: add tracepoint qdisc:qdisc_dequeue for dequeued SKBs The main purpose of this tracepoint is to monitor bulk dequeue in the network qdisc layer, as it cannot be deducted from the existing qdisc stats. The txq_state can be used for determining the reason for zero packet dequeues, see enum netdev_queue_state_t. Notice all packets doesn't necessary activate this tracepoint. As qdiscs with flag TCQ_F_CAN_BYPASS, can directly invoke sch_direct_xmit() when qdisc_qlen is zero. Remember that perf record supports filters like: perf record -e qdisc:qdisc_dequeue \ --filter 'ifindex == 4 && (packets > 1 || txq_state > 0)' Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- include/trace/events/qdisc.h | 50 ++++++++++++++++++++++++++++++++++++++++++++ net/core/net-traces.c | 1 + net/sched/sch_generic.c | 8 +++++-- 3 files changed, 57 insertions(+), 2 deletions(-) create mode 100644 include/trace/events/qdisc.h diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h new file mode 100644 index 000000000000..60d0d8bd336d --- /dev/null +++ b/include/trace/events/qdisc.h @@ -0,0 +1,50 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM qdisc + +#if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_QDISC_H_ + +#include +#include +#include +#include + +TRACE_EVENT(qdisc_dequeue, + + TP_PROTO(struct Qdisc *qdisc, const struct netdev_queue *txq, + int packets, struct sk_buff *skb), + + TP_ARGS(qdisc, txq, packets, skb), + + TP_STRUCT__entry( + __field( struct Qdisc *, qdisc ) + __field(const struct netdev_queue *, txq ) + __field( int, packets ) + __field( void *, skbaddr ) + __field( int, ifindex ) + __field( u32, handle ) + __field( u32, parent ) + __field( unsigned long, txq_state) + ), + + /* skb==NULL indicate packets dequeued was 0, even when packets==1 */ + TP_fast_assign( + __entry->qdisc = qdisc; + __entry->txq = txq; + __entry->packets = skb ? packets : 0; + __entry->skbaddr = skb; + __entry->ifindex = txq->dev ? txq->dev->ifindex : 0; + __entry->handle = qdisc->handle; + __entry->parent = qdisc->parent; + __entry->txq_state = txq->state; + ), + + TP_printk("dequeue ifindex=%d qdisc handle=0x%X parent=0x%X txq_state=0x%lX packets=%d skbaddr=%p", + __entry->ifindex, __entry->handle, __entry->parent, + __entry->txq_state, __entry->packets, __entry->skbaddr ) +); + +#endif /* _TRACE_QDISC_H_ */ + +/* This part must be outside protection */ +#include diff --git a/net/core/net-traces.c b/net/core/net-traces.c index 92da5e4ceb4f..4f1468ccd056 100644 --- a/net/core/net-traces.c +++ b/net/core/net-traces.c @@ -32,6 +32,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_IPV6) #include EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup); diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 57ba406f1437..c6b89a34e8d2 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -29,6 +29,7 @@ #include #include #include +#include /* Qdisc to use by default */ const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops; @@ -126,7 +127,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, q->q.qlen--; } else skb = NULL; - return skb; + goto trace; } *validate = true; skb = q->skb_bad_txq; @@ -139,7 +140,8 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, q->q.qlen--; goto bulk; } - return NULL; + skb = NULL; + goto trace; } if (!(q->flags & TCQ_F_ONETXQUEUE) || !netif_xmit_frozen_or_stopped(txq)) @@ -151,6 +153,8 @@ bulk: else try_bulk_dequeue_skb_slow(q, skb, packets); } +trace: + trace_qdisc_dequeue(q, txq, *packets, skb); return skb; } -- cgit v1.2.3-55-g7522 From ba5c4dac033b3b2f9e6a99793d1284c2dcf98b3f Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 16 Aug 2017 10:05:11 +0100 Subject: net/mlx4: fix spelling mistake: "availible" -> "available" Trivial fix to spelling mistakes in the mlx4 driver Signed-off-by: Colin Ian King Reviewed-by: Yuval Shaia Reviewed-by: Leon Romanovsky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 16 ++++++++-------- drivers/net/ethernet/mellanox/mlx4/fw_qos.c | 6 +++--- drivers/net/ethernet/mellanox/mlx4/fw_qos.h | 10 +++++----- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 97aed30ead21..4ec1ef61a472 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1958,19 +1958,19 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port) int i; int err; int num_vfs; - u16 availible_vpp; + u16 available_vpp; u8 vpp_param[MLX4_NUM_UP]; struct mlx4_qos_manager *port_qos; struct mlx4_priv *priv = mlx4_priv(dev); - err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param); + err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param); if (err) { - mlx4_info(dev, "Failed query availible VPPs\n"); + mlx4_info(dev, "Failed query available VPPs\n"); return; } port_qos = &priv->mfunc.master.qos_ctl[port]; - num_vfs = (availible_vpp / + num_vfs = (available_vpp / bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP)); for (i = 0; i < MLX4_NUM_UP; i++) { @@ -1985,14 +1985,14 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port) } /* Query actual allocated VPP, just to make sure */ - err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param); + err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param); if (err) { - mlx4_info(dev, "Failed query availible VPPs\n"); + mlx4_info(dev, "Failed query available VPPs\n"); return; } port_qos->num_of_qos_vfs = num_vfs; - mlx4_dbg(dev, "Port %d Availible VPPs %d\n", port, availible_vpp); + mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp); for (i = 0; i < MLX4_NUM_UP; i++) mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i, @@ -2891,7 +2891,7 @@ static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port, memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP); if (slave > port_qos->num_of_qos_vfs) { - mlx4_info(dev, "No availible VPP resources for this VF\n"); + mlx4_info(dev, "No available VPP resources for this VF\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c index 8f2fde0487c4..3a09d7122d3b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.c @@ -65,7 +65,7 @@ struct mlx4_set_port_scheduler_context { /* Granular Qos (per VF) section */ struct mlx4_alloc_vpp_param { - __be32 availible_vpp; + __be32 available_vpp; __be32 vpp_p_up[MLX4_NUM_UP]; }; @@ -157,7 +157,7 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER); int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, - u16 *availible_vpp, u8 *vpp_p_up) + u16 *available_vpp, u8 *vpp_p_up) { int i; int err; @@ -179,7 +179,7 @@ int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, goto out; /* Total number of supported VPPs */ - *availible_vpp = (u16)be32_to_cpu(out_param->availible_vpp); + *available_vpp = (u16)be32_to_cpu(out_param->available_vpp); for (i = 0; i < MLX4_NUM_UP; i++) vpp_p_up[i] = (u8)be32_to_cpu(out_param->vpp_p_up[i]); diff --git a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h index ac1f331878e6..582997577a04 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw_qos.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw_qos.h @@ -84,23 +84,23 @@ int 
mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc); int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw, u8 *pg, u16 *ratelimit); /** - * mlx4_ALLOCATE_VPP_get - Query port VPP availible resources and allocation. - * Before distribution of VPPs to priorities, only availible_vpp is returned. + * mlx4_ALLOCATE_VPP_get - Query port VPP available resources and allocation. + * Before distribution of VPPs to priorities, only available_vpp is returned. * After initialization it returns the distribution of VPPs among priorities. * * @dev: mlx4_dev. * @port: Physical port number. - * @availible_vpp: Pointer to variable where number of availible VPPs is stored + * @available_vpp: Pointer to variable where number of available VPPs is stored * @vpp_p_up: Distribution of VPPs to priorities is stored in this array * * Returns 0 on success or a negative mlx4_core errno code. **/ int mlx4_ALLOCATE_VPP_get(struct mlx4_dev *dev, u8 port, - u16 *availible_vpp, u8 *vpp_p_up); + u16 *available_vpp, u8 *vpp_p_up); /** * mlx4_ALLOCATE_VPP_set - Distribution of VPPs among differnt priorities. * The total number of VPPs assigned to all for a port must not exceed - * the value reported by availible_vpp in mlx4_ALLOCATE_VPP_get. + * the value reported by available_vpp in mlx4_ALLOCATE_VPP_get. * VPP allocation is allowed only after the port type has been set, * and while no QPs are open for this port. * -- cgit v1.2.3-55-g7522 From 0bbd7dad34f81e5e724cb08252160a1796c388b2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 16 Aug 2017 22:14:33 +0800 Subject: tun: make tun_build_skb() thread safe tun_build_skb() is not thread safe since it uses per queue page frag, this will break things when multiple threads are sending through same queue. Switch to use per-thread generator (no lock involved). Fixes: 66ccbc9c87c2 ("tap: use build_skb() for small packet") Tested-by: Jason Wang Signed-off-by: Eric Dumazet Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/net/tun.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index f5017121cd57..19cbbbb1b63b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -175,7 +175,6 @@ struct tun_file { struct list_head next; struct tun_struct *detached; struct skb_array tx_array; - struct page_frag alloc_frag; }; struct tun_flow_entry { @@ -578,8 +577,6 @@ static void __tun_detach(struct tun_file *tfile, bool clean) } if (tun) skb_array_cleanup(&tfile->tx_array); - if (tfile->alloc_frag.page) - put_page(tfile->alloc_frag.page); sock_put(&tfile->sk); } } @@ -1272,7 +1269,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, struct virtio_net_hdr *hdr, int len, int *generic_xdp) { - struct page_frag *alloc_frag = &tfile->alloc_frag; + struct page_frag *alloc_frag = &current->task_frag; struct sk_buff *skb; struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(len + TUN_RX_PAD) + @@ -2580,8 +2577,6 @@ static int tun_chr_open(struct inode *inode, struct file * file) tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; - tfile->alloc_frag.page = NULL; - file->private_data = tfile; INIT_LIST_HEAD(&tfile->next); -- cgit v1.2.3-55-g7522 From d978db8dbebb60a51492da966689dfc0c9216c44 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 16 Aug 2017 17:15:18 +0200 Subject: net: sched: cls_flower: fix ndo_setup_tc type for stats call I made a stupid mistake using TC_CLSFLOWER_STATS instead of TC_SETUP_CLSFLOWER.
Funny thing is that both are defined as "2" so it actually did not cause any harm. Anyway, fixing it now. Fixes: 2572ac53c46f ("net: sched: make type an argument for ndo_setup_tc") Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/sched/cls_flower.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 052e902dc71c..bd9dab41f8af 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -289,7 +289,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) cls_flower.cookie = (unsigned long) f; cls_flower.exts = &f->exts; - dev->netdev_ops->ndo_setup_tc(dev, TC_CLSFLOWER_STATS, + dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower); } -- cgit v1.2.3-55-g7522 From cf56e3b98c5358883c8df5ed8e04661481225a8f Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 16 Aug 2017 15:02:12 -0700 Subject: bpf: sockmap state change warning fix psock will uninitialized in default case we need to do the same psock lookup and check as in other branch. Fixes compile warning below. kernel/bpf/sockmap.c: In function ‘smap_state_change’: kernel/bpf/sockmap.c:156:21: warning: ‘psock’ may be used uninitialized in this function [-Wmaybe-uninitialized] struct smap_psock *psock; Fixes: 174a79ff9515 ("bpf: sockmap with sk redirect support") Reported-by: David Miller Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- kernel/bpf/sockmap.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 792f0addfafa..f7e5e6cf124a 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -188,6 +188,9 @@ static void smap_state_change(struct sock *sk) smap_release_sock(sk); break; default: + psock = smap_psock_sk(sk); + if (unlikely(!psock)) + break; smap_report_sk_error(psock, EPIPE); break; } -- cgit v1.2.3-55-g7522 From 6bdc9c4c31c81688e19cb186d49be01bbb6a1618 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 16 Aug 2017 15:02:32 -0700 Subject: bpf: sock_map fixes for !CONFIG_BPF_SYSCALL and !STREAM_PARSER Resolve issues with !CONFIG_BPF_SYSCALL and !STREAM_PARSER net/core/filter.c: In function ‘do_sk_redirect_map’: net/core/filter.c:1881:3: error: implicit declaration of function ‘__sock_map_lookup_elem’ [-Werror=implicit-function-declaration] sk = __sock_map_lookup_elem(ri->map, ri->ifindex); ^ net/core/filter.c:1881:6: warning: assignment makes pointer from integer without a cast [enabled by default] sk = __sock_map_lookup_elem(ri->map, ri->ifindex); Fixes: 174a79ff9515 ("bpf: sockmap with sk redirect support") Reported-by: Eric Dumazet Signed-off-by: John Fastabend Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/linux/bpf.h | 10 +++++++++- include/linux/bpf_types.h | 2 ++ kernel/bpf/Makefile | 5 ++++- kernel/bpf/core.c | 1 + 4 files changed, 16 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index a4145e9c74b5..1cc6c5ff61ec 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -313,7 +313,6 @@ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); /* Map specifics */ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); -struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); void __dev_map_insert_ctx(struct bpf_map *map, u32 index); void __dev_map_flush(struct bpf_map *map); @@ -377,6 +376,15 @@ static inline void __dev_map_flush(struct bpf_map *map) } #endif /* CONFIG_BPF_SYSCALL */ +#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) +struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); +#else +static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) +{ + return NULL; +} +#endif + /* verifier prototypes for helper functions called from eBPF programs */ extern const struct bpf_func_proto bpf_map_lookup_elem_proto; extern const struct bpf_func_proto bpf_map_update_elem_proto; diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index fa805074d168..6f1a567667b8 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -38,5 +38,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) +#ifdef CONFIG_STREAM_PARSER BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) #endif +#endif diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index aa24287db888..897daa005b23 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -3,7 +3,10 @@ obj-y := core.o obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o ifeq ($(CONFIG_NET),y) -obj-$(CONFIG_BPF_SYSCALL) += devmap.o sockmap.o +obj-$(CONFIG_BPF_SYSCALL) += devmap.o +ifeq ($(CONFIG_STREAM_PARSER),y) +obj-$(CONFIG_BPF_SYSCALL) += sockmap.o +endif endif ifeq ($(CONFIG_PERF_EVENTS),y) obj-$(CONFIG_BPF_SYSCALL) += stackmap.o diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index c69e7f5bfde7..917cc04a0a94 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1438,6 +1438,7 @@ const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak; const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak; const struct bpf_func_proto bpf_get_current_comm_proto __weak; +const struct bpf_func_proto bpf_sock_map_update_proto __weak; const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void) { -- cgit v1.2.3-55-g7522 From 774c46732ddba4632fa735beb17589aac90d5b49 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Wed, 16 Aug 2017 15:40:44 -0700 Subject: tcp: Export tcp_{sendpage,sendmsg}_locked() for ipv6. Signed-off-by: David S. 
Miller --- net/ipv4/tcp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 71b25567e787..d25e3bcca66b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1058,6 +1058,7 @@ int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, return do_tcp_sendpages(sk, page, offset, size, flags); } +EXPORT_SYMBOL_GPL(tcp_sendpage_locked); int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) @@ -1436,6 +1437,7 @@ out_err: } return err; } +EXPORT_SYMBOL_GPL(tcp_sendmsg_locked); int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { -- cgit v1.2.3-55-g7522 From 9a603b8e1136f2b55f780fefbcbf84d31844ff2b Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 16 Aug 2017 08:56:24 -0700 Subject: vmbus: remove unused vmbus_sendpacket_multipagebuffer This function is not used anywhere in current code. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/hv/channel.c | 56 -------------------------------------------------- include/linux/hyperv.h | 6 ------ 2 files changed, 62 deletions(-) diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index e57cc40cb768..756a1e841142 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -814,62 +814,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, } EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc); -/* - * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet - * using a GPADL Direct packet type. - */ -int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, - struct hv_multipage_buffer *multi_pagebuffer, - void *buffer, u32 bufferlen, u64 requestid) -{ - struct vmbus_channel_packet_multipage_buffer desc; - u32 descsize; - u32 packetlen; - u32 packetlen_aligned; - struct kvec bufferlist[3]; - u64 aligned_data = 0; - u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset, - multi_pagebuffer->len); - - if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT) - return -EINVAL; - - /* - * Adjust the size down since vmbus_channel_packet_multipage_buffer is - * the largest size we support - */ - descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) - - ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) * - sizeof(u64)); - packetlen = descsize + bufferlen; - packetlen_aligned = ALIGN(packetlen, sizeof(u64)); - - - /* Setup the descriptor */ - desc.type = VM_PKT_DATA_USING_GPA_DIRECT; - desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; - desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */ - desc.length8 = (u16)(packetlen_aligned >> 3); - desc.transactionid = requestid; - desc.rangecount = 1; - - desc.range.len = multi_pagebuffer->len; - desc.range.offset = multi_pagebuffer->offset; - - memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array, - pfncount * sizeof(u64)); - - bufferlist[0].iov_base = &desc; - bufferlist[0].iov_len = descsize; - bufferlist[1].iov_base = buffer; - bufferlist[1].iov_len = bufferlen; - bufferlist[2].iov_base = &aligned_data; - bufferlist[2].iov_len = (packetlen_aligned - packetlen); - - return hv_ringbuffer_write(channel, bufferlist, 3); -} -EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer); - /** * vmbus_recvpacket() - Retrieve the user packet on the specified channel * @channel: Pointer to vmbus_channel structure. 
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index b7d7bbec74e0..39a080ce17da 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1052,12 +1052,6 @@ extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, u64 requestid, u32 flags); -extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel, - struct hv_multipage_buffer *mpb, - void *buffer, - u32 bufferlen, - u64 requestid); - extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, struct vmbus_packet_mpb_array *mpb, u32 desc_size, -- cgit v1.2.3-55-g7522 From 5a668d8cddbe8bf14379ce110c49ca088a1e9fae Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 16 Aug 2017 08:56:25 -0700 Subject: vmbus: remove unused vmubs_sendpacket_pagebuffer_ctl The function vmbus_sendpacket_pagebuffer_ctl was never used directly. Just have vmbus_send_pagebuffer Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/hv/channel.c | 30 ++++++------------------------ drivers/net/hyperv/netvsc.c | 10 ++++------ include/linux/hyperv.h | 8 -------- 3 files changed, 10 insertions(+), 38 deletions(-) diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 756a1e841142..9223fe8823e0 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -702,16 +702,16 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, EXPORT_SYMBOL(vmbus_sendpacket); /* - * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer + * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer * packets using a GPADL Direct packet type. This interface allows you * to control notifying the host. This will be useful for sending * batched data. Also the sender can control the send flags * explicitly. */ -int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, - struct hv_page_buffer pagebuffers[], - u32 pagecount, void *buffer, u32 bufferlen, - u64 requestid, u32 flags) +int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, + struct hv_page_buffer pagebuffers[], + u32 pagecount, void *buffer, u32 bufferlen, + u64 requestid) { int i; struct vmbus_channel_packet_page_buffer desc; @@ -736,7 +736,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, /* Setup the descriptor */ desc.type = VM_PKT_DATA_USING_GPA_DIRECT; - desc.flags = flags; + desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */ desc.length8 = (u16)(packetlen_aligned >> 3); desc.transactionid = requestid; @@ -757,24 +757,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, return hv_ringbuffer_write(channel, bufferlist, 3); } -EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl); - -/* - * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer - * packets using a GPADL Direct packet type. 
- */ -int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, - struct hv_page_buffer pagebuffers[], - u32 pagecount, void *buffer, u32 bufferlen, - u64 requestid) -{ - u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; - - return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount, - buffer, bufferlen, - requestid, flags); - -} EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer); /* diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 0530e7d729e1..6031102cbba3 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -775,12 +775,10 @@ static inline int netvsc_send_pkt( if (packet->cp_partial) pb += packet->rmsg_pgcnt; - ret = vmbus_sendpacket_pagebuffer_ctl(out_channel, - pb, packet->page_buf_cnt, - &nvmsg, - sizeof(struct nvsp_message), - req_id, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + ret = vmbus_sendpacket_pagebuffer(out_channel, + pb, packet->page_buf_cnt, + &nvmsg, sizeof(nvmsg), + req_id); } else { ret = vmbus_sendpacket_ctl(out_channel, &nvmsg, sizeof(struct nvsp_message), diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 39a080ce17da..9692592d43a3 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1044,14 +1044,6 @@ extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, u32 bufferlen, u64 requestid); -extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel, - struct hv_page_buffer pagebuffers[], - u32 pagecount, - void *buffer, - u32 bufferlen, - u64 requestid, - u32 flags); - extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, struct vmbus_packet_mpb_array *mpb, u32 desc_size, -- cgit v1.2.3-55-g7522 From 5dd0fb9b9ffc0ef9b312d05604f4ad0fffc50505 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Wed, 16 Aug 2017 08:56:26 -0700 Subject: vmbus: remove unused vmbus_sendpacket_ctl The only usage of vmbus_sendpacket_ctl was by vmbus_sendpacket. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/hv/channel.c | 43 +++++++++++++++++-------------------------- drivers/net/hyperv/netvsc.c | 9 ++++----- include/linux/hyperv.h | 7 ------- 3 files changed, 21 insertions(+), 38 deletions(-) diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 9223fe8823e0..d9e9676e2b40 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -647,9 +647,23 @@ void vmbus_close(struct vmbus_channel *channel) } EXPORT_SYMBOL_GPL(vmbus_close); -int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, - u32 bufferlen, u64 requestid, - enum vmbus_packet_type type, u32 flags) +/** + * vmbus_sendpacket() - Send the specified buffer on the given channel + * @channel: Pointer to vmbus_channel structure. + * @buffer: Pointer to the buffer you want to receive the data into. + * @bufferlen: Maximum size of what the the buffer will hold + * @requestid: Identifier of the request + * @type: Type of packet that is being send e.g. negotiate, time + * packet etc. + * + * Sends data in @buffer directly to hyper-v via the vmbus + * This will send the data unparsed to hyper-v. + * + * Mainly used by Hyper-V drivers. 
+ */ +int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, + u32 bufferlen, u64 requestid, + enum vmbus_packet_type type, u32 flags) { struct vmpacket_descriptor desc; u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen; @@ -676,29 +690,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer, return hv_ringbuffer_write(channel, bufferlist, num_vecs); } -EXPORT_SYMBOL(vmbus_sendpacket_ctl); - -/** - * vmbus_sendpacket() - Send the specified buffer on the given channel - * @channel: Pointer to vmbus_channel structure. - * @buffer: Pointer to the buffer you want to receive the data into. - * @bufferlen: Maximum size of what the the buffer will hold - * @requestid: Identifier of the request - * @type: Type of packet that is being send e.g. negotiate, time - * packet etc. - * - * Sends data in @buffer directly to hyper-v via the vmbus - * This will send the data unparsed to hyper-v. - * - * Mainly used by Hyper-V drivers. - */ -int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, - u32 bufferlen, u64 requestid, - enum vmbus_packet_type type, u32 flags) -{ - return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid, - type, flags); -} EXPORT_SYMBOL(vmbus_sendpacket); /* diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 6031102cbba3..0062b802676f 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -780,11 +780,10 @@ static inline int netvsc_send_pkt( &nvmsg, sizeof(nvmsg), req_id); } else { - ret = vmbus_sendpacket_ctl(out_channel, &nvmsg, - sizeof(struct nvsp_message), - req_id, - VM_PKT_DATA_INBAND, - VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); + ret = vmbus_sendpacket(out_channel, + &nvmsg, sizeof(nvmsg), + req_id, VM_PKT_DATA_INBAND, + VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); } if (ret == 0) { diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 9692592d43a3..a5f961c4149e 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1030,13 +1030,6 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel, enum vmbus_packet_type type, u32 flags); -extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel, - void *buffer, - u32 bufferLen, - u64 requestid, - enum vmbus_packet_type type, - u32 flags); - extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, struct hv_page_buffer pagebuffers[], u32 pagecount, -- cgit v1.2.3-55-g7522 From 2af2c2c77e5a89a02f508571c3994d9e78f9013d Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Thu, 10 Aug 2017 18:11:25 +0530 Subject: mwifiex: do not use random MAC for pre-association scanning Driver should use random MAC address only if the scan is requested by user(provided NL80211_SCAN_FLAG_RANDOM_ADDR is set in scan request). It should not be used for a scan performed before association. 
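For reference, the rule this patch (and the next one in the series) enforces can be condensed into a short sketch. The helper below is illustrative only and not part of mwifiex; it relies on the stock NL80211_SCAN_FLAG_RANDOM_ADDR flag and the etherdevice helpers.

    #include <linux/etherdevice.h>
    #include <net/cfg80211.h>

    /* Copy the randomised MAC into a scan config only when userspace asked
     * for it; driver-internal (pre-association / hidden-SSID) scans keep
     * the real address. The function name is hypothetical. */
    static void example_fill_scan_mac(const struct cfg80211_scan_request *req,
                                      u8 *dst, const u8 *random_mac)
    {
            if (req && (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR))
                    ether_addr_copy(dst, random_mac);
            else
                    eth_zero_addr(dst);
    }
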
Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/scan.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index d8e8b857ddfb..79b4aff3509e 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -2790,7 +2790,6 @@ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv, if (!scan_cfg) return -ENOMEM; - ether_addr_copy(scan_cfg->random_mac, priv->random_mac); scan_cfg->ssid_list = req_ssid; scan_cfg->num_ssids = 1; -- cgit v1.2.3-55-g7522 From 89001c1c8dc32698112df0119ad5c632405892ca Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Thu, 10 Aug 2017 18:11:26 +0530 Subject: mwifiex: check for NL80211_SCAN_FLAG_RANDOM_ADDR during hidden SSID scan At the end of user scan request, driver will perform an active scan for hidden SSIDs in passive channels. While doing this, driver unconditionally adding random_mac in scan command, which is no expected. It should add random_mac only if scan_request has NL80211_SCAN_FLAG_RANDOM_ADDR flag set. Signed-off-by: Ganapathi Bhat Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/scan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 79b4aff3509e..c9d41ed77fc7 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1947,7 +1947,8 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv) } adapter->active_scan_triggered = true; - ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac); + if (priv->scan_request->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) + ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac); user_scan_cfg->num_ssids = priv->scan_request->n_ssids; user_scan_cfg->ssid_list = priv->scan_request->ssids; -- cgit v1.2.3-55-g7522 From 0db63e37992cdf159ac1f27e0cbe0f0bae62ca3a Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 15 Aug 2017 16:06:31 +0300 Subject: qtnfmac: remove unused qtnf_rx_frame declaration Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/bus.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h index dda05003d522..56e5fed92a2a 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/bus.h +++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h @@ -130,7 +130,6 @@ static __always_inline void qtnf_bus_unlock(struct qtnf_bus *bus) /* interface functions from common layer */ -void qtnf_rx_frame(struct device *dev, struct sk_buff *rxp); int qtnf_core_attach(struct qtnf_bus *bus); void qtnf_core_detach(struct qtnf_bus *bus); void qtnf_txflowblock(struct device *dev, bool state); -- cgit v1.2.3-55-g7522 From 7376947dfb8094b7c8307d61f49ce48d4d6aec09 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 15 Aug 2017 16:06:32 +0300 Subject: qtnfmac: switch to napi_gro_receive Use napi_gro_receive() rather than netif_receive_skb() to improve performance when GRO is enabled. 
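As background, napi_gro_receive() slots into the same place of a NAPI poll loop as netif_receive_skb(), but runs packets through the GRO engine first so that TCP segments can be merged before they hit the stack. A generic sketch of that loop follows; fetch_next_rx_skb() and rx_irq_enable() are placeholders, not qtnfmac functions.

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    /* placeholder helpers: assumed to unmap the DMA buffer and set skb->dev,
     * and to re-arm the device's RX interrupt, respectively */
    struct sk_buff *fetch_next_rx_skb(struct napi_struct *napi);
    void rx_irq_enable(struct napi_struct *napi);

    static int example_rx_poll(struct napi_struct *napi, int budget)
    {
            struct sk_buff *skb;
            int done = 0;

            while (done < budget && (skb = fetch_next_rx_skb(napi))) {
                    skb->protocol = eth_type_trans(skb, skb->dev);
                    napi_gro_receive(napi, skb);  /* GRO-aware hand-off to the stack */
                    done++;
            }

            if (done < budget) {
                    napi_complete(napi);   /* done polling ... */
                    rx_irq_enable(napi);   /* ... so interrupts can be re-enabled */
            }

            return done;
    }
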
Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index ae8acc1bf291..08b35dc30bc8 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -762,7 +762,7 @@ static int qtnf_rx_poll(struct napi_struct *napi, int budget) ndev->stats.rx_bytes += skb->len; skb->protocol = eth_type_trans(skb, ndev); - netif_receive_skb(skb); + napi_gro_receive(napi, skb); } else { pr_debug("drop untagged skb\n"); bus->mux_dev.stats.rx_dropped++; -- cgit v1.2.3-55-g7522 From c58730cab8ea02b9f09404d548de7eab7cc33302 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 15 Aug 2017 16:06:33 +0300 Subject: qtnfmac: use __netdev_alloc_skb_ip_align Replace __dev_alloc_skb and explicit NET_IP_ALIGN alignment by built-in __netdev_alloc_skb_ip_align function. Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 08b35dc30bc8..079aa1693ff5 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -429,8 +429,7 @@ static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index) struct sk_buff *skb; dma_addr_t paddr; - skb = __dev_alloc_skb(SKB_BUF_SIZE + NET_IP_ALIGN, - GFP_ATOMIC); + skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC); if (!skb) { priv->rx_skb[rx_bd_index] = NULL; return -ENOMEM; @@ -438,8 +437,6 @@ static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index) priv->rx_skb[rx_bd_index] = skb; - skb_reserve(skb, NET_IP_ALIGN); - rxbd = &priv->rx_bd_vbase[rx_bd_index]; paddr = pci_map_single(priv->pdev, skb->data, -- cgit v1.2.3-55-g7522 From 867ba964fa6990686116b706919f90848912e7ed Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 15 Aug 2017 16:06:34 +0300 Subject: qtnfmac: skb2rbd_attach cleanup Update PCIE_HDP_TX_HOST_Q_WR_PTR register in skb2rbd_attach as a part of procedure of passing new Rx buffer to hardware. Sync up all the the qtnf_rx_bd descriptor updates before passing Rx buffer to hardware. 
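The ordering this cleanup enforces is the usual descriptor-publish pattern: complete all CPU writes to the buffer descriptor, issue a write barrier, and only then move the hardware write pointer. Condensed from the patched skb2rbd_attach() below (the wrapper function itself is made up; the types, macros and register accessors are the driver's own):

    static void example_publish_rx_bd(struct qtnf_pcie_bus_priv *priv,
                                      struct qtnf_rx_bd *rxbd,
                                      dma_addr_t paddr, u16 index)
    {
            rxbd->addr   = cpu_to_le32(QTN_HOST_LO32(paddr));
            rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr));
            rxbd->info   = 0x0;

            wmb();  /* descriptor writes must be visible before ... */

            writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base));
            /* ... the device is told a new buffer is available */
    }
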
Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- .../net/wireless/quantenna/qtnfmac/pearl/pcie.c | 31 +++++++++------------- 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 079aa1693ff5..a0b65d487ddb 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -411,11 +411,6 @@ static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv) writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16, PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base)); - priv->hw_txproc_wr_ptr = priv->rx_bd_num - rx_bd_reserved_param; - - writel(priv->hw_txproc_wr_ptr, - PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base)); - pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); priv->rx_bd_index = 0; @@ -423,7 +418,7 @@ static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv) return 0; } -static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index) +static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 index) { struct qtnf_rx_bd *rxbd; struct sk_buff *skb; @@ -431,13 +426,12 @@ static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index) skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC); if (!skb) { - priv->rx_skb[rx_bd_index] = NULL; + priv->rx_skb[index] = NULL; return -ENOMEM; } - priv->rx_skb[rx_bd_index] = skb; - - rxbd = &priv->rx_bd_vbase[rx_bd_index]; + priv->rx_skb[index] = skb; + rxbd = &priv->rx_bd_vbase[index]; paddr = pci_map_single(priv->pdev, skb->data, SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); @@ -446,17 +440,20 @@ static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 rx_bd_index) return -ENOMEM; } - writel(QTN_HOST_LO32(paddr), - PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base)); - writel(QTN_HOST_HI32(paddr), - PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base)); - /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */ rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr)); rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr)); - rxbd->info = 0x0; + /* sync up all descriptor updates */ + wmb(); + + writel(QTN_HOST_HI32(paddr), + PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base)); + writel(QTN_HOST_LO32(paddr), + PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base)); + + writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base)); return 0; } @@ -787,8 +784,6 @@ static int qtnf_rx_poll(struct napi_struct *napi, int budget) break; } - writel(priv->hw_txproc_wr_ptr, - PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base)); } if (processed < budget) { -- cgit v1.2.3-55-g7522 From dfb13db68f3eaad21c9bbb9ad5e7df502a8deb83 Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 15 Aug 2017 16:06:35 +0300 Subject: qtnfmac: decrease default Tx queue size Avoid extra buffering in driver by default. Use max hardware Tx queue size. 
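For readers who have not used <linux/circ_buf.h>: CIRC_CNT() and CIRC_SPACE() assume a ring whose size is a power of two and compute, from a write index and a read index, how many entries are ready to consume and how many slots are still free. A minimal sketch (the structure and helpers are illustrative; the two macros are used in exactly this way for the tx_bd_*/rx_bd_* indices below):

    #include <linux/circ_buf.h>
    #include <linux/types.h>

    struct example_ring {
            u32 w_index;  /* producer (write) index */
            u32 r_index;  /* consumer (read) index */
            u32 size;     /* number of slots, must be a power of two */
    };

    static bool example_ring_has_space(const struct example_ring *r)
    {
            /* slots the producer may still fill */
            return CIRC_SPACE(r->w_index, r->r_index, r->size) > 0;
    }

    static bool example_ring_has_data(const struct example_ring *r)
    {
            /* entries the consumer can process or reclaim */
            return CIRC_CNT(r->w_index, r->r_index, r->size) > 0;
    }
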
Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index a0b65d487ddb..f18e8a724c68 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -36,7 +36,7 @@ static bool use_msi = true; module_param(use_msi, bool, 0644); MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt"); -static unsigned int tx_bd_size_param = 256; +static unsigned int tx_bd_size_param = 32; module_param(tx_bd_size_param, uint, 0644); MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size"); -- cgit v1.2.3-55-g7522 From 3cbc3a0f19ac73e7711dcc1b6d1528035a5d9cdc Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 15 Aug 2017 16:06:36 +0300 Subject: qtnfmac: switch to kernel circ_buf implementation Current code for both Rx and Tx queue management is a custom and incomplete circular buffer implementation. It makes a lot of sense to switch to kernel built-in circ_buf implementation. Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- .../net/wireless/quantenna/qtnfmac/pearl/pcie.c | 206 +++++++++++++-------- .../quantenna/qtnfmac/pearl/pcie_bus_priv.h | 10 +- 2 files changed, 136 insertions(+), 80 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index f18e8a724c68..f8207ab25576 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "qtn_hw_ids.h" #include "pcie_bus_priv.h" @@ -44,10 +45,6 @@ static unsigned int rx_bd_size_param = 256; module_param(rx_bd_size_param, uint, 0644); MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size"); -static unsigned int rx_bd_reserved_param = 16; -module_param(rx_bd_reserved_param, uint, 0644); -MODULE_PARM_DESC(rx_bd_reserved_param, "Reserved RX descriptors"); - static u8 flashboot = 1; module_param(flashboot, byte, 0644); MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS"); @@ -392,9 +389,8 @@ static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv) pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); - priv->tx_bd_reclaim_start = 0; - priv->tx_bd_index = 0; - priv->tx_queue_len = 0; + priv->tx_bd_r_index = 0; + priv->tx_bd_w_index = 0; /* rx bd */ @@ -413,8 +409,6 @@ static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv) pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); - priv->rx_bd_index = 0; - return 0; } @@ -445,6 +439,8 @@ static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 index) rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr)); rxbd->info = 0x0; + priv->rx_bd_w_index = index; + /* sync up all descriptor updates */ wmb(); @@ -510,6 +506,8 @@ static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv) priv->tx_bd_num = tx_bd_size_param; priv->rx_bd_num = rx_bd_size_param; + priv->rx_bd_w_index = 0; + priv->rx_bd_r_index = 0; ret = alloc_skb_array(priv); if (ret) { @@ -532,67 +530,69 @@ static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv) return ret; } -static int qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv) +static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv) { struct qtnf_tx_bd *txbd; struct sk_buff *skb; 
dma_addr_t paddr; - int last_sent; - int count; + u32 tx_done_index; + int count = 0; int i; - last_sent = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) - % priv->tx_bd_num; - i = priv->tx_bd_reclaim_start; - count = 0; - while (i != last_sent) { - skb = priv->tx_skb[i]; - if (!skb) - break; + tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) + & (priv->tx_bd_num - 1); - txbd = &priv->tx_bd_vbase[i]; - paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), - le32_to_cpu(txbd->addr)); - pci_unmap_single(priv->pdev, paddr, skb->len, PCI_DMA_TODEVICE); + i = priv->tx_bd_r_index; - if (skb->dev) { - skb->dev->stats.tx_packets++; - skb->dev->stats.tx_bytes += skb->len; + while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) { + skb = priv->tx_skb[i]; + if (likely(skb)) { + txbd = &priv->tx_bd_vbase[i]; + paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h), + le32_to_cpu(txbd->addr)); + pci_unmap_single(priv->pdev, paddr, skb->len, + PCI_DMA_TODEVICE); + + if (skb->dev) { + skb->dev->stats.tx_packets++; + skb->dev->stats.tx_bytes += skb->len; + + if (netif_queue_stopped(skb->dev)) + netif_wake_queue(skb->dev); + } - if (netif_queue_stopped(skb->dev)) - netif_wake_queue(skb->dev); + dev_kfree_skb_any(skb); } - dev_kfree_skb_any(skb); priv->tx_skb[i] = NULL; - priv->tx_queue_len--; count++; if (++i >= priv->tx_bd_num) i = 0; } - priv->tx_bd_reclaim_start = i; priv->tx_reclaim_done += count; priv->tx_reclaim_req++; + priv->tx_bd_r_index = i; - return count; } -static bool qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv) +static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv) { - if (priv->tx_queue_len >= priv->tx_bd_num - 1) { + if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)) { pr_err_ratelimited("reclaim full Tx queue\n"); qtnf_pcie_data_tx_reclaim(priv); - if (priv->tx_queue_len >= priv->tx_bd_num - 1) { + if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)) { priv->tx_full_count++; - return false; + return 0; } } - return true; + return 1; } static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) @@ -617,7 +617,7 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) return NETDEV_TX_BUSY; } - i = priv->tx_bd_index; + i = priv->tx_bd_w_index; priv->tx_skb[i] = skb; len = skb->len; @@ -649,8 +649,7 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) if (++i >= priv->tx_bd_num) i = 0; - priv->tx_bd_index = i; - priv->tx_queue_len++; + priv->tx_bd_w_index = i; tx_done: if (ret && skb) { @@ -709,16 +708,19 @@ irq_done: return IRQ_HANDLED; } -static inline void hw_txproc_wr_ptr_inc(struct qtnf_pcie_bus_priv *priv) +static int qtnf_rx_data_ready(struct qtnf_pcie_bus_priv *priv) { - u32 index; + u16 index = priv->rx_bd_r_index; + struct qtnf_rx_bd *rxbd; + u32 descw; - index = priv->hw_txproc_wr_ptr; + rxbd = &priv->rx_bd_vbase[index]; + descw = le32_to_cpu(rxbd->info); - if (++index >= priv->rx_bd_num) - index = 0; + if (descw & QTN_TXDONE_MASK) + return 1; - priv->hw_txproc_wr_ptr = index; + return 0; } static int qtnf_rx_poll(struct napi_struct *napi, int budget) @@ -730,26 +732,52 @@ static int qtnf_rx_poll(struct napi_struct *napi, int budget) int processed = 0; struct qtnf_rx_bd *rxbd; dma_addr_t skb_paddr; + int consume; u32 descw; - u16 index; + u32 psize; + u16 r_idx; + u16 w_idx; int ret; - index = priv->rx_bd_index; - rxbd = &priv->rx_bd_vbase[index]; + while (processed < budget) { - descw = le32_to_cpu(rxbd->info); - while ((descw & QTN_TXDONE_MASK) && 
(processed < budget)) { - skb = priv->rx_skb[index]; + if (!qtnf_rx_data_ready(priv)) + goto rx_out; - if (likely(skb)) { - skb_put(skb, QTN_GET_LEN(descw)); + r_idx = priv->rx_bd_r_index; + rxbd = &priv->rx_bd_vbase[r_idx]; + descw = le32_to_cpu(rxbd->info); + + skb = priv->rx_skb[r_idx]; + psize = QTN_GET_LEN(descw); + consume = 1; + if (!(descw & QTN_TXDONE_MASK)) { + pr_warn("skip invalid rxbd[%d]\n", r_idx); + consume = 0; + } + + if (!skb) { + pr_warn("skip missing rx_skb[%d]\n", r_idx); + consume = 0; + } + + if (skb && (skb_tailroom(skb) < psize)) { + pr_err("skip packet with invalid length: %u > %u\n", + psize, skb_tailroom(skb)); + consume = 0; + } + + if (skb) { skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h), le32_to_cpu(rxbd->addr)); pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); + } + if (consume) { + skb_put(skb, psize); ndev = qtnf_classify_skb(bus, skb); if (likely(ndev)) { ndev->stats.rx_packets++; @@ -762,30 +790,38 @@ static int qtnf_rx_poll(struct napi_struct *napi, int budget) bus->mux_dev.stats.rx_dropped++; dev_kfree_skb_any(skb); } - - processed++; } else { - pr_err("missing rx_skb[%d]\n", index); + if (skb) { + bus->mux_dev.stats.rx_dropped++; + dev_kfree_skb_any(skb); + } } - /* attached rx buffer is passed upstream: map a new one */ - ret = skb2rbd_attach(priv, index); - if (likely(!ret)) { - if (++index >= priv->rx_bd_num) - index = 0; + priv->rx_skb[r_idx] = NULL; + if (++r_idx >= priv->rx_bd_num) + r_idx = 0; - priv->rx_bd_index = index; - hw_txproc_wr_ptr_inc(priv); + priv->rx_bd_r_index = r_idx; - rxbd = &priv->rx_bd_vbase[index]; - descw = le32_to_cpu(rxbd->info); - } else { - pr_err("failed to allocate new rx_skb[%d]\n", index); - break; + /* repalce processed buffer by a new one */ + w_idx = priv->rx_bd_w_index; + while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, + priv->rx_bd_num) > 0) { + if (++w_idx >= priv->rx_bd_num) + w_idx = 0; + + ret = skb2rbd_attach(priv, w_idx); + if (ret) { + pr_err("failed to allocate new rx_skb[%d]\n", + w_idx); + break; + } } + processed++; } +rx_out: if (processed < budget) { napi_complete(napi); qtnf_en_rxdone_irq(priv); @@ -1056,10 +1092,18 @@ static int qtnf_dbg_irq_stats(struct seq_file *s, void *data) { struct qtnf_bus *bus = dev_get_drvdata(s->private); struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); + u32 reg = readl(PCIE_HDP_INT_EN(priv->pcie_reg_base)); + u32 status; seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count); seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count); + status = reg & PCIE_HDP_INT_TX_BITS; + seq_printf(s, "pcie_irq_tx_status(%s)\n", + (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS"); seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count); + status = reg & PCIE_HDP_INT_RX_BITS; + seq_printf(s, "pcie_irq_rx_status(%s)\n", + (status == PCIE_HDP_INT_RX_BITS) ? 
"EN" : "DIS"); return 0; } @@ -1073,10 +1117,24 @@ static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data) seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count); seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done); seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req); - seq_printf(s, "tx_bd_reclaim_start(%u)\n", priv->tx_bd_reclaim_start); - seq_printf(s, "tx_bd_index(%u)\n", priv->tx_bd_index); - seq_printf(s, "rx_bd_index(%u)\n", priv->rx_bd_index); - seq_printf(s, "tx_queue_len(%u)\n", priv->tx_queue_len); + + seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index); + seq_printf(s, "tx_bd_p_index(%u)\n", + readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) + & (priv->tx_bd_num - 1)); + seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index); + seq_printf(s, "tx queue len(%u)\n", + CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)); + + seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index); + seq_printf(s, "rx_bd_p_index(%u)\n", + readl(PCIE_HDP_TX0DMA_CNT(priv->pcie_reg_base)) + & (priv->rx_bd_num - 1)); + seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index); + seq_printf(s, "rx alloc queue len(%u)\n", + CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, + priv->rx_bd_num)); return 0; } diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h index 2a897db2bd79..1b37914299e9 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h @@ -66,13 +66,11 @@ struct qtnf_pcie_bus_priv { void *bd_table_vaddr; u32 bd_table_len; - u32 hw_txproc_wr_ptr; + u32 rx_bd_w_index; + u32 rx_bd_r_index; - u16 tx_bd_reclaim_start; - u16 tx_bd_index; - u32 tx_queue_len; - - u16 rx_bd_index; + u32 tx_bd_w_index; + u32 tx_bd_r_index; u32 pcie_irq_mask; -- cgit v1.2.3-55-g7522 From cc75f9e5bc66dd63ff5770c24f0e34a69177ddeb Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 15 Aug 2017 16:06:37 +0300 Subject: qtnfmac: introduce counter for Rx underflow events Introduce counter for Rx underflow events. Export this counter via debugfs. 
Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 16 ++++++++++++++-- .../net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h | 1 + drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h | 1 + .../wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h | 1 + 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index f8207ab25576..72730aff2a41 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -686,14 +686,21 @@ static irqreturn_t qtnf_interrupt(int irq, void *data) if (!(status & priv->pcie_irq_mask)) goto irq_done; - if (status & PCIE_HDP_INT_RX_BITS) { + if (status & PCIE_HDP_INT_RX_BITS) priv->pcie_irq_rx_count++; + + if (status & PCIE_HDP_INT_TX_BITS) + priv->pcie_irq_tx_count++; + + if (status & PCIE_HDP_INT_HHBM_UF) + priv->pcie_irq_uf_count++; + + if (status & PCIE_HDP_INT_RX_BITS) { qtnf_dis_rxdone_irq(priv); napi_schedule(&bus->mux_napi); } if (status & PCIE_HDP_INT_TX_BITS) { - priv->pcie_irq_tx_count++; qtnf_dis_txdone_irq(priv); tasklet_hi_schedule(&priv->reclaim_tq); } @@ -1104,6 +1111,10 @@ static int qtnf_dbg_irq_stats(struct seq_file *s, void *data) status = reg & PCIE_HDP_INT_RX_BITS; seq_printf(s, "pcie_irq_rx_status(%s)\n", (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS"); + seq_printf(s, "pcie_irq_uf_count(%u)\n", priv->pcie_irq_uf_count); + status = reg & PCIE_HDP_INT_HHBM_UF; + seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n", + (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS"); return 0; } @@ -1189,6 +1200,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) pcie_priv->pcie_irq_count = 0; pcie_priv->pcie_irq_rx_count = 0; pcie_priv->pcie_irq_tx_count = 0; + pcie_priv->pcie_irq_uf_count = 0; pcie_priv->tx_reclaim_done = 0; pcie_priv->tx_reclaim_req = 0; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h index 1b37914299e9..698e42132ed4 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h @@ -78,6 +78,7 @@ struct qtnf_pcie_bus_priv { u32 pcie_irq_count; u32 pcie_irq_rx_count; u32 pcie_irq_tx_count; + u32 pcie_irq_uf_count; u32 tx_full_count; u32 tx_done_count; u32 tx_reclaim_done; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h index e00d508fbcf0..667f5ec457e3 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h @@ -50,6 +50,7 @@ #define PCIE_HDP_INT_RX_BITS (0 \ | PCIE_HDP_INT_EP_TXDMA \ | PCIE_HDP_INT_EP_TXEMPTY \ + | PCIE_HDP_INT_HHBM_UF \ ) #define PCIE_HDP_INT_TX_BITS (0 \ diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h index 78715b8a8ef9..69696f118769 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h @@ -333,6 +333,7 @@ #define PCIE_HDP_INT_RX_LEN_ERR (BIT(2)) #define PCIE_HDP_INT_RX_HDR_LEN_ERR (BIT(3)) #define PCIE_HDP_INT_EP_TXDMA (BIT(12)) +#define PCIE_HDP_INT_HHBM_UF (BIT(13)) #define PCIE_HDP_INT_EP_TXEMPTY (BIT(15)) #define PCIE_HDP_INT_IPC (BIT(29)) -- cgit 
v1.2.3-55-g7522 From 0593da274d4d7fec6edc8a5ad6993e88ab36c8be Mon Sep 17 00:00:00 2001 From: Sergey Matyukevich Date: Tue, 15 Aug 2017 16:06:38 +0300 Subject: qtnfmac: modify tx reclaim locking Perform additional reclaim from qtnf_pcie_data_tx. Lock tx_lock serves only reclaim synchronization purposes. Rename it accordingly and improve granularity moving this lock to qtnf_pcie_data_tx_reclaim. Signed-off-by: Sergey Matyukevich Signed-off-by: Kalle Valo --- drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 17 ++++++----------- .../wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h | 4 ++-- 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 72730aff2a41..cd2f2b667643 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c @@ -534,11 +534,13 @@ static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv) { struct qtnf_tx_bd *txbd; struct sk_buff *skb; + unsigned long flags; dma_addr_t paddr; u32 tx_done_index; int count = 0; int i; + spin_lock_irqsave(&priv->tx_reclaim_lock, flags); tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) & (priv->tx_bd_num - 1); @@ -576,6 +578,7 @@ static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv) priv->tx_reclaim_req++; priv->tx_bd_r_index = i; + spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags); } static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv) @@ -600,20 +603,14 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); dma_addr_t txbd_paddr, skb_paddr; struct qtnf_tx_bd *txbd; - unsigned long flags; int len, i; u32 info; int ret = 0; - spin_lock_irqsave(&priv->tx_lock, flags); - - priv->tx_done_count++; - if (!qtnf_tx_queue_ready(priv)) { if (skb->dev) netif_stop_queue(skb->dev); - spin_unlock_irqrestore(&priv->tx_lock, flags); return NETDEV_TX_BUSY; } @@ -659,7 +656,8 @@ tx_done: dev_kfree_skb_any(skb); } - spin_unlock_irqrestore(&priv->tx_lock, flags); + qtnf_pcie_data_tx_reclaim(priv); + priv->tx_done_count++; return NETDEV_TX_OK; } @@ -1067,11 +1065,8 @@ static int qtnf_bringup_fw(struct qtnf_bus *bus) static void qtnf_reclaim_tasklet_fn(unsigned long data) { struct qtnf_pcie_bus_priv *priv = (void *)data; - unsigned long flags; - spin_lock_irqsave(&priv->tx_lock, flags); qtnf_pcie_data_tx_reclaim(priv); - spin_unlock_irqrestore(&priv->tx_lock, flags); qtnf_en_txdone_irq(priv); } @@ -1192,7 +1187,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) init_completion(&bus->request_firmware_complete); mutex_init(&bus->bus_lock); spin_lock_init(&pcie_priv->irq_lock); - spin_lock_init(&pcie_priv->tx_lock); + spin_lock_init(&pcie_priv->tx_reclaim_lock); /* init stats */ pcie_priv->tx_full_count = 0; diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h index 698e42132ed4..e76a23716ee0 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h @@ -32,8 +32,8 @@ struct qtnf_pcie_bus_priv { /* lock for irq configuration changes */ spinlock_t irq_lock; - /* lock for tx operations */ - spinlock_t tx_lock; + /* lock for tx reclaim operations */ + spinlock_t tx_reclaim_lock; u8 msi_enabled; int mps; -- cgit v1.2.3-55-g7522 From 
6da1e00a539d978339d6a6471b4346c6d82797ff Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:08 +0530 Subject: rsi: advertise ap mode support AP mode support is advertised to cfg80211. Necessary wiphy parameters are initialized. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 28 +++++++++++++++++++++++++++- drivers/net/wireless/rsi/rsi_main.h | 10 ++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 210ad79038ed..2da54932070d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -122,6 +122,23 @@ const u16 rsi_mcsrates[8] = { RSI_RATE_MCS4, RSI_RATE_MCS5, RSI_RATE_MCS6, RSI_RATE_MCS7 }; +static const u32 rsi_max_ap_stas[16] = { + 32, /* 1 - Wi-Fi alone */ + 0, /* 2 */ + 0, /* 3 */ + 0, /* 4 - BT EDR alone */ + 4, /* 5 - STA + BT EDR */ + 32, /* 6 - AP + BT EDR */ + 0, /* 7 */ + 0, /* 8 - BT LE alone */ + 4, /* 9 - STA + BE LE */ + 0, /* 10 */ + 0, /* 11 */ + 0, /* 12 */ + 1, /* 13 - STA + BT Dual */ + 4, /* 14 - AP + BT Dual */ +}; + /** * rsi_is_cipher_wep() - This function determines if the cipher is WEP or not. * @common: Pointer to the driver private structure. @@ -1348,7 +1365,8 @@ int rsi_mac80211_attach(struct rsi_common *common) SET_IEEE80211_PERM_ADDR(hw, common->mac_addr); ether_addr_copy(hw->wiphy->addr_mask, addr_mask); - wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP); wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->retry_short = RETRY_SHORT; wiphy->retry_long = RETRY_LONG; @@ -1363,6 +1381,14 @@ int rsi_mac80211_attach(struct rsi_common *common) wiphy->bands[NL80211_BAND_5GHZ] = &adapter->sbands[NL80211_BAND_5GHZ]; + /* AP Parameters */ + wiphy->max_ap_assoc_sta = rsi_max_ap_stas[common->oper_mode - 1]; + common->max_stations = wiphy->max_ap_assoc_sta; + rsi_dbg(ERR_ZONE, "Max Stations Allowed = %d\n", common->max_stations); + hw->sta_data_size = sizeof(struct rsi_sta); + wiphy->flags = WIPHY_FLAG_REPORTS_OBSS; + wiphy->flags |= WIPHY_FLAG_AP_UAPSD; + wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER; wiphy->reg_notifier = rsi_reg_notify; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index d05b5e0847bc..0077888190c3 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -21,6 +21,13 @@ #include #include +struct rsi_sta { + struct ieee80211_sta *sta; + s16 sta_id; + u16 seq_start[IEEE80211_NUM_TIDS]; + bool start_tx_aggr[IEEE80211_NUM_TIDS]; +}; + struct rsi_hw; #include "rsi_ps.h" @@ -253,6 +260,9 @@ struct rsi_common { u16 beacon_interval; u8 dtim_cnt; + + /* AP mode parameters */ + int max_stations; }; enum host_intf { -- cgit v1.2.3-55-g7522 From 03c34c0d73ae872e6e962e02e3c4e98ae364379b Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:09 +0530 Subject: rsi: add interface changes for ap mode AP mode is handled in add_interface callback of mac80211. Also for AP mode, sending rx filter frame to disallow beacons to host is added. Station structures are initialized to NULL. 
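The interface-type handling this patch introduces boils down to a small mapping from mac80211 vif types to the firmware operating modes defined in rsi_mgmt.h; a condensed, illustrative helper (the function itself does not exist in the driver):

    #include <linux/errno.h>
    #include <net/mac80211.h>

    static int example_vif_to_opmode(enum nl80211_iftype type)
    {
            switch (type) {
            case NL80211_IFTYPE_STATION:
                    return STA_OPMODE;
            case NL80211_IFTYPE_AP:
                    return AP_OPMODE;
            default:
                    return -EOPNOTSUPP;  /* every other interface type is rejected */
            }
    }
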
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 43 +++++++++++++++++++++++------ drivers/net/wireless/rsi/rsi_91x_mgmt.c | 5 ++-- drivers/net/wireless/rsi/rsi_main.h | 3 ++ drivers/net/wireless/rsi/rsi_mgmt.h | 6 ++-- 4 files changed, 44 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 2da54932070d..edcba567bfe8 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -342,25 +342,51 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + enum opmode intf_mode; int ret = -EOPNOTSUPP; vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; mutex_lock(&common->mutex); + + if (adapter->sc_nvifs > 1) { + mutex_unlock(&common->mutex); + return -EOPNOTSUPP; + } + switch (vif->type) { case NL80211_IFTYPE_STATION: - if (!adapter->sc_nvifs) { - ++adapter->sc_nvifs; - adapter->vifs[0] = vif; - ret = rsi_set_vap_capabilities(common, - STA_OPMODE, - VAP_ADD); - } + rsi_dbg(INFO_ZONE, "Station Mode"); + intf_mode = STA_OPMODE; + break; + case NL80211_IFTYPE_AP: + rsi_dbg(INFO_ZONE, "AP Mode"); + intf_mode = AP_OPMODE; break; default: rsi_dbg(ERR_ZONE, "%s: Interface type %d not supported\n", __func__, vif->type); + goto out; } + + adapter->vifs[adapter->sc_nvifs++] = vif; + ret = rsi_set_vap_capabilities(common, intf_mode, common->mac_addr, + 0, VAP_ADD); + if (ret) { + rsi_dbg(ERR_ZONE, "Failed to set VAP capabilities\n"); + goto out; + } + + if (vif->type == NL80211_IFTYPE_AP) { + int i; + + rsi_send_rx_filter_frame(common, DISALLOW_BEACONS); + common->min_rate = RSI_RATE_AUTO; + for (i = 0; i < common->max_stations; i++) + common->stations[i].sta = NULL; + } + +out: mutex_unlock(&common->mutex); return ret; @@ -383,7 +409,8 @@ static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw, mutex_lock(&common->mutex); if (vif->type == NL80211_IFTYPE_STATION) { adapter->sc_nvifs--; - rsi_set_vap_capabilities(common, STA_OPMODE, VAP_DELETE); + rsi_set_vap_capabilities(common, STA_OPMODE, vif->addr, + 0, VAP_DELETE); } if (!memcmp(adapter->vifs[0], vif, sizeof(struct ieee80211_vif))) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index f93499d0b8fa..233a418555bb 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -624,6 +624,8 @@ static int rsi_program_bb_rf(struct rsi_common *common) */ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode, + u8 *mac_addr, + u8 vap_id, u8 vap_status) { struct sk_buff *skb = NULL; @@ -632,7 +634,6 @@ int rsi_set_vap_capabilities(struct rsi_common *common, struct ieee80211_hw *hw = adapter->hw; struct ieee80211_conf *conf = &hw->conf; u16 frame_len = sizeof(struct rsi_vap_caps); - u16 vap_id = 0; rsi_dbg(MGMT_TX_ZONE, "%s: Sending VAP capabilities frame\n", __func__); @@ -656,7 +657,7 @@ int rsi_set_vap_capabilities(struct rsi_common *common, vap_caps->radioid_macid = ((common->mac_id & 0xf) << 4) | (common->radio_id & 0xf); - memcpy(vap_caps->mac_addr, common->mac_addr, IEEE80211_ADDR_LEN); + memcpy(vap_caps->mac_addr, mac_addr, IEEE80211_ADDR_LEN); vap_caps->keep_alive_period = cpu_to_le16(90); vap_caps->frag_threshold = cpu_to_le16(IEEE80211_MAX_FRAG_THRESHOLD); diff --git a/drivers/net/wireless/rsi/rsi_main.h 
b/drivers/net/wireless/rsi/rsi_main.h index 0077888190c3..9f5f33f616b6 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -90,6 +90,7 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...); #define IEEE80211_MGMT_FRAME 0x00 #define IEEE80211_CTL_FRAME 0x04 +#define RSI_MAX_ASSOC_STAS 32 #define IEEE80211_QOS_TID 0x0f #define IEEE80211_NONQOS_TID 16 @@ -262,6 +263,8 @@ struct rsi_common { u8 dtim_cnt; /* AP mode parameters */ + struct rsi_sta stations[RSI_MAX_ASSOC_STAS + 1]; + int num_stations; int max_stations; }; diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 201a46572c69..9093ba685fb0 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -188,8 +188,8 @@ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) enum opmode { - STA_OPMODE = 1, - AP_OPMODE = 2 + AP_OPMODE = 0, + STA_OPMODE, }; enum vap_status { @@ -591,7 +591,7 @@ static inline void rsi_set_len_qno(__le16 *addr, u16 len, u8 qno) int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg); int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode, - u8 vap_status); + u8 *mac_addr, u8 vap_id, u8 vap_status); int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid, u16 ssn, u8 buf_size, u8 event); int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len, -- cgit v1.2.3-55-g7522 From 75ca0049aad68136c8f673013792063dde530810 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:10 +0530 Subject: rsi: remove interface changes for AP mode remove_interface callback of mac80211 is handled for AP mode. Same is notified to firmware through vap_capabilities frame with VAP status VAP_DELETE. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index edcba567bfe8..99446bbc0516 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -405,13 +405,31 @@ static void rsi_mac80211_remove_interface(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + enum opmode opmode; + + rsi_dbg(INFO_ZONE, "Remove Interface Called\n"); mutex_lock(&common->mutex); - if (vif->type == NL80211_IFTYPE_STATION) { - adapter->sc_nvifs--; - rsi_set_vap_capabilities(common, STA_OPMODE, vif->addr, - 0, VAP_DELETE); + + if (adapter->sc_nvifs <= 0) { + mutex_unlock(&common->mutex); + return; + } + + switch (vif->type) { + case NL80211_IFTYPE_STATION: + opmode = STA_OPMODE; + break; + case NL80211_IFTYPE_AP: + opmode = AP_OPMODE; + break; + default: + mutex_unlock(&common->mutex); + return; } + rsi_set_vap_capabilities(common, opmode, vif->addr, + 0, VAP_DELETE); + adapter->sc_nvifs--; if (!memcmp(adapter->vifs[0], vif, sizeof(struct ieee80211_vif))) adapter->vifs[0] = NULL; -- cgit v1.2.3-55-g7522 From d26a9559403c7c3ec3b430f5825bc22c3d40abdb Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:11 +0530 Subject: rsi: add beacon changes for AP mode Mac80211 config parameter BEACON_ENABLE is handled. When VAP capabilities frame with AP mode is configured to firmware, beacon events start coming to host at each PreTBTT. 
At this time, beacon is taken from mac80211, descriptor is prepared and send to firmware. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_core.c | 15 +++++-- drivers/net/wireless/rsi/rsi_91x_hal.c | 65 ++++++++++++++++++++++++++++- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 12 ++++++ drivers/net/wireless/rsi/rsi_91x_mgmt.c | 48 ++++++++++++++++++--- drivers/net/wireless/rsi/rsi_hal.h | 2 + drivers/net/wireless/rsi/rsi_main.h | 13 +++--- drivers/net/wireless/rsi/rsi_mgmt.h | 11 +++++ 7 files changed, 152 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 88a1a56a20ab..6cfda8626cfe 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -16,6 +16,7 @@ #include "rsi_mgmt.h" #include "rsi_common.h" +#include "rsi_hal.h" /** * rsi_determine_min_weight_queue() - This function determines the queue with @@ -136,6 +137,10 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common) u8 q_num = INVALID_QUEUE; u8 ii = 0; + if (skb_queue_len(&common->tx_queue[MGMT_BEACON_Q])) { + q_num = MGMT_BEACON_Q; + return q_num; + } if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) { if (!common->mgmt_q_block) q_num = MGMT_SOFT_Q; @@ -291,10 +296,14 @@ void rsi_core_qos_processor(struct rsi_common *common) break; } - if (q_num == MGMT_SOFT_Q) + if (q_num == MGMT_SOFT_Q) { status = rsi_send_mgmt_pkt(common, skb); - else + } else if (q_num == MGMT_BEACON_Q) { + status = rsi_send_pkt_to_bus(common, skb); + dev_kfree_skb(skb); + } else { status = rsi_send_data_pkt(common, skb); + } if (status) { mutex_unlock(&common->tx_lock); @@ -358,7 +367,7 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) tx_params->sta_id = 0; } - if ((q_num != MGMT_SOFT_Q) && + if ((q_num < MGMT_SOFT_Q) && ((skb_queue_len(&common->tx_queue[q_num]) + 1) >= DATA_QUEUE_WATER_MARK)) { rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__); diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 4addcc0826db..1ed73320e19f 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -25,7 +25,15 @@ static struct ta_metadata metadata_flash_content[] = { {"rsi/rs9113_wlan_qspi.rps", 0x00010000}, }; -/*This function prepares descriptor for given management packet*/ +int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb) +{ + struct rsi_hw *adapter = common->priv; + int status; + + status = adapter->host_intf_ops->write_pkt(common->priv, + skb->data, skb->len); + return status; +} static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) { @@ -306,6 +314,61 @@ err: return status; } +int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb) +{ + struct rsi_hw *adapter = (struct rsi_hw *)common->priv; + struct rsi_data_desc *bcn_frm; + struct ieee80211_hw *hw = common->priv->hw; + struct ieee80211_conf *conf = &hw->conf; + struct sk_buff *mac_bcn; + u8 vap_id = 0; + u16 tim_offset; + + mac_bcn = ieee80211_beacon_get_tim(adapter->hw, + adapter->vifs[adapter->sc_nvifs - 1], + &tim_offset, NULL); + if (!mac_bcn) { + rsi_dbg(ERR_ZONE, "Failed to get beacon from mac80211\n"); + return -EINVAL; + } + + common->beacon_cnt++; + bcn_frm = (struct rsi_data_desc *)skb->data; + rsi_set_len_qno(&bcn_frm->len_qno, mac_bcn->len, RSI_WIFI_DATA_Q); + bcn_frm->header_len = MIN_802_11_HDR_LEN; + 
bcn_frm->frame_info = cpu_to_le16(RSI_DATA_DESC_MAC_BBP_INFO | + RSI_DATA_DESC_NO_ACK_IND | + RSI_DATA_DESC_BEACON_FRAME | + RSI_DATA_DESC_INSERT_TSF | + RSI_DATA_DESC_INSERT_SEQ_NO | + RATE_INFO_ENABLE); + bcn_frm->rate_info = cpu_to_le16(vap_id << 14); + bcn_frm->qid_tid = BEACON_HW_Q; + + if (conf_is_ht40_plus(conf)) { + bcn_frm->bbp_info = cpu_to_le16(LOWER_20_ENABLE); + bcn_frm->bbp_info |= cpu_to_le16(LOWER_20_ENABLE >> 12); + } else if (conf_is_ht40_minus(conf)) { + bcn_frm->bbp_info = cpu_to_le16(UPPER_20_ENABLE); + bcn_frm->bbp_info |= cpu_to_le16(UPPER_20_ENABLE >> 12); + } + + if (common->band == NL80211_BAND_2GHZ) + bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_1); + else + bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_6); + + if (mac_bcn->data[tim_offset + 2] == 0) + bcn_frm->frame_info |= cpu_to_le16(RSI_DATA_DESC_DTIM_BEACON); + + memcpy(&skb->data[FRAME_DESC_SZ], mac_bcn->data, mac_bcn->len); + skb_put(skb, mac_bcn->len + FRAME_DESC_SZ); + + dev_kfree_skb(mac_bcn); + + return 0; +} + static void bl_cmd_timeout(unsigned long priv) { struct rsi_hw *adapter = (struct rsi_hw *)priv; diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 99446bbc0516..6038a2fc9eda 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -652,6 +652,18 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, common->cqm_info.rssi_thold, common->cqm_info.rssi_hyst); } + + if ((changed & BSS_CHANGED_BEACON_ENABLED) && + (vif->type == NL80211_IFTYPE_AP)) { + if (bss->enable_beacon) { + rsi_dbg(INFO_ZONE, "===> BEACON ENABLED <===\n"); + common->beacon_enabled = 1; + } else { + rsi_dbg(INFO_ZONE, "===> BEACON DISABLED <===\n"); + common->beacon_enabled = 0; + } + } + mutex_unlock(&common->mutex); } diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 233a418555bb..e47fc0d96475 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -18,6 +18,7 @@ #include "rsi_mgmt.h" #include "rsi_common.h" #include "rsi_ps.h" +#include "rsi_hal.h" static struct bootup_params boot_params_20 = { .magic_number = cpu_to_le16(0x5aa5), @@ -1518,6 +1519,31 @@ int rsi_set_antenna(struct rsi_common *common, u8 antenna) return rsi_send_internal_mgmt_frame(common, skb); } +static int rsi_send_beacon(struct rsi_common *common) +{ + struct sk_buff *skb = NULL; + u8 dword_align_bytes = 0; + + skb = dev_alloc_skb(MAX_MGMT_PKT_SIZE); + if (!skb) + return -ENOMEM; + + memset(skb->data, 0, MAX_MGMT_PKT_SIZE); + + dword_align_bytes = ((unsigned long)skb->data & 0x3f); + if (dword_align_bytes) + skb_pull(skb, (64 - dword_align_bytes)); + if (rsi_prepare_beacon(common, skb)) { + rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n"); + return -EINVAL; + } + skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb); + rsi_set_event(&common->tx_thread.event); + rsi_dbg(DATA_TX_ZONE, "%s: Added to beacon queue\n", __func__); + + return 0; +} + /** * rsi_handle_ta_confirm_type() - This function handles the confirm frames. * @common: Pointer to the driver private structure. 
@@ -1722,21 +1748,33 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg) rsi_dbg(FSM_ZONE, "%s: Msg Len: %d, Msg Type: %4x\n", __func__, msg_len, msg_type); - if (msg_type == TA_CONFIRM_TYPE) { + switch (msg_type) { + case TA_CONFIRM_TYPE: return rsi_handle_ta_confirm_type(common, msg); - } else if (msg_type == CARD_READY_IND) { + case CARD_READY_IND: rsi_dbg(FSM_ZONE, "%s: Card ready indication received\n", __func__); return rsi_handle_card_ready(common, msg); - } else if (msg_type == TX_STATUS_IND) { + case TX_STATUS_IND: if (msg[15] == PROBEREQ_CONFIRM) { common->mgmt_q_block = false; rsi_dbg(FSM_ZONE, "%s: Probe confirm received\n", __func__); } - } else if (msg_type == RX_DOT11_MGMT) { + break; + case BEACON_EVENT_IND: + rsi_dbg(INFO_ZONE, "Beacon event\n"); + if (common->fsm_state != FSM_MAC_INIT_DONE) + return -1; + if (common->iface_down) + return -1; + if (!common->beacon_enabled) + return -1; + rsi_send_beacon(common); + break; + case RX_DOT11_MGMT: return rsi_mgmt_pkt_to_core(common, msg, msg_len); - } else { + default: rsi_dbg(INFO_ZONE, "Received packet type: 0x%x\n", msg_type); } return 0; diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 00c6a0c5a891..297f4ce2c39e 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -144,5 +144,7 @@ struct rsi_data_desc { } __packed; int rsi_hal_device_init(struct rsi_hw *adapter); +int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb); +int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 9f5f33f616b6..169e2f9e3c5e 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -72,7 +72,7 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...); #define MULTICAST_WATER_MARK 200 #define MAC_80211_HDR_FRAME_CONTROL 0 #define WME_NUM_AC 4 -#define NUM_SOFT_QUEUES 5 +#define NUM_SOFT_QUEUES 6 #define MAX_HW_QUEUES 12 #define INVALID_QUEUE 0xff #define MAX_CONTINUOUS_VO_PKTS 8 @@ -131,7 +131,8 @@ enum edca_queue { BE_Q, VI_Q, VO_Q, - MGMT_SOFT_Q + MGMT_SOFT_Q, + MGMT_BEACON_Q }; struct security_info { @@ -148,8 +149,8 @@ struct wmm_qinfo { }; struct transmit_q_stats { - u32 total_tx_pkt_send[NUM_EDCA_QUEUES + 1]; - u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 1]; + u32 total_tx_pkt_send[NUM_EDCA_QUEUES + 2]; + u32 total_tx_pkt_freed[NUM_EDCA_QUEUES + 2]; }; struct vif_priv { @@ -199,7 +200,7 @@ struct rsi_common { struct version_info fw_ver; struct rsi_thread tx_thread; - struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 1]; + struct sk_buff_head tx_queue[NUM_EDCA_QUEUES + 2]; /* Mutex declaration */ struct mutex mutex; /* Mutex used for tx thread */ @@ -263,6 +264,8 @@ struct rsi_common { u8 dtim_cnt; /* AP mode parameters */ + u8 beacon_enabled; + u16 beacon_cnt; struct rsi_sta stations[RSI_MAX_ASSOC_STAS + 1]; int num_stations; int max_stations; diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 9093ba685fb0..a00aa10b1be5 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -49,6 +49,7 @@ #define TA_CONFIRM_TYPE 0x01 #define RX_DOT11_MGMT 0x02 #define TX_STATUS_IND 0x04 +#define BEACON_EVENT_IND 0x08 #define PROBEREQ_CONFIRM 2 #define CARD_READY_IND 0x00 @@ -187,6 +188,16 @@ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) +#define RSI_DATA_DESC_MAC_BBP_INFO BIT(0) +#define 
RSI_DATA_DESC_NO_ACK_IND BIT(9) +#define RSI_DATA_DESC_QOS_EN BIT(12) +#define RSI_DATA_DESC_NORMAL_FRAME 0x00 +#define RSI_DATA_DESC_DTIM_BEACON_GATED_FRAME BIT(10) +#define RSI_DATA_DESC_BEACON_FRAME BIT(11) +#define RSI_DATA_DESC_DTIM_BEACON (BIT(10) | BIT(11)) +#define RSI_DATA_DESC_INSERT_TSF BIT(15) +#define RSI_DATA_DESC_INSERT_SEQ_NO BIT(2) + enum opmode { AP_OPMODE = 0, STA_OPMODE, -- cgit v1.2.3-55-g7522 From 3528608f3a7919ab34a4fe60aaea5996260ef75e Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:12 +0530 Subject: rsi: handle station connection in AP mode Station structures are maintained in driver with required fields. When mac80211 callback sta_add is called, driver iterates through list of connected stations to check available index and assigns station id which is important for further communication to that station. Then peer notify frame is send to firmware to inform the firmware about new station connection. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 78 ++++++++++++++++++++++++++--- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 49 +++++++++++------- drivers/net/wireless/rsi/rsi_mgmt.h | 9 +++- 3 files changed, 110 insertions(+), 26 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 6038a2fc9eda..b1c08491c754 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -626,10 +626,12 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw, rsi_send_rx_filter_frame(common, rx_filter_word); } rsi_inform_bss_status(common, + STA_OPMODE, bss_conf->assoc, bss_conf->bssid, bss_conf->qos, - bss_conf->aid); + bss_conf->aid, + NULL, 0); adapter->ps_info.dtim_interval_duration = bss->dtim_period; adapter->ps_info.listen_interval = conf->listen_interval; @@ -1157,18 +1159,80 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + bool sta_exist = false; + struct rsi_sta *rsta; + + rsi_dbg(INFO_ZONE, "Station Add: %pM\n", sta->addr); mutex_lock(&common->mutex); - rsi_set_min_rate(hw, sta, common); + if (vif->type == NL80211_IFTYPE_AP) { + u8 cnt; + int sta_idx = -1; + int free_index = -1; + + /* Check if max stations reached */ + if (common->num_stations >= common->max_stations) { + rsi_dbg(ERR_ZONE, "Reject: Max Stations exists\n"); + mutex_unlock(&common->mutex); + return -EOPNOTSUPP; + } + for (cnt = 0; cnt < common->max_stations; cnt++) { + rsta = &common->stations[cnt]; - if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || - (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) { - common->vif_info[0].sgi = true; + if (!rsta->sta) { + if (free_index < 0) + free_index = cnt; + continue; + } + if (!memcmp(rsta->sta->addr, sta->addr, ETH_ALEN)) { + rsi_dbg(INFO_ZONE, "Station exists\n"); + sta_idx = cnt; + sta_exist = true; + break; + } + } + if (!sta_exist) { + if (free_index >= 0) + sta_idx = free_index; + } + if (sta_idx < 0) { + rsi_dbg(ERR_ZONE, + "%s: Some problem reaching here...\n", + __func__); + return -EINVAL; + } + rsta = &common->stations[sta_idx]; + rsta->sta = sta; + rsta->sta_id = sta_idx; + for (cnt = 0; cnt < IEEE80211_NUM_TIDS; cnt++) + rsta->start_tx_aggr[cnt] = false; + for (cnt = 0; cnt < IEEE80211_NUM_TIDS; cnt++) + rsta->seq_start[cnt] = 0; + if (!sta_exist) { + rsi_dbg(INFO_ZONE, "New Station\n"); + + /* Send peer notify 
to device */ + rsi_dbg(INFO_ZONE, "Indicate bss status to device\n"); + rsi_inform_bss_status(common, AP_OPMODE, 1, sta->addr, + sta->wme, sta->aid, sta, sta_idx); + + common->num_stations++; + } } - if (sta->ht_cap.ht_supported) - ieee80211_start_tx_ba_session(sta, 0, 0); + if (vif->type == NL80211_IFTYPE_STATION) { + rsi_set_min_rate(hw, sta, common); + if (sta->ht_cap.ht_supported) { + common->vif_info[0].is_ht = true; + common->bitrate_mask[NL80211_BAND_2GHZ] = + sta->supp_rates[NL80211_BAND_2GHZ]; + if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) + common->vif_info[0].sgi = true; + ieee80211_start_tx_ba_session(sta, 0, 0); + } + } mutex_unlock(&common->mutex); diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index e47fc0d96475..7c0f27a21699 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -455,12 +455,14 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common, * Return: status: 0 on success, corresponding negative error code on failure. */ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, - u8 opmode, + enum opmode opmode, u8 notify_event, const unsigned char *bssid, u8 qos_enable, - u16 aid) + u16 aid, + u16 sta_id) { + struct ieee80211_vif *vif = common->priv->vifs[0]; struct sk_buff *skb = NULL; struct rsi_peer_notify *peer_notify; u16 vap_id = 0; @@ -480,7 +482,10 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, memset(skb->data, 0, frame_len); peer_notify = (struct rsi_peer_notify *)skb->data; - peer_notify->command = cpu_to_le16(opmode << 1); + if (opmode == STA_OPMODE) + peer_notify->command = cpu_to_le16(PEER_TYPE_AP << 1); + else if (opmode == AP_OPMODE) + peer_notify->command = cpu_to_le16(PEER_TYPE_STA << 1); switch (notify_event) { case STA_CONNECTED: @@ -502,13 +507,15 @@ static int rsi_hal_send_sta_notify_frame(struct rsi_common *common, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); peer_notify->desc.desc_dword0.frame_type = PEER_NOTIFY; + peer_notify->desc.desc_dword3.qid_tid = sta_id; peer_notify->desc.desc_dword3.sta_id = vap_id; skb_put(skb, frame_len); status = rsi_send_internal_mgmt_frame(common, skb); - if (!status && qos_enable) { + if ((vif->type == NL80211_IFTYPE_STATION) && + (!status && qos_enable)) { rsi_set_contention_vals(common); status = rsi_load_radio_caps(common); } @@ -1279,32 +1286,40 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) * Return: None. 
*/ void rsi_inform_bss_status(struct rsi_common *common, + enum opmode opmode, u8 status, - const unsigned char *bssid, + const u8 *addr, u8 qos_enable, - u16 aid) + u16 aid, + struct ieee80211_sta *sta, + u16 sta_id) { if (status) { - common->hw_data_qs_blocked = true; + if (opmode == STA_OPMODE) + common->hw_data_qs_blocked = true; rsi_hal_send_sta_notify_frame(common, - RSI_IFTYPE_STATION, + opmode, STA_CONNECTED, - bssid, + addr, qos_enable, - aid); + aid, sta_id); if (common->min_rate == 0xffff) rsi_send_auto_rate_request(common); - if (!rsi_send_block_unblock_frame(common, false)) - common->hw_data_qs_blocked = false; + if (opmode == STA_OPMODE) { + if (!rsi_send_block_unblock_frame(common, false)) + common->hw_data_qs_blocked = false; + } } else { - common->hw_data_qs_blocked = true; + if (opmode == STA_OPMODE) + common->hw_data_qs_blocked = true; rsi_hal_send_sta_notify_frame(common, - RSI_IFTYPE_STATION, + opmode, STA_DISCONNECTED, - bssid, + addr, qos_enable, - aid); - rsi_send_block_unblock_frame(common, true); + aid, sta_id); + if (opmode == STA_OPMODE) + rsi_send_block_unblock_frame(common, true); } } diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index a00aa10b1be5..a2e377f3519b 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -209,6 +209,10 @@ enum vap_status { VAP_UPDATE = 3 }; +enum peer_type { + PEER_TYPE_AP, + PEER_TYPE_STA, +}; extern struct ieee80211_rate rsi_rates[12]; extern const u16 rsi_mcsrates[8]; @@ -611,8 +615,9 @@ int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel); int rsi_send_vap_dynamic_update(struct rsi_common *common); int rsi_send_block_unblock_frame(struct rsi_common *common, bool event); -void rsi_inform_bss_status(struct rsi_common *common, u8 status, - const u8 *bssid, u8 qos_enable, u16 aid); +void rsi_inform_bss_status(struct rsi_common *common, enum opmode opmode, + u8 status, const u8 *addr, u8 qos_enable, u16 aid, + struct ieee80211_sta *sta, u16 sta_id); void rsi_indicate_pkt_to_os(struct rsi_common *common, struct sk_buff *skb); int rsi_mac80211_attach(struct rsi_common *common); void rsi_indicate_tx_status(struct rsi_hw *common, struct sk_buff *skb, -- cgit v1.2.3-55-g7522 From 571b050b42ae8475741332393d0da916e9f96717 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:13 +0530 Subject: rsi: handle station disconnection in AP mode When sta_remove of mac80211 is called, driver iterates through list of existing stations to get the station id. Then peer notify is prepared and send to firmare. 
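The removal path described here is a plain linear scan of the driver's station table. Below is a minimal user-space sketch of that lookup-and-release pattern; the demo_* names and the reduced structure are invented for illustration, and the peer-notify frame sent to firmware is left out.

#include <stdbool.h>
#include <string.h>

#define DEMO_ETH_ALEN 6

struct demo_sta {
        bool in_use;
        unsigned char addr[DEMO_ETH_ALEN];
        int sta_id;
};

/* Return the slot index of the station with the given MAC, or -1. */
static int demo_find_sta(const struct demo_sta *stations, int max,
                         const unsigned char *mac)
{
        int i;

        for (i = 0; i < max; i++) {
                if (!stations[i].in_use)
                        continue;
                if (!memcmp(stations[i].addr, mac, DEMO_ETH_ALEN))
                        return i;
        }
        return -1;
}

/* On disconnect: look the peer up, notify firmware (omitted), free the slot. */
static void demo_remove_sta(struct demo_sta *stations, int max,
                            const unsigned char *mac, int *num_stations)
{
        int idx = demo_find_sta(stations, max, mac);

        if (idx < 0)
                return;                 /* unknown MAC, nothing to release */

        stations[idx].in_use = false;
        stations[idx].sta_id = -1;
        if (*num_stations > 0)
                (*num_stations)--;
}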
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 56 +++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 11 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index b1c08491c754..be10d508d740 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -1254,21 +1254,55 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw, { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; + struct ieee80211_bss_conf *bss = &vif->bss_conf; + struct rsi_sta *rsta; + + rsi_dbg(INFO_ZONE, "Station Remove: %pM\n", sta->addr); mutex_lock(&common->mutex); - /* Resetting all the fields to default values */ - common->bitrate_mask[NL80211_BAND_2GHZ] = 0; - common->bitrate_mask[NL80211_BAND_5GHZ] = 0; - common->min_rate = 0xffff; - common->vif_info[0].is_ht = false; - common->vif_info[0].sgi = false; - common->vif_info[0].seq_start = 0; - common->secinfo.ptk_cipher = 0; - common->secinfo.gtk_cipher = 0; + if (vif->type == NL80211_IFTYPE_AP) { + u8 sta_idx, cnt; - rsi_send_rx_filter_frame(common, 0); - + /* Send peer notify to device */ + rsi_dbg(INFO_ZONE, "Indicate bss status to device\n"); + for (sta_idx = 0; sta_idx < common->max_stations; sta_idx++) { + rsta = &common->stations[sta_idx]; + + if (!rsta->sta) + continue; + if (!memcmp(rsta->sta->addr, sta->addr, ETH_ALEN)) { + rsi_inform_bss_status(common, AP_OPMODE, 0, + sta->addr, sta->wme, + sta->aid, sta, sta_idx); + rsta->sta = NULL; + rsta->sta_id = -1; + for (cnt = 0; cnt < IEEE80211_NUM_TIDS; cnt++) + rsta->start_tx_aggr[cnt] = false; + if (common->num_stations > 0) + common->num_stations--; + break; + } + } + if (sta_idx >= common->max_stations) + rsi_dbg(ERR_ZONE, "%s: No station found\n", __func__); + } + + if (vif->type == NL80211_IFTYPE_STATION) { + /* Resetting all the fields to default values */ + memcpy((u8 *)bss->bssid, (u8 *)sta->addr, ETH_ALEN); + bss->qos = sta->wme; + common->bitrate_mask[NL80211_BAND_2GHZ] = 0; + common->bitrate_mask[NL80211_BAND_5GHZ] = 0; + common->min_rate = 0xffff; + common->vif_info[0].is_ht = false; + common->vif_info[0].sgi = false; + common->vif_info[0].seq_start = 0; + common->secinfo.ptk_cipher = 0; + common->secinfo.gtk_cipher = 0; + if (!common->iface_down) + rsi_send_rx_filter_frame(common, 0); + } mutex_unlock(&common->mutex); return 0; -- cgit v1.2.3-55-g7522 From 19844c0a9a19ab6e9139b5e988449de9d2f559e1 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:14 +0530 Subject: rsi: data and managemet path changes for AP mode Station id needs to be get for data and management frames to fill in the descruptor for AP mode. Few other changes related to AP mode are covered here. 
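A rough, self-contained sketch of the descriptor fill this refers to: group-addressed frames keep the VAP's own slot, unicast frames in AP mode take the id of the looked-up peer, and the frame is dropped when no peer matches. The demo_* names below stand in for the driver's helpers and are not its real API.

#include <stdbool.h>
#include <stdint.h>

/* The group bit in the first receiver-address octet marks multicast;
 * broadcast is the all-ones special case of it. */
static bool demo_is_group_addr(const uint8_t *ra)
{
        return ra[0] & 0x01;
}

/*
 * Decide the station id written into a TX descriptor.  lookup() stands in
 * for a search of the station table by MAC and returns a negative value
 * for an unknown peer, in which case the caller drops the frame.
 */
static int demo_tx_sta_id(bool ap_mode, const uint8_t *ra,
                          int (*lookup)(const uint8_t *ra))
{
        if (!ap_mode || demo_is_group_addr(ra))
                return 0;               /* default slot / VAP id */

        return lookup(ra);              /* per-peer id, negative means drop */
}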
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_core.c | 55 ++++++++++++++++++++++------ drivers/net/wireless/rsi/rsi_91x_hal.c | 64 ++++++++++++++++++++++++--------- drivers/net/wireless/rsi/rsi_common.h | 1 + drivers/net/wireless/rsi/rsi_hal.h | 2 +- drivers/net/wireless/rsi/rsi_mgmt.h | 1 + 5 files changed, 96 insertions(+), 27 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index 6cfda8626cfe..2b0516d2f63d 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -320,6 +320,20 @@ void rsi_core_qos_processor(struct rsi_common *common) } } +struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr) +{ + int i; + + for (i = 0; i < common->max_stations; i++) { + if (!common->stations[i].sta) + continue; + if (!(memcmp(common->stations[i].sta->addr, + mac_addr, ETH_ALEN))) + return &common->stations[i]; + } + return NULL; +} + /** * rsi_core_xmit() - This function transmits the packets received from mac80211 * @common: Pointer to the driver private structure. @@ -332,39 +346,60 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) struct rsi_hw *adapter = common->priv; struct ieee80211_tx_info *info; struct skb_info *tx_params; - struct ieee80211_hdr *tmp_hdr = NULL; + struct ieee80211_hdr *wh; + struct ieee80211_vif *vif = adapter->vifs[0]; u8 q_num, tid = 0; + struct rsi_sta *rsta = NULL; if ((!skb) || (!skb->len)) { rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n", __func__); goto xmit_fail; } - info = IEEE80211_SKB_CB(skb); - tx_params = (struct skb_info *)info->driver_data; - tmp_hdr = (struct ieee80211_hdr *)&skb->data[0]; - if (common->fsm_state != FSM_MAC_INIT_DONE) { rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__); goto xmit_fail; } - if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) || - (ieee80211_is_ctl(tmp_hdr->frame_control)) || - (ieee80211_is_qos_nullfunc(tmp_hdr->frame_control))) { + info = IEEE80211_SKB_CB(skb); + tx_params = (struct skb_info *)info->driver_data; + wh = (struct ieee80211_hdr *)&skb->data[0]; + tx_params->sta_id = 0; + + if ((ieee80211_is_mgmt(wh->frame_control)) || + (ieee80211_is_ctl(wh->frame_control)) || + (ieee80211_is_qos_nullfunc(wh->frame_control))) { q_num = MGMT_SOFT_Q; skb->priority = q_num; } else { - if (ieee80211_is_data_qos(tmp_hdr->frame_control)) { + if (ieee80211_is_data_qos(wh->frame_control)) { tid = (skb->data[24] & IEEE80211_QOS_TID); skb->priority = TID_TO_WME_AC(tid); } else { tid = IEEE80211_NONQOS_TID; skb->priority = BE_Q; } + q_num = skb->priority; tx_params->tid = tid; - tx_params->sta_id = 0; + + if ((vif->type == NL80211_IFTYPE_AP) && + (!is_broadcast_ether_addr(wh->addr1)) && + (!is_multicast_ether_addr(wh->addr1))) { + rsta = rsi_find_sta(common, wh->addr1); + if (!rsta) + goto xmit_fail; + tx_params->sta_id = rsta->sta_id; + } + + if (rsta) { + /* Start aggregation if not done for this tid */ + if (!rsta->start_tx_aggr[tid]) { + rsta->start_tx_aggr[tid] = true; + ieee80211_start_tx_ba_session(rsta->sta, + tid, 0); + } + } } if ((q_num < MGMT_SOFT_Q) && diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 1ed73320e19f..070dfd68bb83 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -18,6 +18,7 @@ #include "rsi_mgmt.h" #include "rsi_hal.h" #include "rsi_sdio.h" +#include "rsi_common.h" /* FLASH Firmware */ 
static struct ta_metadata metadata_flash_content[] = { @@ -41,7 +42,7 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) struct ieee80211_hdr *wh = NULL; struct ieee80211_tx_info *info; struct ieee80211_conf *conf = &adapter->hw->conf; - struct ieee80211_vif *vif = NULL; + struct ieee80211_vif *vif = adapter->vifs[0]; struct rsi_mgmt_desc *mgmt_desc; struct skb_info *tx_params; struct ieee80211_bss_conf *bss = NULL; @@ -49,6 +50,11 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) u8 header_size; u32 dword_align_bytes = 0; + if (skb->len > MAX_MGMT_PKT_SIZE) { + rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__); + return -EINVAL; + } + info = IEEE80211_SKB_CB(skb); tx_params = (struct skb_info *)info->driver_data; @@ -74,15 +80,10 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) memset(&skb->data[0], 0, header_size); bss = &info->control.vif->bss_conf; wh = (struct ieee80211_hdr *)&skb->data[header_size]; - vif = adapter->vifs[0]; mgmt_desc = (struct rsi_mgmt_desc *)skb->data; xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; - if (skb->len > MAX_MGMT_PKT_SIZE) { - rsi_dbg(INFO_ZONE, "%s: Dropping mgmt pkt > 512\n", __func__); - return -EINVAL; - } rsi_set_len_qno(&mgmt_desc->len_qno, (skb->len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); mgmt_desc->frame_type = TX_DOT11_MGMT; @@ -113,6 +114,22 @@ static int rsi_prepare_mgmt_desc(struct rsi_common *common, struct sk_buff *skb) } } + if (ieee80211_is_probe_resp(wh->frame_control)) { + mgmt_desc->misc_flags |= (RSI_ADD_DELTA_TSF_VAP_ID | + RSI_FETCH_RETRY_CNT_FRM_HST); +#define PROBE_RESP_RETRY_CNT 3 + xtend_desc->retry_cnt = PROBE_RESP_RETRY_CNT; + } + + if ((vif->type == NL80211_IFTYPE_AP) && + (ieee80211_is_action(wh->frame_control))) { + struct rsi_sta *rsta = rsi_find_sta(common, wh->addr1); + + if (rsta) + mgmt_desc->sta_id = tx_params->sta_id; + else + return -EINVAL; + } return 0; } @@ -157,7 +174,7 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) xtend_desc = (struct xtended_desc *)&skb->data[FRAME_DESC_SZ]; wh = (struct ieee80211_hdr *)&skb->data[header_size]; - seq_num = (le16_to_cpu(wh->seq_ctrl) >> 4); + seq_num = IEEE80211_SEQ_TO_SN(le16_to_cpu(wh->seq_ctrl)); vif = adapter->vifs[0]; data_desc->xtend_desc_size = header_size - FRAME_DESC_SZ; @@ -191,12 +208,11 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) if (conf_is_ht40(&common->priv->hw->conf)) data_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE); - if (common->vif_info[0].sgi) { - if (common->min_rate & 0x100) /* Only MCS rates */ - data_desc->rate_info |= - cpu_to_le16(ENABLE_SHORTGI_RATE); + if ((common->vif_info[0].sgi) && (common->min_rate & 0x100)) { + /* Only MCS rates */ + data_desc->rate_info |= + cpu_to_le16(ENABLE_SHORTGI_RATE); } - } if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { @@ -223,7 +239,17 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); data_desc->frame_info |= cpu_to_le16(RSI_BROADCAST_PKT); data_desc->sta_id = vap_id; + + if (vif->type == NL80211_IFTYPE_AP) { + if (common->band == NL80211_BAND_5GHZ) + data_desc->rate_info = cpu_to_le16(RSI_RATE_6); + else + data_desc->rate_info = cpu_to_le16(RSI_RATE_1); + } } + if ((vif->type == NL80211_IFTYPE_AP) && + (ieee80211_has_moredata(wh->frame_control))) + data_desc->frame_info |= cpu_to_le16(MORE_DATA_PRESENT); return 0; } @@ -232,17 
+258,23 @@ static int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb) { struct rsi_hw *adapter = common->priv; + struct ieee80211_vif *vif = adapter->vifs[0]; struct ieee80211_tx_info *info; struct ieee80211_bss_conf *bss; - int status = -EIO; + int status = -EINVAL; + + if (!skb) + return 0; + if (common->iface_down) + goto err; info = IEEE80211_SKB_CB(skb); + if (!info->control.vif) + goto err; bss = &info->control.vif->bss_conf; - if (!bss->assoc) { - status = -EINVAL; + if ((vif->type == NL80211_IFTYPE_STATION) && (!bss->assoc)) goto err; - } status = rsi_prepare_data_desc(common, skb); if (status) diff --git a/drivers/net/wireless/rsi/rsi_common.h b/drivers/net/wireless/rsi/rsi_common.h index 44349696f5de..e579d694d13c 100644 --- a/drivers/net/wireless/rsi/rsi_common.h +++ b/drivers/net/wireless/rsi/rsi_common.h @@ -83,4 +83,5 @@ u16 rsi_get_connected_channel(struct rsi_hw *adapter); struct rsi_hw *rsi_91x_init(void); void rsi_91x_deinit(struct rsi_hw *adapter); int rsi_read_pkt(struct rsi_common *common, s32 rcv_pkt_len); +struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr); #endif diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h index 297f4ce2c39e..7c145053da6d 100644 --- a/drivers/net/wireless/rsi/rsi_hal.h +++ b/drivers/net/wireless/rsi/rsi_hal.h @@ -126,7 +126,7 @@ struct rsi_mgmt_desc { __le16 bbp_info; __le16 seq_ctrl; u8 reserved2; - u8 vap_info; + u8 sta_id; } __packed; struct rsi_data_desc { diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index a2e377f3519b..9c59250eb4de 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -63,6 +63,7 @@ #define BBP_REG_WRITE 0 #define RF_RESET_ENABLE BIT(3) #define RATE_INFO_ENABLE BIT(0) +#define MORE_DATA_PRESENT BIT(1) #define RSI_BROADCAST_PKT BIT(9) #define RSI_DESC_REQUIRE_CFM_TO_HOST BIT(2) #define RSI_ADD_DELTA_TSF_VAP_ID BIT(3) -- cgit v1.2.3-55-g7522 From 6572f054e9648ec242b673270b4d199d959d7cc7 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:15 +0530 Subject: rsi: use common descriptor for auto rate frame TX command frame auto rate request is modified to use common descriptor struture. 
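The common descriptor adopted here packs the payload length and the queue number into a single little-endian word, the same "length | (queue << 12)" layout visible in the desc_word[0] assignment this patch replaces. A minimal sketch of that packing, with an invented name and without the endianness conversion:

#include <stdint.h>

/*
 * Combine payload length and queue number into one 16-bit descriptor word:
 * the low 12 bits carry the length, the upper bits carry the queue id.
 * The driver's helper additionally stores the result as little endian.
 */
static uint16_t demo_set_len_qno(uint16_t len, uint8_t qno)
{
        return (uint16_t)((len & 0x0fff) | ((uint16_t)qno << 12));
}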
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 18 +++++++----------- drivers/net/wireless/rsi/rsi_mgmt.h | 2 +- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 7c0f27a21699..c5d48cd42cbe 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1164,8 +1164,9 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) u32 rate_bitmap = common->bitrate_mask[band]; u16 *selected_rates, min_rate; + u16 frame_len = sizeof(struct rsi_auto_rate); - skb = dev_alloc_skb(sizeof(struct rsi_auto_rate)); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", __func__); @@ -1180,8 +1181,6 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) return -ENOMEM; } - memset(skb->data, 0, sizeof(struct rsi_auto_rate)); - auto_rate = (struct rsi_auto_rate *)skb->data; auto_rate->aarf_rssi = cpu_to_le16(((u16)3 << 6) | (u16)(18 & 0x3f)); @@ -1190,10 +1189,10 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) auto_rate->initial_boundary = cpu_to_le16(3); auto_rate->max_threshold_limt = cpu_to_le16(27); - auto_rate->desc_word[1] = cpu_to_le16(AUTO_RATE_IND); + auto_rate->desc.desc_dword0.frame_type = AUTO_RATE_IND; if (common->channel_width == BW_40MHZ) - auto_rate->desc_word[7] |= cpu_to_le16(1); + auto_rate->desc.desc_dword3.qid_tid = BW_40MHZ; if (band == NL80211_BAND_2GHZ) { min_rate = RSI_RATE_1; @@ -1259,15 +1258,12 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) auto_rate->num_supported_rates = cpu_to_le16(num_supported_rates * 2); auto_rate->moderate_rate_inx = cpu_to_le16(num_supported_rates / 2); - auto_rate->desc_word[7] |= cpu_to_le16(0 << 8); num_supported_rates *= 2; - auto_rate->desc_word[0] = cpu_to_le16((sizeof(*auto_rate) - - FRAME_DESC_SZ) | - (RSI_WIFI_MGMT_Q << 12)); + rsi_set_len_qno(&auto_rate->desc.desc_dword0.len_qno, + (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); - skb_put(skb, - sizeof(struct rsi_auto_rate)); + skb_put(skb, frame_len); kfree(selected_rates); return rsi_send_internal_mgmt_frame(common, skb); diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 9c59250eb4de..c50153dcffb4 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -425,7 +425,7 @@ struct rsi_set_key { } __packed; struct rsi_auto_rate { - __le16 desc_word[8]; + struct rsi_cmd_desc desc; __le16 failure_limit; __le16 initial_boundary; __le16 max_threshold_limt; -- cgit v1.2.3-55-g7522 From 8a1ff83f2bf83b22e7d803ee2dbb103a3fc057ec Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:16 +0530 Subject: rsi: update tx auto rate command frame for AP mode Auto rate frame is sent to firmware when a new station is connected. Station id and station's ht capabilities are updated in auto rate command frame. 
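The minimum-rate choice this change derives from the peer's capabilities reduces to: fall back to the lowest MCS when the station advertises no legacy rates but supports HT, otherwise use the band's lowest legacy rate. A compact sketch with placeholder rate codes (not the driver's RSI_RATE_* values):

#include <stdbool.h>
#include <stdint.h>

enum demo_band { DEMO_BAND_2GHZ, DEMO_BAND_5GHZ };

#define DEMO_RATE_1     0x000   /* 1 Mb/s, placeholder encoding */
#define DEMO_RATE_6     0x008   /* 6 Mb/s, placeholder encoding */
#define DEMO_RATE_MCS0  0x100   /* lowest HT rate, placeholder encoding */

/* Pick the minimum rate reported to firmware for a newly added peer. */
static uint16_t demo_min_rate(enum demo_band band, uint32_t legacy_bitmap,
                              bool is_ht)
{
        if (!legacy_bitmap && is_ht)
                return DEMO_RATE_MCS0;

        return (band == DEMO_BAND_2GHZ) ? DEMO_RATE_1 : DEMO_RATE_6;
}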
Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 44 +++++++++++++++++++++++++-------- 1 file changed, 34 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index c5d48cd42cbe..8762d13dd6d9 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -1152,8 +1152,11 @@ static bool rsi_map_rates(u16 rate, int *offset) * * Return: 0 on success, corresponding error code on failure. */ -static int rsi_send_auto_rate_request(struct rsi_common *common) +static int rsi_send_auto_rate_request(struct rsi_common *common, + struct ieee80211_sta *sta, + u16 sta_id) { + struct ieee80211_vif *vif = common->priv->vifs[0]; struct sk_buff *skb; struct rsi_auto_rate *auto_rate; int ii = 0, jj = 0, kk = 0; @@ -1161,11 +1164,14 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) u8 band = hw->conf.chandef.chan->band; u8 num_supported_rates = 0; u8 rate_table_offset, rate_offset = 0; - u32 rate_bitmap = common->bitrate_mask[band]; - + u32 rate_bitmap; u16 *selected_rates, min_rate; + bool is_ht = false, is_sgi = false; u16 frame_len = sizeof(struct rsi_auto_rate); + rsi_dbg(MGMT_TX_ZONE, + "%s: Sending auto rate request frame\n", __func__); + skb = dev_alloc_skb(frame_len); if (!skb) { rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n", @@ -1193,12 +1199,31 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) if (common->channel_width == BW_40MHZ) auto_rate->desc.desc_dword3.qid_tid = BW_40MHZ; + auto_rate->desc.desc_dword3.sta_id = sta_id; + + if (vif->type == NL80211_IFTYPE_STATION) { + rate_bitmap = common->bitrate_mask[band]; + is_ht = common->vif_info[0].is_ht; + is_sgi = common->vif_info[0].sgi; + } else { + rate_bitmap = sta->supp_rates[band]; + is_ht = sta->ht_cap.ht_supported; + if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) || + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)) + is_sgi = true; + } if (band == NL80211_BAND_2GHZ) { - min_rate = RSI_RATE_1; + if ((rate_bitmap == 0) && (is_ht)) + min_rate = RSI_RATE_MCS0; + else + min_rate = RSI_RATE_1; rate_table_offset = 0; } else { - min_rate = RSI_RATE_6; + if ((rate_bitmap == 0) && (is_ht)) + min_rate = RSI_RATE_MCS0; + else + min_rate = RSI_RATE_6; rate_table_offset = 4; } @@ -1212,7 +1237,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) } num_supported_rates = jj; - if (common->vif_info[0].is_ht) { + if (is_ht) { for (ii = 0; ii < ARRAY_SIZE(mcs); ii++) selected_rates[jj++] = mcs[ii]; num_supported_rates += ARRAY_SIZE(mcs); @@ -1233,11 +1258,10 @@ static int rsi_send_auto_rate_request(struct rsi_common *common) } /* loading HT rates in the bottom half of the auto rate table */ - if (common->vif_info[0].is_ht) { + if (is_ht) { for (ii = rate_offset, kk = ARRAY_SIZE(rsi_mcsrates) - 1; ii < rate_offset + 2 * ARRAY_SIZE(rsi_mcsrates); ii++) { - if (common->vif_info[0].sgi || - conf_is_ht40(&common->priv->hw->conf)) + if (is_sgi || conf_is_ht40(&common->priv->hw->conf)) auto_rate->supported_rates[ii++] = cpu_to_le16(rsi_mcsrates[kk] | BIT(9)); else @@ -1300,7 +1324,7 @@ void rsi_inform_bss_status(struct rsi_common *common, qos_enable, aid, sta_id); if (common->min_rate == 0xffff) - rsi_send_auto_rate_request(common); + rsi_send_auto_rate_request(common, sta, sta_id); if (opmode == STA_OPMODE) { if (!rsi_send_block_unblock_frame(common, false)) common->hw_data_qs_blocked = false; -- cgit 
v1.2.3-55-g7522 From 32be57a666a51c2c987e816a744afb343b22e199 Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:17 +0530 Subject: rsi: aggregation parameters frame for AP mode TX command frame ampdu aggregation parameters is updated to include sta_id for AP mode. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 43 +++++++++++++++++++++++------ drivers/net/wireless/rsi/rsi_91x_mgmt.c | 6 ++-- drivers/net/wireless/rsi/rsi_mgmt.h | 3 +- 3 files changed, 39 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index be10d508d740..f2cb61f2b268 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -866,9 +866,11 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, int status = -EOPNOTSUPP; struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; - u16 seq_no = 0; + struct rsi_sta *rsta = NULL; + u16 seq_no = 0, seq_start = 0; u8 ii = 0; struct ieee80211_sta *sta = params->sta; + u8 sta_id = 0; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; u16 *ssn = ¶ms->ssn; @@ -880,17 +882,31 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, } mutex_lock(&common->mutex); - rsi_dbg(INFO_ZONE, "%s: AMPDU action %d called\n", __func__, action); + if (ssn != NULL) seq_no = *ssn; + if (vif->type == NL80211_IFTYPE_AP) { + rsta = rsi_find_sta(common, sta->addr); + if (!rsta) { + rsi_dbg(ERR_ZONE, "No station mapped\n"); + return 0; + } + sta_id = rsta->sta_id; + } + + rsi_dbg(INFO_ZONE, + "%s: AMPDU action tid=%d ssn=0x%x, buf_size=%d sta_id=%d\n", + __func__, tid, seq_no, buf_size, sta_id); + switch (action) { case IEEE80211_AMPDU_RX_START: status = rsi_send_aggregation_params_frame(common, tid, seq_no, buf_size, - STA_RX_ADDBA_DONE); + STA_RX_ADDBA_DONE, + sta_id); break; case IEEE80211_AMPDU_RX_STOP: @@ -898,11 +914,15 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, tid, 0, buf_size, - STA_RX_DELBA); + STA_RX_DELBA, + sta_id); break; case IEEE80211_AMPDU_TX_START: - common->vif_info[ii].seq_start = seq_no; + if (vif->type == NL80211_IFTYPE_STATION) + common->vif_info[ii].seq_start = seq_no; + else if (vif->type == NL80211_IFTYPE_AP) + rsta->seq_start[tid] = seq_no; ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); status = 0; break; @@ -914,18 +934,23 @@ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, tid, seq_no, buf_size, - STA_TX_DELBA); + STA_TX_DELBA, + sta_id); if (!status) ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: + if (vif->type == NL80211_IFTYPE_STATION) + seq_start = common->vif_info[ii].seq_start; + else if (vif->type == NL80211_IFTYPE_AP) + seq_start = rsta->seq_start[tid]; status = rsi_send_aggregation_params_frame(common, tid, - common->vif_info[ii] - .seq_start, + seq_start, buf_size, - STA_TX_ADDBA_DONE); + STA_TX_ADDBA_DONE, + sta_id); break; default: diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 8762d13dd6d9..2d3cae561be2 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -537,11 +537,11 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid, u16 ssn, u8 buf_size, - u8 event) + u8 event, + u8 sta_id) { struct sk_buff *skb = NULL; struct 
rsi_aggr_params *aggr_params; - u8 peer_id = 0; u16 frame_len = sizeof(struct rsi_aggr_params); skb = dev_alloc_skb(frame_len); @@ -561,7 +561,7 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, aggr_params->desc_dword0.frame_type = AMPDU_IND; aggr_params->aggr_params = tid & RSI_AGGR_PARAMS_TID_MASK; - aggr_params->peer_id = peer_id; + aggr_params->peer_id = sta_id; if (event == STA_TX_ADDBA_DONE) { aggr_params->seq_start = cpu_to_le16(ssn); aggr_params->baw_size = cpu_to_le16(buf_size); diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index c50153dcffb4..2d827330130d 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -609,7 +609,8 @@ int rsi_mgmt_pkt_recv(struct rsi_common *common, u8 *msg); int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode, u8 *mac_addr, u8 vap_id, u8 vap_status); int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid, - u16 ssn, u8 buf_size, u8 event); + u16 ssn, u8 buf_size, u8 event, + u8 sta_id); int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len, u8 key_type, u8 key_id, u32 cipher); int rsi_set_channel(struct rsi_common *common, -- cgit v1.2.3-55-g7522 From 38ef62353acbaa0eea062a9f047b33aebd7d52ce Mon Sep 17 00:00:00 2001 From: Prameela Rani Garnepudi Date: Wed, 16 Aug 2017 18:43:18 +0530 Subject: rsi: security enhancements for AP mode Station id should be set in load key frame configured to device. For WEP mode, key is configured once from mac80211. This key is saved and configured to device every time a station is connected. Signed-off-by: Prameela Rani Garnepudi Signed-off-by: Kalle Valo --- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 60 +++++++++++++++++++++-------- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 10 ++++- drivers/net/wireless/rsi/rsi_main.h | 1 + drivers/net/wireless/rsi/rsi_mgmt.h | 3 +- 4 files changed, 56 insertions(+), 18 deletions(-) diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index f2cb61f2b268..8b983d03f2da 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -758,11 +758,14 @@ static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw, */ static int rsi_hal_key_config(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_key_conf *key) + struct ieee80211_key_conf *key, + struct ieee80211_sta *sta) { struct rsi_hw *adapter = hw->priv; + struct rsi_sta *rsta = NULL; int status; u8 key_type; + s16 sta_id = 0; if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) key_type = RSI_PAIRWISE_KEY; @@ -772,23 +775,35 @@ static int rsi_hal_key_config(struct ieee80211_hw *hw, rsi_dbg(ERR_ZONE, "%s: Cipher 0x%x key_type: %d key_len: %d\n", __func__, key->cipher, key_type, key->keylen); - if ((key->cipher == WLAN_CIPHER_SUITE_WEP104) || - (key->cipher == WLAN_CIPHER_SUITE_WEP40)) { - status = rsi_hal_load_key(adapter->priv, - key->key, - key->keylen, - RSI_PAIRWISE_KEY, - key->keyidx, - key->cipher); - if (status) - return status; + if (vif->type == NL80211_IFTYPE_AP) { + if (sta) { + rsta = rsi_find_sta(adapter->priv, sta->addr); + if (rsta) + sta_id = rsta->sta_id; + } + adapter->priv->key = key; + } else { + if ((key->cipher == WLAN_CIPHER_SUITE_WEP104) || + (key->cipher == WLAN_CIPHER_SUITE_WEP40)) { + status = rsi_hal_load_key(adapter->priv, + key->key, + key->keylen, + RSI_PAIRWISE_KEY, + key->keyidx, + key->cipher, + sta_id); + if (status) + return status; + } } + return 
rsi_hal_load_key(adapter->priv, key->key, key->keylen, key_type, key->keyidx, - key->cipher); + key->cipher, + sta_id); } /** @@ -816,7 +831,7 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw, switch (cmd) { case SET_KEY: secinfo->security_enable = true; - status = rsi_hal_key_config(hw, vif, key); + status = rsi_hal_key_config(hw, vif, key, sta); if (status) { mutex_unlock(&common->mutex); return status; @@ -834,10 +849,11 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw, break; case DISABLE_KEY: - secinfo->security_enable = false; + if (vif->type == NL80211_IFTYPE_STATION) + secinfo->security_enable = false; rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__); memset(key, 0, sizeof(struct ieee80211_key_conf)); - status = rsi_hal_key_config(hw, vif, key); + status = rsi_hal_key_config(hw, vif, key, sta); break; default: @@ -1242,6 +1258,20 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw, rsi_inform_bss_status(common, AP_OPMODE, 1, sta->addr, sta->wme, sta->aid, sta, sta_idx); + if (common->key) { + struct ieee80211_key_conf *key = common->key; + + if ((key->cipher == WLAN_CIPHER_SUITE_WEP104) || + (key->cipher == WLAN_CIPHER_SUITE_WEP40)) + rsi_hal_load_key(adapter->priv, + key->key, + key->keylen, + RSI_PAIRWISE_KEY, + key->keyidx, + key->cipher, + sta_idx); + } + common->num_stations++; } } diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c index 2d3cae561be2..f7b550f900c4 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c @@ -715,8 +715,10 @@ int rsi_hal_load_key(struct rsi_common *common, u16 key_len, u8 key_type, u8 key_id, - u32 cipher) + u32 cipher, + s16 sta_id) { + struct ieee80211_vif *vif = common->priv->vifs[0]; struct sk_buff *skb = NULL; struct rsi_set_key *set_key; u16 key_descriptor = 0; @@ -734,8 +736,11 @@ int rsi_hal_load_key(struct rsi_common *common, memset(skb->data, 0, frame_len); set_key = (struct rsi_set_key *)skb->data; - if (key_type == RSI_GROUP_KEY) + if (key_type == RSI_GROUP_KEY) { key_descriptor = RSI_KEY_TYPE_BROADCAST; + if (vif->type == NL80211_IFTYPE_AP) + key_descriptor |= RSI_KEY_MODE_AP; + } if ((cipher == WLAN_CIPHER_SUITE_WEP40) || (cipher == WLAN_CIPHER_SUITE_WEP104)) { key_id = 0; @@ -754,6 +759,7 @@ int rsi_hal_load_key(struct rsi_common *common, (frame_len - FRAME_DESC_SZ), RSI_WIFI_MGMT_Q); set_key->desc_dword0.frame_type = SET_KEY_REQ; set_key->key_desc = cpu_to_le16(key_descriptor); + set_key->sta_id = sta_id; if (data) { if ((cipher == WLAN_CIPHER_SUITE_WEP40) || diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h index 169e2f9e3c5e..2c18dde633ea 100644 --- a/drivers/net/wireless/rsi/rsi_main.h +++ b/drivers/net/wireless/rsi/rsi_main.h @@ -269,6 +269,7 @@ struct rsi_common { struct rsi_sta stations[RSI_MAX_ASSOC_STAS + 1]; int num_stations; int max_stations; + struct ieee80211_key_conf *key; }; enum host_intf { diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h index 2d827330130d..c6e1fa669a27 100644 --- a/drivers/net/wireless/rsi/rsi_mgmt.h +++ b/drivers/net/wireless/rsi/rsi_mgmt.h @@ -409,6 +409,7 @@ struct rsi_dynamic_s { #define RSI_WEP_KEY_104 BIT(3) #define RSI_CIPHER_WPA BIT(4) #define RSI_CIPHER_TKIP BIT(5) +#define RSI_KEY_MODE_AP BIT(7) #define RSI_PROTECT_DATA_FRAMES BIT(13) #define RSI_KEY_ID_MASK 0xC0 #define RSI_KEY_ID_OFFSET 14 @@ -612,7 +613,7 @@ int rsi_send_aggregation_params_frame(struct rsi_common *common, u16 tid, u16 ssn, u8 
buf_size, u8 event, u8 sta_id); int rsi_hal_load_key(struct rsi_common *common, u8 *data, u16 key_len, - u8 key_type, u8 key_id, u32 cipher); + u8 key_type, u8 key_id, u32 cipher, s16 sta_id); int rsi_set_channel(struct rsi_common *common, struct ieee80211_channel *channel); int rsi_send_vap_dynamic_update(struct rsi_common *common); -- cgit v1.2.3-55-g7522 From e76dc1dd0065a6bc6fbda062b96d6df817a21c1c Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Thu, 17 Aug 2017 11:02:40 +0200 Subject: Bluetooth: btbcm: Consolidate the controller information commands The commands that read the basic vendor information about the Broadcom controller are duplicated for UART and USB devices. Combine them into a single function to reduce the code complexity. Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- drivers/bluetooth/btbcm.c | 69 ++++++++++++++++++++++++++--------------------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 9ab6cfbb831d..cc4bdefa6648 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -287,6 +287,37 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev) return skb; } +static int btbcm_read_info(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + /* Read Verbose Config Version Info */ + skb = btbcm_read_verbose_config(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]); + kfree_skb(skb); + + /* Read Controller Features */ + skb = btbcm_read_controller_features(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]); + kfree_skb(skb); + + /* Read Local Name */ + skb = btbcm_read_local_name(hdev); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1)); + kfree_skb(skb); + + return 0; +} + static const struct { u16 subver; const char *name; @@ -322,13 +353,10 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len) subver = le16_to_cpu(ver->lmp_subver); kfree_skb(skb); - /* Read Verbose Config Version Info */ - skb = btbcm_read_verbose_config(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]); - kfree_skb(skb); + /* Read controller information */ + err = btbcm_read_info(hdev); + if (err) + return err; switch ((rev & 0xf000) >> 12) { case 0: @@ -431,29 +459,10 @@ int btbcm_setup_patchram(struct hci_dev *hdev) subver = le16_to_cpu(ver->lmp_subver); kfree_skb(skb); - /* Read Verbose Config Version Info */ - skb = btbcm_read_verbose_config(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]); - kfree_skb(skb); - - /* Read Controller Features */ - skb = btbcm_read_controller_features(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]); - kfree_skb(skb); - - /* Read Local Name */ - skb = btbcm_read_local_name(hdev); - if (IS_ERR(skb)) - return PTR_ERR(skb); - - BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1)); - kfree_skb(skb); + /* Read controller information */ + err = btbcm_read_info(hdev); + if (err) + return err; switch ((rev & 0xf000) >> 12) { case 0: -- cgit v1.2.3-55-g7522 From 93345c06b74513c1e1c7933aef146e1d03420079 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 17 Aug 2017 09:19:30 +0100 Subject: liquidio: fix spelling mistake: "interuupt" -> "interrupt" Trivial fix to spelling 
mistake in dev_info message Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 08aa06c90d46..a63ddf07f168 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -412,7 +412,7 @@ lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) kfree(oct->irq_name_storage); oct->irq_name_storage = NULL; if (octeon_setup_interrupt(oct, num_ioqs)) { - dev_info(&oct->pci_dev->dev, "Setup interuupt failed\n"); + dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n"); return 1; } -- cgit v1.2.3-55-g7522 From fd07a62d58c16b1fdd46f4099791c5bd242908c3 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 17 Aug 2017 10:01:07 +0100 Subject: net: hns3: ensure media_type is unitialized Media type is only set if h->ae_algo->ops->get_media_type is called so there is a possibility that media_type is uninitialized when it is used a switch statement. Fix this by initializing media_type to HNAE3_MEDIA_TYPE_UNKNOWN. Detected by CoverityScan, CID#1452624("Uninitialized scalar variable") Fixes: 496d03e960ae ("net: hns3: Add Ethtool support to HNS3 driver") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index 0ad65e47c77e..53cab3ad4cda 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -304,7 +304,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; u32 supported_caps; u32 advertised_caps; - u8 media_type; + u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN; u8 link_stat; u8 auto_neg; u8 duplex; -- cgit v1.2.3-55-g7522 From 1ab2de2bfed3ab2073ed1e7afa2b1134930d2b70 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 17 Aug 2017 14:59:40 +0200 Subject: bpf: fix liveness propagation to parent in spilled stack slots Using parent->regs[] when propagating REG_LIVE_READ for spilled regs doesn't work since parent->regs[] denote the set of normal registers but not spilled ones. Propagate to the correct regs. Fixes: dc503a8ad984 ("bpf/verifier: track liveness for pruning") Reported-by: Dan Carpenter Signed-off-by: Daniel Borkmann Acked-by: Edward Cree Signed-off-by: David S. 
Miller --- kernel/bpf/verifier.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 958ba84a9995..40f669ddb571 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3460,7 +3460,7 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state, if (parent->spilled_regs[i].live & REG_LIVE_READ) continue; if (state->spilled_regs[i].live == REG_LIVE_READ) { - parent->regs[i].live |= REG_LIVE_READ; + parent->spilled_regs[i].live |= REG_LIVE_READ; touched = true; } } -- cgit v1.2.3-55-g7522 From 4d6a75b65dbfa910e84b2cbd7cd54468cf33ff9b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 17 Aug 2017 15:07:22 +0200 Subject: bpf: no need to nullify ri->map in xdp_do_redirect We are guaranteed to have a NULL ri->map in this branch since we test for it earlier, so we don't need to reset it here. Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Signed-off-by: David S. Miller --- net/core/filter.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/core/filter.c b/net/core/filter.c index e9f8dcef6c57..ea3ca34d0bf4 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2568,7 +2568,6 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, fwd = dev_get_by_index_rcu(dev_net(dev), index); ri->ifindex = 0; - ri->map = NULL; if (unlikely(!fwd)) { bpf_warn_invalid_xdp_redirect(index); return -EINVAL; -- cgit v1.2.3-55-g7522 From 71450804c689e4c601cb2a3f9c0ff7182cde84e6 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Thu, 17 Aug 2017 18:52:53 +0530 Subject: net: ibm: ibmveth: constify vio_device_id vio_device_id are not supposed to change at runtime. All functions working with vio_device_id provided by work with const vio_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmveth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index d17c2b03f580..f210398200ec 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1897,7 +1897,7 @@ static int ibmveth_resume(struct device *dev) return 0; } -static struct vio_device_id ibmveth_device_table[] = { +static const struct vio_device_id ibmveth_device_table[] = { { "network", "IBM,l-lan"}, { "", "" } }; -- cgit v1.2.3-55-g7522 From 8c37bc677af3458dd5598d22467fb913f31c5bb2 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Thu, 17 Aug 2017 18:52:54 +0530 Subject: net: ibm: ibmvnic: constify vio_device_id vio_device_id are not supposed to change at runtime. All functions working with vio_device_id provided by work with const vio_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5ac873173b2e..cb8182f4fdfa 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4022,7 +4022,7 @@ static int ibmvnic_resume(struct device *dev) return 0; } -static struct vio_device_id ibmvnic_device_table[] = { +static const struct vio_device_id ibmvnic_device_table[] = { {"network", "IBM,vnic"}, {"", "" } }; -- cgit v1.2.3-55-g7522 From 976d28bfd1f62a3f8e5370c5e7127ff5b3499359 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 17 Aug 2017 17:22:36 +0200 Subject: bpf: don't enable preemption twice in smap_do_verdict In smap_do_verdict(), the fall-through branch leads to call preempt_enable() twice for the SK_REDIRECT, which creates an imbalance. Only enable it for all remaining cases again. Fixes: 174a79ff9515 ("bpf: sockmap with sk redirect support") Reported-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/sockmap.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index f7e5e6cf124a..39de541fbcdc 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -135,7 +135,8 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb) /* Fall through and free skb otherwise */ case SK_DROP: default: - preempt_enable(); + if (rc != SK_REDIRECT) + preempt_enable(); kfree_skb(skb); } } -- cgit v1.2.3-55-g7522 From 047b0ecd683045dcea371f9d4d2917dcf3c553da Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 17 Aug 2017 17:22:37 +0200 Subject: bpf: reuse tc bpf prologue for sk skb progs Given both program types are effecitvely doing the same in the prologue, just reuse the one that we had for tc and only adapt to the corresponding drop verdict value. That way, we don't need to have the duplicate from 8a31db561566 ("bpf: add access to sock fields and pkt data from sk_skb programs") to maintain. Reported-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: John Fastabend Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- net/core/filter.c | 47 ++++++++++------------------------------------- 1 file changed, 10 insertions(+), 37 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index ea3ca34d0bf4..0f4df86d936a 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3455,8 +3455,8 @@ static bool sock_filter_is_valid_access(int off, int size, return true; } -static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, - const struct bpf_prog *prog) +static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, + const struct bpf_prog *prog, int drop_verdict) { struct bpf_insn *insn = insn_buf; @@ -3483,7 +3483,7 @@ static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, * return TC_ACT_SHOT; */ *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); - *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, TC_ACT_SHOT); + *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict); *insn++ = BPF_EXIT_INSN(); /* restore: */ @@ -3494,6 +3494,12 @@ static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, return insn - insn_buf; } +static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, + const struct bpf_prog *prog) +{ + return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT); +} + static bool tc_cls_act_is_valid_access(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) @@ -3600,40 +3606,7 @@ static bool sock_ops_is_valid_access(int off, int size, static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog) { - struct bpf_insn *insn = insn_buf; - - if (!direct_write) - return 0; - - /* if (!skb->cloned) - * goto start; - * - * (Fast-path, otherwise approximation that we might be - * a clone, do the rest in helper.) - */ - *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET()); - *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); - *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); - - /* ret = bpf_skb_pull_data(skb, 0); */ - *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); - *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); - *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, - BPF_FUNC_skb_pull_data); - /* if (!ret) - * goto restore; - * return SK_DROP; - */ - *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); - *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, SK_DROP); - *insn++ = BPF_EXIT_INSN(); - - /* restore: */ - *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); - /* start: */ - *insn++ = prog->insnsi[0]; - - return insn - insn_buf; + return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP); } static bool sk_skb_is_valid_access(int off, int size, -- cgit v1.2.3-55-g7522 From 33cd149e767be9afbab9fcd3d5165a2de62313c8 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Thu, 17 Aug 2017 19:59:51 +0200 Subject: Bluetooth: hci_bcm: Add serdev support Add basic support for Broadcom serial slave devices. Probe the serial device, retrieve its maximum speed and register a new hci uart device. Tested/compatible with bcm43438 (RPi3). 
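The probe path only overrides the operating speed when the optional max-speed property is present. A trimmed user-space sketch of that pattern; read_u32() stands in for device_property_read_u32(), which returns 0 on success and a negative error code when the property is absent:

#include <stdint.h>

struct demo_uart {
        uint32_t oper_speed;    /* 0 means "keep the protocol default" */
};

/* Apply the optional "max-speed" property if the firmware/DT provides it. */
static void demo_apply_max_speed(struct demo_uart *hu,
                                 int (*read_u32)(const char *name, uint32_t *val))
{
        uint32_t speed;

        if (!read_u32("max-speed", &speed))
                hu->oper_speed = speed;
        /* on failure, the default left in oper_speed is kept */
}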
Signed-off-by: Loic Poulain Signed-off-by: Marcel Holtmann --- drivers/bluetooth/Kconfig | 1 + drivers/bluetooth/hci_bcm.c | 85 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 84 insertions(+), 2 deletions(-) diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 3a6ead603e49..fae5a74dc737 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -168,6 +168,7 @@ config BT_HCIUART_INTEL config BT_HCIUART_BCM bool "Broadcom protocol support" depends on BT_HCIUART + depends on BT_HCIUART_SERDEV select BT_HCIUART_H4 select BT_BCM help diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 1eb286ade48a..34882f18c563 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -27,6 +27,8 @@ #include #include #include +#include +#include #include #include #include @@ -34,6 +36,7 @@ #include #include #include +#include #include #include @@ -46,6 +49,7 @@ #define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */ +/* platform device driver resources */ struct bcm_device { struct list_head list; @@ -69,6 +73,12 @@ struct bcm_device { #endif }; +/* serdev driver resources */ +struct bcm_serdev { + struct hci_uart hu; +}; + +/* generic bcm uart resources */ struct bcm_data { struct sk_buff *rx_skb; struct sk_buff_head txq; @@ -80,6 +90,14 @@ struct bcm_data { static DEFINE_MUTEX(bcm_device_lock); static LIST_HEAD(bcm_device_list); +static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + if (hu->serdev) + serdev_device_set_baudrate(hu->serdev, speed); + else + hci_uart_set_baudrate(hu, speed); +} + static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed) { struct hci_dev *hdev = hu->hdev; @@ -290,6 +308,14 @@ static int bcm_open(struct hci_uart *hu) hu->priv = bcm; + /* If this is a serdev defined device, then only use + * serdev open primitive and skip the rest. + */ + if (hu->serdev) { + serdev_device_open(hu->serdev); + goto out; + } + if (!hu->tty->dev) goto out; @@ -325,6 +351,12 @@ static int bcm_close(struct hci_uart *hu) bt_dev_dbg(hu->hdev, "hu %p", hu); + /* If this is a serdev defined device, only use serdev + * close primitive and then continue as usual. 
+ */ + if (hu->serdev) + serdev_device_close(hu->serdev); + /* Protect bcm->dev against removal of the device or driver */ mutex_lock(&bcm_device_lock); if (bcm_device_exists(bdev)) { @@ -400,7 +432,7 @@ static int bcm_setup(struct hci_uart *hu) speed = 0; if (speed) - hci_uart_set_baudrate(hu, speed); + host_set_baudrate(hu, speed); /* Operational speed if any */ if (hu->oper_speed) @@ -413,7 +445,7 @@ static int bcm_setup(struct hci_uart *hu) if (speed) { err = bcm_set_baudrate(hu, speed); if (!err) - hci_uart_set_baudrate(hu, speed); + host_set_baudrate(hu, speed); } finalize: @@ -906,9 +938,57 @@ static struct platform_driver bcm_driver = { }, }; +static int bcm_serdev_probe(struct serdev_device *serdev) +{ + struct bcm_serdev *bcmdev; + u32 speed; + int err; + + bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL); + if (!bcmdev) + return -ENOMEM; + + bcmdev->hu.serdev = serdev; + serdev_device_set_drvdata(serdev, bcmdev); + + err = device_property_read_u32(&serdev->dev, "max-speed", &speed); + if (!err) + bcmdev->hu.oper_speed = speed; + + return hci_uart_register_device(&bcmdev->hu, &bcm_proto); +} + +static void bcm_serdev_remove(struct serdev_device *serdev) +{ + struct bcm_serdev *bcmdev = serdev_device_get_drvdata(serdev); + + hci_uart_unregister_device(&bcmdev->hu); +} + +#ifdef CONFIG_OF +static const struct of_device_id bcm_bluetooth_of_match[] = { + { .compatible = "brcm,bcm43438-bt" }, + { }, +}; +MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match); +#endif + +static struct serdev_device_driver bcm_serdev_driver = { + .probe = bcm_serdev_probe, + .remove = bcm_serdev_remove, + .driver = { + .name = "hci_uart_bcm", + .of_match_table = of_match_ptr(bcm_bluetooth_of_match), + }, +}; + int __init bcm_init(void) { + /* For now, we need to keep both platform device + * driver (ACPI generated) and serdev driver (DT). + */ platform_driver_register(&bcm_driver); + serdev_device_driver_register(&bcm_serdev_driver); return hci_uart_register_proto(&bcm_proto); } @@ -916,6 +996,7 @@ int __init bcm_init(void) int __exit bcm_deinit(void) { platform_driver_unregister(&bcm_driver); + serdev_device_driver_unregister(&bcm_serdev_driver); return hci_uart_unregister_proto(&bcm_proto); } -- cgit v1.2.3-55-g7522 From 778ead344acce7ad38df390f2f28b91118c9b2b0 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Thu, 17 Aug 2017 19:59:48 +0200 Subject: dt-bindings: net: bluetooth: Add broadcom-bluetooth Add binding document for serial bluetooth chips using Broadcom protocol. Signed-off-by: Loic Poulain Signed-off-by: Marcel Holtmann --- .../devicetree/bindings/net/broadcom-bluetooth.txt | 35 ++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/broadcom-bluetooth.txt diff --git a/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt new file mode 100644 index 000000000000..4194ff7e6ee6 --- /dev/null +++ b/Documentation/devicetree/bindings/net/broadcom-bluetooth.txt @@ -0,0 +1,35 @@ +Broadcom Bluetooth Chips +--------------------- + +This documents the binding structure and common properties for serial +attached Broadcom devices. + +Serial attached Broadcom devices shall be a child node of the host UART +device the slave device is attached to. 
+ +Required properties: + + - compatible: should contain one of the following: + * "brcm,bcm43438-bt" + +Optional properties: + + - max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt + - shutdown-gpios: GPIO specifier, used to enable the BT module + - device-wakeup-gpios: GPIO specifier, used to wakeup the controller + - host-wakeup-gpios: GPIO specifier, used to wakeup the host processor + - clocks: clock specifier if external clock provided to the controller + - clock-names: should be "extclk" + + +Example: + +&uart2 { + pinctrl-names = "default"; + pinctrl-0 = <&uart2_pins>; + + bluetooth { + compatible = "brcm,bcm43438-bt"; + max-speed = <921600>; + }; +}; -- cgit v1.2.3-55-g7522 From 01d5e44ace8a20fc51e0d530f98acb3c365345a5 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Thu, 17 Aug 2017 21:41:09 +0200 Subject: Bluetooth: hci_bcm: Handle empty packet after firmware loading The Broadcom controller on the Raspberry Pi3 sends an empty packet with packet type 0x00 after launching the firmware. This will cause logging of errors. Bluetooth: hci0: Frame reassembly failed (-84) Since this seems to be an intented behaviour of the controller, handle it gracefully by parsing that empty packet with packet type 0x00 and then just simply report it as diagnostic packet. With that change no errors are logging and the packet itself is actually recorded in the Bluetooth monitor traces. < HCI Command: Broadcom Launch RAM (0x3f|0x004e) plen 4 Address: 0xffffffff > HCI Event: Command Complete (0x0e) plen 4 Broadcom Launch RAM (0x3f|0x004e) ncmd 1 Status: Success (0x00) = Vendor Diagnostic (len 0) < HCI Command: Broadcom Update UART Baud Rate (0x3f|0x0018) plen 6 00 00 00 10 0e 00 ...... > HCI Event: Command Complete (0x0e) plen 4 Broadcom Update UART Baud Rate (0x3f|0x0018) ncmd 1 Status: Success (0x00) < HCI Command: Reset (0x03|0x0003) plen 0 > HCI Event: Command Complete (0x0e) plen 4 Reset (0x03|0x0003) ncmd 1 Status: Success (0x00) Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- drivers/bluetooth/hci_bcm.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 34882f18c563..e2540113d0da 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -44,6 +44,9 @@ #include "btbcm.h" #include "hci_uart.h" +#define BCM_NULL_PKT 0x00 +#define BCM_NULL_SIZE 0 + #define BCM_LM_DIAG_PKT 0x07 #define BCM_LM_DIAG_SIZE 63 @@ -468,11 +471,19 @@ finalize: .lsize = 0, \ .maxlen = BCM_LM_DIAG_SIZE +#define BCM_RECV_NULL \ + .type = BCM_NULL_PKT, \ + .hlen = BCM_NULL_SIZE, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = BCM_NULL_SIZE + static const struct h4_recv_pkt bcm_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { BCM_RECV_LM_DIAG, .recv = hci_recv_diag }, + { BCM_RECV_NULL, .recv = hci_recv_diag }, }; static int bcm_recv(struct hci_uart *hu, const void *data, int count) -- cgit v1.2.3-55-g7522 From 64511df45cbeec02670a83b666ff584dd1d767a2 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 16 Jul 2017 16:22:08 +0300 Subject: iwlwifi: mvm: remove the corunning support The corunning block was supposed to help in coex scenarios. It required the driver to configure the firmware based on the coupling between the two antennas of the devices. This was never in use and the configuration sent by the driver has always been blank. Remove all that code. 
Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/coex.h | 13 - .../net/wireless/intel/iwlwifi/fw/api/commands.h | 12 - drivers/net/wireless/intel/iwlwifi/fw/api/config.h | 8 - drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 268 --------------------- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 1 - drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 5 - drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 9 - drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 4 - 8 files changed, 320 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index df4ecec59b40..2ba3ea4fa999 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -76,7 +76,6 @@ enum iwl_bt_coex_lut_type { BT_COEX_INVALID_LUT = 0xff, }; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */ -#define BT_COEX_CORUN_LUT_SIZE (32) #define BT_REDUCED_TX_POWER_BIT BIT(7) enum iwl_bt_coex_mode { @@ -106,18 +105,6 @@ struct iwl_bt_coex_cmd { __le32 enabled_modules; } __packed; /* BT_COEX_CMD_API_S_VER_6 */ -/** - * struct iwl_bt_coex_corun_lut_update - bt coex update the corun lut - * @corun_lut20: co-running 20 MHz LUT configuration - * @corun_lut40: co-running 40 MHz LUT configuration - * - * The structure is used for the BT_COEX_UPDATE_CORUN_LUT command. - */ -struct iwl_bt_coex_corun_lut_update_cmd { - __le32 corun_lut20[BT_COEX_CORUN_LUT_SIZE]; - __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE]; -} __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */ - /** * struct iwl_bt_coex_reduced_txp_update_cmd * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT to enable / disable, rest of the diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index 0eb35b119ae9..074868394427 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -135,12 +135,6 @@ enum iwl_legacy_cmds { */ DBG_CFG = 0x9, - /** - * @ANTENNA_COUPLING_NOTIFICATION: - * Antenna coupling data, &struct iwl_mvm_antenna_coupling_notif - */ - ANTENNA_COUPLING_NOTIFICATION = 0xa, - /** * @SCAN_ITERATION_COMPLETE_UMAC: * Firmware indicates a scan iteration completed, using @@ -523,12 +517,6 @@ enum iwl_legacy_cmds { */ BT_CONFIG = 0x9b, - /** - * @BT_COEX_UPDATE_CORUN_LUT: - * &struct iwl_bt_coex_corun_lut_update_cmd - */ - BT_COEX_UPDATE_CORUN_LUT = 0x5b, - /** * @BT_COEX_UPDATE_REDUCED_TXP: * &struct iwl_bt_coex_reduced_txp_update_cmd diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h index ee1bd45b7021..7f645b62804e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h @@ -181,12 +181,4 @@ struct iwl_dc2dc_config_resp { __le32 dc2dc_freq_tune1; } __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */ -/** - * struct iwl_mvm_antenna_coupling_notif - antenna coupling notification - * @isolation: antenna isolation value - */ -struct iwl_mvm_antenna_coupling_notif { - __le32 isolation; -} __packed; - #endif /* __iwl_fw_api_config_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 0b4486114ddc..890dbfff3a06 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -148,215 +148,6 @@ static const __le64 iwl_ci_mask[][3] = { }, }; -struct 
corunning_block_luts { - u8 range; - __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; -}; - -/* - * Ranges for the antenna coupling calibration / co-running block LUT: - * LUT0: [ 0, 12[ - * LUT1: [12, 20[ - * LUT2: [20, 21[ - * LUT3: [21, 23[ - * LUT4: [23, 27[ - * LUT5: [27, 30[ - * LUT6: [30, 32[ - * LUT7: [32, 33[ - * LUT8: [33, - [ - */ -static const struct corunning_block_luts antenna_coupling_ranges[] = { - { - .range = 0, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 12, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 20, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 21, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 23, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 27, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 30, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 32, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), 
cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, - { - .range = 33, - .lut20 = { - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - cpu_to_le32(0x00000000), cpu_to_le32(0x00000000), - }, - }, -}; - static enum iwl_bt_coex_lut_type iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif) { @@ -437,9 +228,6 @@ int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm) bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED); - if (iwl_mvm_bt_is_plcr_supported(mvm)) - bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_CORUN_ENABLED); - if (iwl_mvm_is_mplut_supported(mvm)) bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED); @@ -908,59 +696,3 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm) { iwl_mvm_bt_coex_notif_handle(mvm); } - -void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_mvm_antenna_coupling_notif *notif = (void *)pkt->data; - u32 ant_isolation = le32_to_cpu(notif->isolation); - struct iwl_bt_coex_corun_lut_update_cmd cmd = {}; - u8 __maybe_unused lower_bound, upper_bound; - u8 lut; - - if (!iwl_mvm_bt_is_plcr_supported(mvm)) - return; - - lockdep_assert_held(&mvm->mutex); - - /* Ignore updates if we are in force mode */ - if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) - return; - - if (ant_isolation == mvm->last_ant_isol) - return; - - for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++) - if (ant_isolation < antenna_coupling_ranges[lut + 1].range) - break; - - lower_bound = antenna_coupling_ranges[lut].range; - - if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1) - upper_bound = antenna_coupling_ranges[lut + 1].range; - else - upper_bound = antenna_coupling_ranges[lut].range; - - IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n", - ant_isolation, lower_bound, upper_bound, lut); - - mvm->last_ant_isol = ant_isolation; - - if (mvm->last_corun_lut == lut) - return; - - mvm->last_corun_lut = lut; - - /* For the moment, use the same LUT for 20GHz and 40GHz */ - memcpy(&cmd.corun_lut20, antenna_coupling_ranges[lut].lut20, - sizeof(cmd.corun_lut20)); - - memcpy(&cmd.corun_lut40, antenna_coupling_ranges[lut].lut20, - sizeof(cmd.corun_lut40)); - - if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_CORUN_LUT, 0, - sizeof(cmd), &cmd)) - IWL_ERR(mvm, - "failed to send BT_COEX_UPDATE_CORUN_LUT command\n"); -} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index a922a351c916..753d4138e30f 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -95,7 +95,6 @@ #define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH 62 #define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH 65 #define IWL_MVM_BT_COEX_SYNC2SCO 1 -#define IWL_MVM_BT_COEX_CORUNNING 0 #define IWL_MVM_BT_COEX_MPLUT 1 #define IWL_MVM_BT_COEX_RRC 1 #define IWL_MVM_BT_COEX_TTC 1 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index ba2745a3b537..3d7b5bd7a6b7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -565,9 +565,6 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, pos += scnprintf(buf + pos, bufsz - pos, "bt_activity_grading = %d\n", le32_to_cpu(notif->bt_activity_grading)); - pos += scnprintf(buf + pos, bufsz - pos, - "antenna isolation = %d CORUN LUT index = %d\n", - mvm->last_ant_isol, mvm->last_corun_lut); pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", notif->rrc_status & 0xF); pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", @@ -577,8 +574,6 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, IWL_MVM_BT_COEX_SYNC2SCO); pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", IWL_MVM_BT_COEX_MPLUT); - pos += scnprintf(buf + pos, bufsz - pos, "corunning = %d\n", - IWL_MVM_BT_COEX_CORUNNING); mutex_unlock(&mvm->mutex); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 8ff74fbb2562..54e5c8c6736b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -924,8 +924,6 @@ struct iwl_mvm { struct iwl_bt_coex_profile_notif last_bt_notif; struct iwl_bt_coex_ci_cmd last_bt_ci_cmd; - u32 last_ant_isol; - u8 last_corun_lut; u8 bt_tx_prio; enum iwl_bt_force_ant_mode bt_force_ant_mode; @@ -1175,13 +1173,6 @@ static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC); } -static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm) -{ - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && - IWL_MVM_BT_COEX_CORUNNING; -} - static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) { return fw_has_capa(&mvm->fw->ucode_capa, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 29a21a11c7f8..640881c8d703 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -256,8 +256,6 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, RX_HANDLER_ASYNC_LOCKED), - RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, - iwl_mvm_rx_ant_coupling_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, iwl_mvm_window_status_notif, RX_HANDLER_SYNC), @@ -325,7 +323,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(INIT_COMPLETE_NOTIF), HCMD_NAME(PHY_CONTEXT_CMD), HCMD_NAME(DBG_CFG), - HCMD_NAME(ANTENNA_COUPLING_NOTIFICATION), HCMD_NAME(SCAN_CFG_CMD), HCMD_NAME(SCAN_REQ_UMAC), HCMD_NAME(SCAN_ABORT_UMAC), @@ -357,7 +354,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD), HCMD_NAME(HOT_SPOT_CMD), HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD), - HCMD_NAME(BT_COEX_UPDATE_CORUN_LUT), 
HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP), HCMD_NAME(BT_COEX_CI), HCMD_NAME(PHY_CONFIGURATION_CMD), -- cgit v1.2.3-55-g7522 From 00e0c6c8fe8833acfebebf0af53b48a965084fd4 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Mon, 24 Jul 2017 14:47:52 +0300 Subject: iwlwifi: mvm: consider RFKILL during INIT as success There's no need to differentiate an INIT that ended early because of RFKILL from one that succeded. Additionally, if INIT fails later, during calibration, due to RFKILL, we can just return success and continue as if we were already in RFKILL to start with. Remove this unnecessary differentiation and do some other small clean-ups while at it. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 38 +++++++++++++--------------- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 - 2 files changed, 17 insertions(+), 22 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 0099050f6e2b..ed18479a7b8c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -475,13 +475,13 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT); if (ret) { IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret); - goto error; + goto remove_notif; } if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) { ret = iwl_mvm_send_bt_init_conf(mvm); if (ret) - goto error; + goto remove_notif; } /* Read the NVM only at driver load time, no need to do this twice */ @@ -490,7 +490,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_nvm_init(mvm, true); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); - goto error; + goto remove_notif; } } @@ -498,8 +498,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) if (mvm->nvm_file_name) iwl_mvm_load_nvm_to_nic(mvm); - ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); - WARN_ON(ret); + WARN_ON(iwl_nvm_check_version(mvm->nvm_data, mvm->trans)); /* * abort after reading the nvm in case RF Kill is on, we will complete @@ -508,9 +507,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "jump over all phy activities due to RF kill\n"); - iwl_remove_notification(&mvm->notif_wait, &calib_wait); - ret = 1; - goto out; + goto remove_notif; } mvm->calibrating = true; @@ -518,17 +515,13 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) /* Send TX valid antennas before triggering calibrations */ ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm)); if (ret) - goto error; + goto remove_notif; - /* - * Send phy configurations command to init uCode - * to start the 16.0 uCode init image internal calibrations. - */ ret = iwl_send_phy_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", ret); - goto error; + goto remove_notif; } /* @@ -536,15 +529,21 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) * just wait for the calibration complete notification. 
*/ ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait, - MVM_UCODE_CALIB_TIMEOUT); + MVM_UCODE_CALIB_TIMEOUT); + if (!ret) + goto out; - if (ret && iwl_mvm_is_radio_hw_killed(mvm)) { + if (iwl_mvm_is_radio_hw_killed(mvm)) { IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n"); - ret = 1; + ret = 0; + } else { + IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n", + ret); } + goto out; -error: +remove_notif: iwl_remove_notification(&mvm->notif_wait, &calib_wait); out: mvm->calibrating = false; @@ -1043,9 +1042,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (ret) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); - /* this can't happen */ - if (WARN_ON(ret > 0)) - ret = -ERFKILL; return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 640881c8d703..76d256a9ad71 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -751,7 +751,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); mutex_unlock(&mvm->mutex); - /* returns 0 if successful, 1 if success but in rfkill */ if (err < 0) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err); goto out_free; -- cgit v1.2.3-55-g7522 From d98d94952ab8421909159e6a7e4a9c8ad2204753 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Mon, 24 Jul 2017 16:04:47 +0300 Subject: iwlwifi: call iwl_remove_notification from iwl_wait_notification The iwl_wait_notification() function removes the wait entry from the list. To make it clearer that it's doing the same thing as iwl_remove_notification(), call the latter instead of having duplicate code. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c | 25 +++++++++++----------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c index 29bb92e3df59..1096c945a68b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -161,6 +162,15 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, } IWL_EXPORT_SYMBOL(iwl_init_notification_wait); +void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, + struct iwl_notification_wait *wait_entry) +{ + spin_lock_bh(¬if_wait->notif_wait_lock); + list_del(&wait_entry->list); + spin_unlock_bh(¬if_wait->notif_wait_lock); +} +IWL_EXPORT_SYMBOL(iwl_remove_notification); + int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, struct iwl_notification_wait *wait_entry, unsigned long timeout) @@ -171,9 +181,7 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, wait_entry->triggered || wait_entry->aborted, timeout); - spin_lock_bh(¬if_wait->notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(¬if_wait->notif_wait_lock); + iwl_remove_notification(notif_wait, wait_entry); if (wait_entry->aborted) return -EIO; @@ -184,12 +192,3 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, return 0; } IWL_EXPORT_SYMBOL(iwl_wait_notification); - -void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, - struct iwl_notification_wait *wait_entry) -{ - spin_lock_bh(¬if_wait->notif_wait_lock); - list_del(&wait_entry->list); - spin_unlock_bh(¬if_wait->notif_wait_lock); -} -IWL_EXPORT_SYMBOL(iwl_remove_notification); -- cgit v1.2.3-55-g7522 From fbfe378fe4e86fd3ff5ca491448da78c26a940ea Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 23 Jul 2017 13:59:47 +0300 Subject: iwlwifi: mvm: support new Coex firmware API The firmware now adds more information about time sharing with the Bluetooth core. Adapt the API structures and add the new fields in the debugfs hooks. Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/coex.h | 31 ++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/fw/file.h | 3 +++ drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 33 +++++++++++++++++++----- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 13 +++++++--- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 6 +++++ 5 files changed, 77 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index 2ba3ea4fa999..d9a74db01f90 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -178,6 +178,7 @@ enum iwl_bt_mxbox_dw3 { BT_MBOX(3, ACL_STATE, 3, 1), BT_MBOX(3, MSTR_STATE, 4, 1), BT_MBOX(3, OBX_STATE, 5, 1), + BT_MBOX(3, A2DP_SRC, 6, 1), BT_MBOX(3, OPEN_CON_2, 8, 2), BT_MBOX(3, TRAFFIC_LOAD, 10, 2), BT_MBOX(3, CHL_SEQN_LSB, 12, 1), @@ -187,6 +188,11 @@ enum iwl_bt_mxbox_dw3 { BT_MBOX(3, UPDATE_REQUEST, 21, 1), }; +enum iwl_bt_mxbox_dw4 { + BT_MBOX(4, ATS_BT_INTERVAL, 0, 7), + BT_MBOX(4, ATS_BT_ACTIVE_MAX_TH, 7, 7), +}; + #define BT_MBOX_MSG(_notif, _num, _field) \ ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ >> BT_MBOX##_num##_##_field##_POS) @@ -220,6 +226,31 @@ enum iwl_bt_ci_compliance { * @reserved: reserved */ struct iwl_bt_coex_profile_notif { + __le32 mbox_msg[8]; + __le32 msg_idx; + __le32 bt_ci_compliance; + + __le32 primary_ch_lut; + __le32 secondary_ch_lut; + __le32 bt_activity_grading; + u8 ttc_status; + u8 rrc_status; + __le16 reserved; +} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_5 */ + +/** + * struct iwl_bt_coex_profile_notif - notification about BT coex + * @mbox_msg: message from BT to WiFi + * 
@msg_idx: the index of the message + * @bt_ci_compliance: enum %iwl_bt_ci_compliance + * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type + * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type + * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading + * @ttc_status: is TTC enabled - one bit per PHY + * @rrc_status: is RRC enabled - one bit per PHY + * @reserved: reserved + */ +struct iwl_bt_coex_profile_notif_v4 { __le32 mbox_msg[4]; __le32 msg_idx; __le32 bt_ci_compliance; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index d933aa324ffe..a1cd2f41b026 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -246,6 +246,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t; * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignement. * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2 * @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used + * @IWL_UCODE_TLV_API_ATS_COEX_EXTERNAL: the coex notification is enlared to + * include information about ACL time sharing. * * @NUM_IWL_UCODE_TLV_API: number of bits used */ @@ -262,6 +264,7 @@ enum iwl_ucode_tlv_api { /* API Set 1 */ IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, + IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37, NUM_IWL_UCODE_TLV_API #ifdef __CHECKER__ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c index 890dbfff3a06..79c80f181f7d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c @@ -7,6 +7,7 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -512,17 +514,36 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data; + if (!iwl_mvm_has_new_ats_coex_api(mvm)) { + struct iwl_bt_coex_profile_notif_v4 *v4 = (void *)pkt->data; + + mvm->last_bt_notif.mbox_msg[0] = v4->mbox_msg[0]; + mvm->last_bt_notif.mbox_msg[1] = v4->mbox_msg[1]; + mvm->last_bt_notif.mbox_msg[2] = v4->mbox_msg[2]; + mvm->last_bt_notif.mbox_msg[3] = v4->mbox_msg[3]; + mvm->last_bt_notif.msg_idx = v4->msg_idx; + mvm->last_bt_notif.bt_ci_compliance = v4->bt_ci_compliance; + mvm->last_bt_notif.primary_ch_lut = v4->primary_ch_lut; + mvm->last_bt_notif.secondary_ch_lut = v4->secondary_ch_lut; + mvm->last_bt_notif.bt_activity_grading = + v4->bt_activity_grading; + mvm->last_bt_notif.ttc_status = v4->ttc_status; + mvm->last_bt_notif.rrc_status = v4->rrc_status; + } else { + /* save this notification for future use: rssi fluctuations */ + memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); + } + IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n"); - IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance); + IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", + mvm->last_bt_notif.bt_ci_compliance); IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n", - le32_to_cpu(notif->primary_ch_lut)); + le32_to_cpu(mvm->last_bt_notif.primary_ch_lut)); IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n", - le32_to_cpu(notif->secondary_ch_lut)); + le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut)); IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n", - le32_to_cpu(notif->bt_activity_grading)); + le32_to_cpu(mvm->last_bt_notif.bt_activity_grading)); - /* remember this notification for future use: rssi fluctuations */ - memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif)); iwl_mvm_bt_coex_notif_handle(mvm); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 3d7b5bd7a6b7..c88a37397075 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -482,7 +482,8 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, true ? 
"\n" : ", "); static -int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, +int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm, + struct iwl_bt_coex_profile_notif *notif, char *buf, int pos, int bufsz) { pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); @@ -526,6 +527,7 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, BT_MBOX_PRINT(3, SCO_STATE, false); BT_MBOX_PRINT(3, SNIFF_STATE, false); BT_MBOX_PRINT(3, A2DP_STATE, false); + BT_MBOX_PRINT(3, A2DP_SRC, false); BT_MBOX_PRINT(3, ACL_STATE, false); BT_MBOX_PRINT(3, MSTR_STATE, false); BT_MBOX_PRINT(3, OBX_STATE, false); @@ -535,7 +537,12 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, BT_MBOX_PRINT(3, INBAND_P, false); BT_MBOX_PRINT(3, MSG_TYPE_2, false); BT_MBOX_PRINT(3, SSN_2, false); - BT_MBOX_PRINT(3, UPDATE_REQUEST, true); + BT_MBOX_PRINT(3, UPDATE_REQUEST, !iwl_mvm_has_new_ats_coex_api(mvm)); + + if (iwl_mvm_has_new_ats_coex_api(mvm)) { + BT_MBOX_PRINT(4, ATS_BT_INTERVAL, false); + BT_MBOX_PRINT(4, ATS_BT_ACTIVE_MAX_TH, true); + } return pos; } @@ -554,7 +561,7 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, mutex_lock(&mvm->mutex); - pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); + pos += iwl_mvm_coex_dump_mbox(mvm, notif, buf, pos, bufsz); pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", notif->bt_ci_compliance); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 54e5c8c6736b..d58de9b80886 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1242,6 +1242,12 @@ static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm) IWL_UCODE_TLV_API_NEW_RX_STATS); } +static inline bool iwl_mvm_has_new_ats_coex_api(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL); +} + static inline struct agg_tx_status * iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp) { -- cgit v1.2.3-55-g7522 From 4ecab5616023e742b70493cf0e90fc97e828d353 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 16 Jul 2017 12:28:05 +0300 Subject: iwlwifi: pcie: support short Tx queues for A000 device family This allows to modify TFD_TX_CMD_SLOTS to a power of 2 which is smaller than 256. Note that we still need to set values to wrap at 256 into the scheduler's write pointer, but all the rest of the code can use shorter transmit queues. 
Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- .../net/wireless/intel/iwlwifi/pcie/ctxt-info.c | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 13 +++---- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 41 +++++++++++----------- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 13 +++---- 6 files changed, 37 insertions(+), 36 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index eddaca76d514..3fc4343581ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -244,7 +244,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, ctxt_info->hcmd_cfg.cmd_queue_addr = cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); ctxt_info->hcmd_cfg.cmd_queue_size = - TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX); + TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS); /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index f46871840fd2..79020cf8c79c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -661,10 +661,16 @@ static inline void iwl_pcie_sw_reset(struct iwl_trans *trans) usleep_range(5000, 6000); } +static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index) +{ + return index & (q->n_window - 1); +} + static inline void *iwl_pcie_get_tfd(struct iwl_trans_pcie *trans_pcie, struct iwl_txq *txq, int idx) { - return txq->tfds + trans_pcie->tfd_size * idx; + return txq->tfds + trans_pcie->tfd_size * iwl_pcie_get_cmd_index(txq, + idx); } static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) @@ -726,11 +732,6 @@ static inline bool iwl_queue_used(const struct iwl_txq *q, int i) !(i < q->read_ptr && i >= q->write_ptr); } -static inline u8 get_cmd_index(struct iwl_txq *q, u32 index) -{ - return index & (q->n_window - 1); -} - static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 351c4423125a..e5d2bf0bde37 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1176,7 +1176,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, sequence = le16_to_cpu(pkt->hdr.sequence); index = SEQ_TO_INDEX(sequence); - cmd_index = get_cmd_index(txq, index); + cmd_index = iwl_pcie_get_cmd_index(txq, index); if (rxq->id == 0) iwl_op_mode_rx(trans->op_mode, &rxq->napi, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 382d7c251066..3ecafa2ad922 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2835,7 +2835,7 @@ static struct iwl_trans_dump_data spin_lock_bh(&cmdq->lock); ptr = cmdq->write_ptr; for (i = 0; i < cmdq->n_window; i++) { - u8 idx = get_cmd_index(cmdq, ptr); + u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); u32 caplen, cmdlen; cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds + diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 
index 4db45e56b6ba..d74613fcb756 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -88,14 +88,14 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; - int write_ptr = txq->write_ptr; + int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); u8 filled_tfd_size, num_fetch_chunks; u16 len = byte_cnt; __le16 bc_ent; len = DIV_ROUND_UP(len, 4); - if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) + if (WARN_ON(len > 0xFFF || idx >= txq->n_window)) return; filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + @@ -111,7 +111,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt, num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - scd_bc_tbl->tfd_offset[write_ptr] = bc_ent; + scd_bc_tbl->tfd_offset[idx] = bc_ent; } /* @@ -176,16 +176,12 @@ static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window */ - int rd_ptr = txq->read_ptr; - int idx = get_cmd_index(txq, rd_ptr); + int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); lockdep_assert_held(&txq->lock); - /* We have only q->n_window txq->entries, but we use - * TFD_QUEUE_SIZE_MAX tfds - */ iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_pcie_get_tfd(trans_pcie, txq, rd_ptr)); + iwl_pcie_get_tfd(trans_pcie, txq, idx)); /* free SKB */ if (txq->entries) { @@ -373,8 +369,9 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = - iwl_pcie_get_tfd(trans_pcie, txq, txq->write_ptr); + iwl_pcie_get_tfd(trans_pcie, txq, idx); dma_addr_t tb_phys; bool amsdu; int i, len, tb1_len, tb2_len, hdr_len; @@ -386,10 +383,10 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT); - tb_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr); + tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); /* The first TB points to bi-directional DMA data */ if (!amsdu) - memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, + memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); @@ -431,7 +428,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, * building the A-MSDU might have changed this data, so memcpy * it now */ - memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, + memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); return tfd; } @@ -484,6 +481,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq = trans_pcie->txq[txq_id]; + int idx; void *tfd; if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), @@ -497,16 +495,18 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); + idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + /* Set up driver data for this TFD */ - txq->entries[txq->write_ptr].skb = skb; - txq->entries[txq->write_ptr].cmd = dev_cmd; + txq->entries[idx].skb = skb; + 
txq->entries[idx].cmd = dev_cmd; dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | - INDEX_TO_SEQ(txq->write_ptr))); + INDEX_TO_SEQ(idx))); /* Set up first empty entry in queue's array of Tx/cmd buffers */ - out_meta = &txq->entries[txq->write_ptr].meta; + out_meta = &txq->entries[idx].meta; out_meta->flags = 0; tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); @@ -562,7 +562,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, unsigned long flags; void *dup_buf = NULL; dma_addr_t phys_addr; - int idx, i, cmd_pos; + int i, cmd_pos, idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); u16 copy_size, cmd_size, tb0_size; bool had_nocopy = false; u8 group_id = iwl_cmd_groupid(cmd->id); @@ -651,7 +651,6 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - idx = get_cmd_index(txq, txq->write_ptr); out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; @@ -938,7 +937,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) txq_id, txq->read_ptr); if (txq_id != trans_pcie->cmd_queue) { - int idx = get_cmd_index(txq, txq->read_ptr); + int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); struct sk_buff *skb = txq->entries[idx].skb; if (WARN_ON_ONCE(!skb)) @@ -1070,7 +1069,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, cmd->tfdq_addr = cpu_to_le64(txq->dma_addr); cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); - cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX)); + cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(TFD_TX_CMD_SLOTS)); ret = iwl_trans_send_cmd(trans, &hcmd); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index c893f9088f9d..c645d10d3707 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -106,7 +106,7 @@ static int iwl_queue_init(struct iwl_txq *q, int slots_num) q->n_window = slots_num; /* slots_num must be power-of-two size, otherwise - * get_cmd_index is broken. */ + * iwl_pcie_get_cmd_index is broken. 
*/ if (WARN_ON(!is_power_of_2(slots_num))) return -EINVAL; @@ -428,7 +428,7 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) * idx is bounded by n_window */ int rd_ptr = txq->read_ptr; - int idx = get_cmd_index(txq, rd_ptr); + int idx = iwl_pcie_get_cmd_index(txq, rd_ptr); lockdep_assert_held(&txq->lock); @@ -1100,7 +1100,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, for (; txq->read_ptr != tfd_num; txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { - struct sk_buff *skb = txq->entries[txq->read_ptr].skb; + int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); + struct sk_buff *skb = txq->entries[idx].skb; if (WARN_ON_ONCE(!skb)) continue; @@ -1109,7 +1110,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, __skb_queue_tail(skbs, skb); - txq->entries[txq->read_ptr].skb = NULL; + txq->entries[idx].skb = NULL; if (!trans->cfg->use_tfh) iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); @@ -1559,7 +1560,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, goto free_dup_buf; } - idx = get_cmd_index(txq, txq->write_ptr); + idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; @@ -1751,7 +1752,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, spin_lock_bh(&txq->lock); - cmd_index = get_cmd_index(txq, index); + cmd_index = iwl_pcie_get_cmd_index(txq, index); cmd = txq->entries[cmd_index].cmd; meta = &txq->entries[cmd_index].meta; group_id = cmd->hdr.group_id; -- cgit v1.2.3-55-g7522 From 3e73aa3bf96619b51c80c73f06512f07fb2b5659 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 27 Jul 2017 09:40:16 +0300 Subject: iwlwifi: mvm: add command name for FRAME_RELEASE This name was missing in the list. Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 76d256a9ad71..231878969332 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -384,6 +384,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), HCMD_NAME(REPLY_RX_PHY_CMD), HCMD_NAME(REPLY_RX_MPDU_CMD), + HCMD_NAME(FRAME_RELEASE), HCMD_NAME(BA_NOTIF), HCMD_NAME(MCC_UPDATE_CMD), HCMD_NAME(MCC_CHUB_UPDATE_CMD), -- cgit v1.2.3-55-g7522 From 790e663433d839c5664408d3b1f066450e85c923 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 27 Jul 2017 09:45:10 +0300 Subject: iwlwifi: mvm: include more debug data when we get an unexpected baid When we get a valid baid in a received frame, we need to check that we are aware of this baid. If not, we check that the OLD_SN bit set. If that's not the case, we issue a WARNING. Print more data when that happens. 
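For illustration, a simplified, self-contained sketch of the invariant this warning is about (the REORDER_BA_OLD_SN bit position, BAID_MAX and check_baid() are invented for the example, not the driver's definitions): a frame carrying a BAID we do not track is only acceptable when the firmware marked it as belonging to an already released session, and printing the raw reorder word is what makes the unexpected case debuggable.

#include <stdbool.h>
#include <stdio.h>

/* Invented bit position: flags a frame whose BA session was already released. */
#define REORDER_BA_OLD_SN (1u << 15)

#define BAID_MAX 32

/* A NULL entry means no reorder state is tracked for that BAID. */
static void *baid_map[BAID_MAX];

/* Warn (with the raw reorder word) only when there is neither tracked
 * state for the BAID nor the old-SN flag on the frame. */
static bool check_baid(unsigned int baid, unsigned int reorder)
{
        if (baid < BAID_MAX && baid_map[baid])
                return true;

        if (reorder & REORDER_BA_OLD_SN)
                return true;

        fprintf(stderr, "unexpected baid %u, reorder data 0x%x\n",
                baid, reorder);
        return false;
}

int main(void)
{
        check_baid(5, 0);                 /* hits the warning path */
        check_baid(5, REORDER_BA_OLD_SN); /* tolerated: old-SN frame */
        return 0;
}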
Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 6b8e57b7234a..4fbf102b3a98 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -636,8 +636,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, baid_data = rcu_dereference(mvm->baid_map[baid]); if (!baid_data) { WARN(!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN), - "Received baid %d, but no data exists for this BAID\n", - baid); + "Received baid %d, but no data exists for this BAID - reorder data 0x%x\n", + baid, reorder); return false; } @@ -758,7 +758,9 @@ static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, data = rcu_dereference(mvm->baid_map[baid]); if (!data) { - WARN_ON(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN)); + WARN(!(reorder_data & IWL_RX_MPDU_REORDER_BA_OLD_SN), + "OLD_SN isn't set, but no data exists for baid %d - reorder data 0x%x\n", + baid, reorder_data); goto out; } -- cgit v1.2.3-55-g7522 From 18f1755db781165bcda6b3c8d93b909af96bb5ab Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 26 Jul 2017 15:20:18 +0300 Subject: iwlwifi: mvm: group all dummy SAR function declarations together We have some of the SAR dummy functions when ACPI is not set declared in mvm.h and some declared in fw.c. Group them all together in fw.c for consistency and to avoid static/non-static issues. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 11 +++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 14 -------------- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index ed18479a7b8c..90ae50f7768a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -996,6 +996,17 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) { return 0; } + +int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, + int prof_b) +{ + return -ENOENT; +} + +int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) +{ + return -ENOENT; +} #endif /* CONFIG_ACPI */ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index d58de9b80886..74fdd33fd9fb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1825,21 +1825,7 @@ int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, u32 duration, u32 timeout); bool iwl_mvm_lqm_active(struct iwl_mvm *mvm); -#ifdef CONFIG_ACPI int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b); int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm); -#else -static inline -int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b) -{ - return -ENOENT; -} - -static inline -int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm) -{ - return -ENOENT; -} -#endif /* CONFIG_ACPI */ #endif /* __IWL_MVM_H__ */ -- cgit v1.2.3-55-g7522 From 7ccb498ca86c3700f391131b7c5eda9318b3b7db Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 1 Aug 2017 09:01:34 +0300 Subject: iwlwifi: mvm: use mvmsta consistently in rs.c We use mvmsta for the sta->drv_priv in mvm, but in rs.c we have a bunch of instances using sta_priv, which is probably due to it being copied from dvm. 
Change all occurrences to mvmsta for consistency with the rest of the driver Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 30 ++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 65beca3a457a..cdf10ce9dbea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1673,14 +1673,14 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct iwl_scale_tbl_info *tbl, enum rs_action scale_action) { - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) || tbl->rate.index < IWL_RATE_MCS_5_INDEX || scale_action == RS_ACTION_DOWNSCALE) - sta_priv->tlc_amsdu = false; + mvmsta->tlc_amsdu = false; else - sta_priv->tlc_amsdu = true; + mvmsta->tlc_amsdu = true; } /* @@ -2228,11 +2228,11 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, u16 high_low; s32 sr; u8 prev_agg = lq_sta->is_agg; - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data; struct rs_rate *rate; - lq_sta->is_agg = !!sta_priv->agg_tids; + lq_sta->is_agg = !!mvmsta->agg_tids; /* * Select rate-scale / modulation-mode table to work with in @@ -2491,7 +2491,7 @@ lq_update: IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n"); if (tid != IWL_MAX_TID_COUNT) { - tid_data = &sta_priv->tid_data[tid]; + tid_data = &mvmsta->tid_data[tid]; if (tid_data->state != IWL_AGG_OFF) { IWL_DEBUG_RATE(mvm, "Stop aggregation on tid %d\n", @@ -2507,7 +2507,7 @@ lq_update: if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && (lq_sta->tx_agg_tid_en & (1 << tid)) && (tid != IWL_MAX_TID_COUNT)) { - tid_data = &sta_priv->tid_data[tid]; + tid_data = &mvmsta->tid_data[tid]; if (tid_data->state == IWL_AGG_OFF && !ndp) { IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", @@ -2900,10 +2900,10 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta, static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, gfp_t gfp) { - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate; struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta; + struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; IWL_DEBUG_RATE(mvm, "create station rate scale window\n"); @@ -2917,7 +2917,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta, memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal)); lq_sta->pers.last_rssi = S8_MIN; - return &sta_priv->lq_sta; + return &mvmsta->lq_sta; } static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap, @@ -3109,8 +3109,8 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, struct ieee80211_hw *hw = mvm->hw; struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; - struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); - struct iwl_lq_sta *lq_sta = &sta_priv->lq_sta; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; struct ieee80211_supported_band *sband; unsigned long supp; /* must be unsigned long 
for for_each_set_bit */ @@ -3119,8 +3119,8 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, sband = hw->wiphy->bands[band]; - lq_sta->lq.sta_id = sta_priv->sta_id; - sta_priv->tlc_amsdu = false; + lq_sta->lq.sta_id = mvmsta->sta_id; + mvmsta->tlc_amsdu = false; for (j = 0; j < LQ_SIZE; j++) rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); @@ -3130,7 +3130,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, IWL_DEBUG_RATE(mvm, "LQ: *** rate scale station global init for station %d ***\n", - sta_priv->sta_id); + mvmsta->sta_id); /* TODO: what is a good starting rate for STA? About middle? Maybe not * the lowest or the highest rate.. Could consider using RSSI from * previous packets? Need to have IEEE 802.1X auth succeed immediately -- cgit v1.2.3-55-g7522 From f9cd3e0871b54e70d3ecfa89b76b2e9a7ac172af Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 2 Aug 2017 09:29:33 +0300 Subject: iwlwifi: mvm: update the firmware API in TX The firmware team is now re-using a bit that hasn't been used for a few generations. Re-use for TX_ON_AIR drop. This bit will be set by the firmware to indicate that a frame in an A-MPDU was dropped but not because of the already mapped reasons. Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/tx.h | 9 +++------ drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index 4928310ddd31..14ad9fb895f9 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -409,7 +409,8 @@ enum iwl_tx_status { * @AGG_TX_STATE_BT_PRIO: * @AGG_TX_STATE_FEW_BYTES: * @AGG_TX_STATE_ABORT: - * @AGG_TX_STATE_LAST_SENT_TTL: + * @AGG_TX_STATE_TX_ON_AIR_DROP: TX_ON_AIR signal drop without underrun or + * BT detection * @AGG_TX_STATE_LAST_SENT_TRY_CNT: * @AGG_TX_STATE_LAST_SENT_BT_KILL: * @AGG_TX_STATE_SCD_QUERY: @@ -433,7 +434,7 @@ enum iwl_tx_agg_status { AGG_TX_STATE_BT_PRIO = 0x002, AGG_TX_STATE_FEW_BYTES = 0x004, AGG_TX_STATE_ABORT = 0x008, - AGG_TX_STATE_LAST_SENT_TTL = 0x010, + AGG_TX_STATE_TX_ON_AIR_DROP = 0x010, AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020, AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040, AGG_TX_STATE_SCD_QUERY = 0x080, @@ -445,10 +446,6 @@ enum iwl_tx_agg_status { AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS, }; -#define AGG_TX_STATE_LAST_SENT_MSK (AGG_TX_STATE_LAST_SENT_TTL| \ - AGG_TX_STATE_LAST_SENT_TRY_CNT| \ - AGG_TX_STATE_LAST_SENT_BT_KILL) - /* * The mask below describes a status where we are absolutely sure that the MPDU * wasn't sent. For BA/Underrun we cannot be that sure. 
All we know that we've diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6d7d1a66af81..48f028366d51 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1515,7 +1515,7 @@ static const char *iwl_get_agg_tx_status(u16 status) AGG_TX_STATE_(BT_PRIO); AGG_TX_STATE_(FEW_BYTES); AGG_TX_STATE_(ABORT); - AGG_TX_STATE_(LAST_SENT_TTL); + AGG_TX_STATE_(TX_ON_AIR_DROP); AGG_TX_STATE_(LAST_SENT_TRY_CNT); AGG_TX_STATE_(LAST_SENT_BT_KILL); AGG_TX_STATE_(SCD_QUERY); -- cgit v1.2.3-55-g7522 From 3edfb5f44b942f04b4fbbddb24d8866bbfb5d1a4 Mon Sep 17 00:00:00 2001 From: Avraham Stern Date: Wed, 2 Aug 2017 12:13:20 +0300 Subject: iwlwifi: mvm: remove session protection to allow channel switch If a time event is already scheduled when trying to schedule one for channel switch, the code assumes the channel switch is already scheduled and no further action is required. However, it is possible that the scheduled time event is actually for session protection (e.g. when the first beacon after association contains the CSA IE). In this case the channel switch will not be scheduled which will finally lead to disconnection. Fix this by removing the old time event and schduling a new one for the channel switch. Signed-off-by: Avraham Stern Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 3 +- .../net/wireless/intel/iwlwifi/mvm/time-event.c | 34 ++++++++++++++++++++-- 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 2d1404c9fbf4..01143c491e53 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2023,8 +2023,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, * We received a beacon from the associated AP so * remove the session protection. */ - iwl_mvm_remove_time_event(mvm, mvmvif, - &mvmvif->time_event_data); + iwl_mvm_stop_session_protection(mvm, vif); iwl_mvm_sf_update(mvm, vif, false); WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 65d8299108d5..4d0314912e94 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2017 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -726,8 +728,21 @@ void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; + u32 id; lockdep_assert_held(&mvm->mutex); + + spin_lock_bh(&mvm->time_event_lock); + id = te_data->id; + spin_unlock_bh(&mvm->time_event_lock); + + if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) { + IWL_DEBUG_TE(mvm, + "don't remove TE with id=%u (not session protection)\n", + id); + return; + } + iwl_mvm_remove_time_event(mvm, mvmvif, te_data); } @@ -859,8 +874,23 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); if (te_data->running) { - IWL_DEBUG_TE(mvm, "CS period is already scheduled\n"); - return -EBUSY; + u32 id; + + spin_lock_bh(&mvm->time_event_lock); + id = te_data->id; + spin_unlock_bh(&mvm->time_event_lock); + + if (id == TE_CHANNEL_SWITCH_PERIOD) { + IWL_DEBUG_TE(mvm, "CS period is already scheduled\n"); + return -EBUSY; + } + + /* + * Remove the session protection time event to allow the + * channel switch. If we got here, we just heard a beacon so + * the session protection is not needed anymore anyway. + */ + iwl_mvm_remove_time_event(mvm, mvmvif, te_data); } time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD); -- cgit v1.2.3-55-g7522 From 114db230d35442e6d3382adc8ac34c944b5cd0eb Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 6 Aug 2017 13:19:05 +0300 Subject: iwlwifi: mvm: don't send BAR on flushed frames When we flush a queue, the packets will have a 'failed' status but we shouldn't send a BAR. This check was missing. Because of that, when we got an ampdu_action with IEEE80211_AMPDU_TX_STOP_FLUSH, we started the following ping pong with the firmware: 1) Set the station as 'draining' 2) Get a failed Tx status (DRAINED) 3) Send a BAR because of the failed Tx status (loop of 2 and 3) This loop wasn't endless since the BAR isn't sent on a queue that would trigger a "nested" BAR. 
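To make the fix easier to follow, here is a reduced stand-alone sketch of the decision (the tx_status values and should_send_bar() are invented for the example, not the firmware's real status codes): an unacknowledged A-MPDU frame should only lead to a BAR when its failure was not caused by a flush or drain of the queue, which is what breaks the ping-pong described above.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the firmware TX status codes. */
enum tx_status {
        TX_OK,
        TX_FAIL_FIFO_FLUSHED,
        TX_FAIL_DRAIN_FLOW,
        TX_FAIL_OTHER,
};

/* Only an unacked A-MPDU frame that was NOT dropped by a flush or drain
 * should be reported as "no block-ack", which is what later triggers a BAR. */
static bool should_send_bar(bool is_ampdu, bool acked, enum tx_status status)
{
        bool flushed = (status == TX_FAIL_FIFO_FLUSHED ||
                        status == TX_FAIL_DRAIN_FLOW);

        return is_ampdu && !acked && !flushed;
}

int main(void)
{
        printf("drained frame -> BAR? %d\n",
               should_send_bar(true, false, TX_FAIL_DRAIN_FLOW));
        printf("other failure -> BAR? %d\n",
               should_send_bar(true, false, TX_FAIL_OTHER));
        return 0;
}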
Signed-off-by: Emmanuel Grumbach Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 48f028366d51..52f9e8a76124 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1331,6 +1331,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + bool flushed = false; skb_freed++; @@ -1344,6 +1345,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, case TX_STATUS_DIRECT_DONE: info->flags |= IEEE80211_TX_STAT_ACK; break; + case TX_STATUS_FAIL_FIFO_FLUSHED: + case TX_STATUS_FAIL_DRAIN_FLOW: + flushed = true; + break; case TX_STATUS_FAIL_DEST_PS: /* the FW should have stopped the queue and not * return this status @@ -1366,7 +1371,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, /* Single frame failure in an AMPDU queue => send BAR */ if (info->flags & IEEE80211_TX_CTL_AMPDU && !(info->flags & IEEE80211_TX_STAT_ACK) && - !(info->flags & IEEE80211_TX_STAT_TX_FILTERED)) + !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed) info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; info->flags &= ~IEEE80211_TX_CTL_AMPDU; -- cgit v1.2.3-55-g7522 From d91c3fd0ceba90ed66949fccc856b1e8119a9ccd Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Sun, 6 Aug 2017 11:06:44 +0300 Subject: iwlwifi: move BT_MBOX_PRINT macro to common header Move the BT_MBOX_PRINT() macro from mvm/debugfs.c to fw/api/coex.h so it can be reused and remove duplicate definition of BT_MBOX_MSG(), keeping only the one already in coex.h. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/api/coex.h | 6 ++++++ drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 12 ------------ 2 files changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index d9a74db01f90..d09555afe2c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -197,6 +197,12 @@ enum iwl_bt_mxbox_dw4 { ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ >> BT_MBOX##_num##_##_field##_POS) +#define BT_MBOX_PRINT(_num, _field, _end) \ + pos += scnprintf(buf + pos, bufsz - pos, \ + "\t%s: %d%s", \ + #_field, \ + BT_MBOX_MSG(notif, _num, _field), \ + true ? "\n" : ", "); enum iwl_bt_activity_grading { BT_OFF = 0, BT_ON_NO_CONNECTION = 1, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index c88a37397075..e97904c2c4d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -469,18 +469,6 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } -#define BT_MBOX_MSG(_notif, _num, _field) \ - ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ - >> BT_MBOX##_num##_##_field##_POS) - - -#define BT_MBOX_PRINT(_num, _field, _end) \ - pos += scnprintf(buf + pos, bufsz - pos, \ - "\t%s: %d%s", \ - #_field, \ - BT_MBOX_MSG(notif, _num, _field), \ - true ? 
"\n" : ", "); - static int iwl_mvm_coex_dump_mbox(struct iwl_mvm *mvm, struct iwl_bt_coex_profile_notif *notif, char *buf, -- cgit v1.2.3-55-g7522 From 5f5537ac3f0f7bd527d332aa166a009be833dfae Mon Sep 17 00:00:00 2001 From: João Paulo Rechi Vita Date: Thu, 3 Aug 2017 07:47:28 -0700 Subject: iwlwifi: Demote messages about fw flags size to info These messages are not reporting a real error, just the fact that the firmware knows about more flags than the driver. Currently these messages are presented to the user during boot if there is no bootsplash covering the console, even when booting the kernel with "quiet". Demoting it to the warn level helps having a clean boot process. Signed-off-by: João Paulo Rechi Vita Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index cdb765656115..bd3902f888e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -487,9 +487,9 @@ static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data, int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) { - IWL_ERR(drv, - "api flags index %d larger than supported by driver\n", - api_index); + IWL_WARN(drv, + "api flags index %d larger than supported by driver\n", + api_index); return; } @@ -508,9 +508,9 @@ static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data, int i; if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) { - IWL_ERR(drv, - "capa flags index %d larger than supported by driver\n", - api_index); + IWL_WARN(drv, + "capa flags index %d larger than supported by driver\n", + api_index); return; } -- cgit v1.2.3-55-g7522 From f5d8f50f271d1f80c2afd7eada1c91a863c87a06 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Tue, 8 Aug 2017 14:56:58 +0300 Subject: iwlwifi: mvm: Fix channel switch in case of count <= 1 The code did not consider the case that the channel switch counter is <= 1, which would result with an inaccurate calculation of the time event apply time. As the specification states that in case of counter == 0 the switch occurs at any time after the reception the frame, and for counter == 1 the switch would happens before the next TBTT, schedule the time event immediately. Signed-off-by: Ilan Peer Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 01143c491e53..cfabe302c9c7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -3875,11 +3875,16 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, /* Schedule the time event to a bit before beacon 1, * to make sure we're in the new channel when the - * GO/AP arrives. + * GO/AP arrives. In case count <= 1 immediately schedule the + * TE (this might result with some packet loss or connection + * loss). 
*/ - apply_time = chsw->device_timestamp + - ((vif->bss_conf.beacon_int * (chsw->count - 1) - - IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); + if (chsw->count <= 1) + apply_time = 0; + else + apply_time = chsw->device_timestamp + + ((vif->bss_conf.beacon_int * (chsw->count - 1) - + IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); if (chsw->block_tx) iwl_mvm_csa_client_absent(mvm, vif); -- cgit v1.2.3-55-g7522 From 5f19d6dd811b1bd4ceb0823d22ec00bbc479e379 Mon Sep 17 00:00:00 2001 From: Tzipi Peres Date: Tue, 25 Jul 2017 13:04:46 +0300 Subject: iwlwifi: distinguish different RF modules in A000 devices Newer versions of A000 devices come with two diffenent RF modules. The PCI_ID, the subsystem ID and the RF ID are identical in these two cases, so we need to differentiate them by using the CSR_HW_RF_ID register- in order to load the appropriate firmware. Signed-off-by: Tzipi Peres Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/cfg/a000.c | 34 ++++++++++++++++++++++--- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 4 ++- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 7 ++++- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 23 ++++++++++++----- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 2 +- 5 files changed, 58 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c index 40d67a5a2635..dcd35b5c9d24 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c @@ -76,13 +76,19 @@ #define IWL_A000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" #define IWL_A000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" #define IWL_A000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" +#define IWL_A000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" +#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_A000_HR_MODULE_FIRMWARE(api) \ IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode" #define IWL_A000_JF_MODULE_FIRMWARE(api) \ IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode" -#define IWL_A000_HR_QNJ_MODULE_FIRMWARE(api) \ +#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ + IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode" +#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ + IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_A000 10 @@ -171,7 +177,7 @@ const struct iwl_cfg iwla000_2ax_cfg_hr = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; -const struct iwl_cfg iwla000_2ax_cfg_qnj_hr = { +const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0 = { .name = "Intel(R) Dual Band Wireless AX a000", .fw_name_pre = IWL_A000_HR_F0_FW_PRE, IWL_DEVICE_A000, @@ -181,6 +187,28 @@ const struct iwl_cfg iwla000_2ax_cfg_qnj_hr = { .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; +const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0 = { + .name = "Intel(R) Dual Band Wireless AX a000", + .fw_name_pre = IWL_A000_JF_B0_FW_PRE, + IWL_DEVICE_A000, + .ht_params = &iwl_a000_ht_params, + .nvm_ver = IWL_A000_NVM_VERSION, + .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0 = { + .name = "Intel(R) Dual Band Wireless AX a000", + .fw_name_pre = IWL_A000_HR_A0_FW_PRE, + IWL_DEVICE_A000, + .ht_params = &iwl_a000_ht_params, + .nvm_ver = IWL_A000_NVM_VERSION, + .nvm_calib_ver = IWL_A000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + 
MODULE_FIRMWARE(IWL_A000_HR_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_A000_JF_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_A000_HR_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_A000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 573dbeed3fbf..b82a3d0f64b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -463,7 +463,9 @@ extern const struct iwl_cfg iwla000_2ac_cfg_hr; extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwla000_2ac_cfg_jf; extern const struct iwl_cfg iwla000_2ax_cfg_hr; -extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr; +extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_f0; +extern const struct iwl_cfg iwla000_2ax_cfg_qnj_jf_b0; +extern const struct iwl_cfg iwla000_2ax_cfg_qnj_hr_a0; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 7d468ad7cb6a..b03e0f975b5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -354,11 +354,16 @@ enum { #define CSR_HW_REV_TYPE_135 (0x0000120) #define CSR_HW_REV_TYPE_7265D (0x0000210) #define CSR_HW_REV_TYPE_NONE (0x00001F0) +#define CSR_HW_REV_TYPE_QNJ (0x0000360) +#define CSR_HW_REV_TYPE_HR_CDB (0x0000340) /* RF_ID value */ #define CSR_HW_RF_ID_TYPE_JF (0x00105100) #define CSR_HW_RF_ID_TYPE_HR (0x0010A000) -#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109000) +#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00) + +/* HW_RF CHIP ID */ +#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF) /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 87712aeac31f..27d1eec1c899 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -690,12 +690,23 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) iwl_trans->cfg = cfg_7265d; } - if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb) { - if (iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_JF) - cfg = &iwla000_2ac_cfg_jf; - else if (iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) - cfg = &iwla000_2ac_cfg_hr; - + if (iwl_trans->cfg->rf_id && cfg == &iwla000_2ac_cfg_hr_cdb && + iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) { + u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id); + u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF); + u32 hr_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR); + + if (rf_id_chp == jf_chp_id) { + if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) + cfg = &iwla000_2ax_cfg_qnj_jf_b0; + else + cfg = &iwla000_2ac_cfg_jf; + } else if (rf_id_chp == hr_chp_id) { + if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ) + cfg = &iwla000_2ax_cfg_qnj_hr_a0; + else + cfg = &iwla000_2ac_cfg_hr; + } iwl_trans->cfg = cfg; } #endif diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 3ecafa2ad922..58873cc27396 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ 
b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -3145,7 +3145,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); if (hw_status & UMAG_GEN_HW_IS_FPGA) - trans->cfg = &iwla000_2ax_cfg_qnj_hr; + trans->cfg = &iwla000_2ax_cfg_qnj_hr_f0; else trans->cfg = &iwla000_2ac_cfg_hr; } -- cgit v1.2.3-55-g7522 From 3f7a5e13e85026b6e460bbd6e87f87379421d272 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 16 Aug 2017 08:47:38 +0300 Subject: iwlwifi: pci: add new PCI ID for 7265D We have a new PCI subsystem ID for 7265D. Add it to the list. Cc: stable@vger.kernel.org Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 27d1eec1c899..5398a0917f06 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -430,6 +430,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)}, /* 8000 Series */ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, -- cgit v1.2.3-55-g7522 From b823cf3bae8111341212e698b94ab7293e7fb9f9 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 16 Aug 2017 10:46:52 +0300 Subject: iwlwifi: update channel flags parser There are some new flags in the channel flags that we don't know about. Also, the "WIDE" flag is very confusing, because it actually means 20MHz bandwidth, which is not very wide. Add the new flags and rename the confusing one. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 51 +++++++++++++--------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 5c08f4d40f6a..1172e4572a82 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -183,22 +183,26 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = { * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS * on same channel on 2.4 or same UNII band on 5.2 - * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?) - * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?) - * @NVM_CHANNEL_80MHZ: 80 MHz channel okay (?) - * @NVM_CHANNEL_160MHZ: 160 MHz channel okay (?) + * @NVM_CHANNEL_UNIFORM: uniform spreading required + * @NVM_CHANNEL_20MHZ: 20 MHz channel okay + * @NVM_CHANNEL_40MHZ: 40 MHz channel okay + * @NVM_CHANNEL_80MHZ: 80 MHz channel okay + * @NVM_CHANNEL_160MHZ: 160 MHz channel okay + * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?) 
*/ enum iwl_nvm_channel_flags { - NVM_CHANNEL_VALID = BIT(0), - NVM_CHANNEL_IBSS = BIT(1), - NVM_CHANNEL_ACTIVE = BIT(3), - NVM_CHANNEL_RADAR = BIT(4), - NVM_CHANNEL_INDOOR_ONLY = BIT(5), - NVM_CHANNEL_GO_CONCURRENT = BIT(6), - NVM_CHANNEL_WIDE = BIT(8), - NVM_CHANNEL_40MHZ = BIT(9), - NVM_CHANNEL_80MHZ = BIT(10), - NVM_CHANNEL_160MHZ = BIT(11), + NVM_CHANNEL_VALID = BIT(0), + NVM_CHANNEL_IBSS = BIT(1), + NVM_CHANNEL_ACTIVE = BIT(3), + NVM_CHANNEL_RADAR = BIT(4), + NVM_CHANNEL_INDOOR_ONLY = BIT(5), + NVM_CHANNEL_GO_CONCURRENT = BIT(6), + NVM_CHANNEL_UNIFORM = BIT(7), + NVM_CHANNEL_20MHZ = BIT(8), + NVM_CHANNEL_40MHZ = BIT(9), + NVM_CHANNEL_80MHZ = BIT(10), + NVM_CHANNEL_160MHZ = BIT(11), + NVM_CHANNEL_DC_HIGH = BIT(12), }; #define CHECK_AND_PRINT_I(x) \ @@ -327,7 +331,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, channel->flags = 0; IWL_DEBUG_EEPROM(dev, - "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n", + "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n", channel->hw_value, is_5ghz ? "5.2" : "2.4", ch_flags, @@ -337,10 +341,12 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, CHECK_AND_PRINT_I(RADAR), CHECK_AND_PRINT_I(INDOOR_ONLY), CHECK_AND_PRINT_I(GO_CONCURRENT), - CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(UNIFORM), + CHECK_AND_PRINT_I(20MHZ), CHECK_AND_PRINT_I(40MHZ), CHECK_AND_PRINT_I(80MHZ), CHECK_AND_PRINT_I(160MHZ), + CHECK_AND_PRINT_I(DC_HIGH), channel->max_power, ((ch_flags & NVM_CHANNEL_IBSS) && !(ch_flags & NVM_CHANNEL_RADAR)) @@ -865,22 +871,25 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, prev_center_freq = center_freq; IWL_DEBUG_DEV(dev, IWL_DL_LAR, - "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n", + "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x): %s\n", center_freq, band == NL80211_BAND_5GHZ ? "5.2" : "2.4", CHECK_AND_PRINT_I(VALID), + CHECK_AND_PRINT_I(IBSS), CHECK_AND_PRINT_I(ACTIVE), CHECK_AND_PRINT_I(RADAR), - CHECK_AND_PRINT_I(WIDE), + CHECK_AND_PRINT_I(INDOOR_ONLY), + CHECK_AND_PRINT_I(GO_CONCURRENT), + CHECK_AND_PRINT_I(UNIFORM), + CHECK_AND_PRINT_I(20MHZ), CHECK_AND_PRINT_I(40MHZ), CHECK_AND_PRINT_I(80MHZ), CHECK_AND_PRINT_I(160MHZ), - CHECK_AND_PRINT_I(INDOOR_ONLY), - CHECK_AND_PRINT_I(GO_CONCURRENT), + CHECK_AND_PRINT_I(DC_HIGH), ch_flags, ((ch_flags & NVM_CHANNEL_ACTIVE) && !(ch_flags & NVM_CHANNEL_RADAR)) - ? "" : "not "); + ? "Ad-Hoc" : ""); } regd->n_reg_rules = valid_rules; -- cgit v1.2.3-55-g7522 From 482e48440a0e0a6260d026b92de99034ac4d7b52 Mon Sep 17 00:00:00 2001 From: Gregory Greenman Date: Tue, 15 Aug 2017 12:27:01 +0300 Subject: iwlwifi: mvm: change open and close criteria of a BA session Tx BA session should be started according to the current throughput without any dependence on the internal rate scaling state. The criteria for opening a BA session will be 10 frames per second. Sending frequent del BAs can cause inter-op issues with some APs. We'll not close a BA session until we receive an explicit del BA from the peer. 
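A minimal user-space sketch of the new bookkeeping, assuming a one-second window kept per TID (the names and the use of wall-clock seconds are simplifications; the driver keeps this in its tid_data state and measures with jiffies): frames are counted while aggregation is off, and a BA session is attempted only once the last completed window saw at least 10 frames.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define AGG_START_THRESHOLD 10	/* frames per second, as in this change */

struct tid_stats {
	time_t meas_start;		/* start of the current 1-second window */
	unsigned int tx_count;		/* frames sent in the current window */
	unsigned int tx_count_last;	/* frames sent in the last full window */
	bool agg_on;			/* BA session already running? */
};

/* Called for every batch of successfully sent frames on this TID. */
static void update_tid_stats(struct tid_stats *t, unsigned int successes,
			     time_t now)
{
	if (t->agg_on)		/* stats only matter while aggregation is off */
		return;

	if (now - t->meas_start >= 1 || t->tx_count >= AGG_START_THRESHOLD) {
		/* window rolled over (or counter saturated): latch and reset */
		t->tx_count_last = t->tx_count;
		t->tx_count = 0;
		t->meas_start = now;
	} else {
		t->tx_count += successes;
	}
}

/* Decide whether to try opening a BA session for this TID. */
static bool should_start_agg(const struct tid_stats *t)
{
	return !t->agg_on && t->tx_count_last >= AGG_START_THRESHOLD;
}

int main(void)
{
	struct tid_stats t = { .meas_start = time(NULL) };

	update_tid_stats(&t, 12, t.meas_start);	    /* busy first second */
	update_tid_stats(&t, 3, t.meas_start + 1);  /* window rolls over */
	printf("start aggregation: %d\n", should_start_agg(&t));
	return 0;
}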
Signed-off-by: Gregory Greenman Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 117 ++++++++++++--------- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 7 ++ 3 files changed, 73 insertions(+), 52 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 753d4138e30f..976640fed334 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -136,6 +136,7 @@ #define IWL_MVM_RS_SR_NO_DECREASE 85 /* percent */ #define IWL_MVM_RS_AGG_TIME_LIMIT 4000 /* 4 msecs. valid 100-8000 */ #define IWL_MVM_RS_AGG_DISABLE_START 3 +#define IWL_MVM_RS_AGG_START_THRESHOLD 10 /* num frames per second */ #define IWL_MVM_RS_TPC_SR_FORCE_INCREASE 75 /* percent */ #define IWL_MVM_RS_TPC_SR_NO_INCREASE 85 /* percent */ #define IWL_MVM_RS_TPC_TX_POWER_STEP 3 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index cdf10ce9dbea..44c873082a31 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -622,7 +622,9 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid); - ret = ieee80211_start_tx_ba_session(sta, tid, 5000); + + /* start BA session until the peer sends del BA */ + ret = ieee80211_start_tx_ba_session(sta, tid, 0); if (ret == -EAGAIN) { /* * driver and mac80211 is out of sync @@ -636,15 +638,31 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm, return ret; } -static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, u8 tid, - struct iwl_lq_sta *lq_data, +static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, + u8 tid, struct iwl_lq_sta *lq_sta, struct ieee80211_sta *sta) { - if (tid < IWL_MAX_TID_COUNT) - rs_tl_turn_on_agg_for_tid(mvm, lq_data, tid, sta); - else + struct iwl_mvm_tid_data *tid_data; + + /* + * In AP mode, tid can be equal to IWL_MAX_TID_COUNT + * when the frame is not QoS + */ + if (WARN_ON_ONCE(tid > IWL_MAX_TID_COUNT)) { IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n", tid, IWL_MAX_TID_COUNT); + return; + } else if (tid == IWL_MAX_TID_COUNT) { + return; + } + + tid_data = &mvmsta->tid_data[tid]; + if ((tid_data->state == IWL_AGG_OFF) && + (lq_sta->tx_agg_tid_en & BIT(tid)) && + (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) { + IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); + rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta); + } } static inline int get_num_of_ant_from_rate(u32 rate_n_flags) @@ -753,8 +771,38 @@ static int rs_collect_tpc_data(struct iwl_mvm *mvm, window); } +static void rs_update_tid_tpt_stats(struct iwl_mvm *mvm, + struct iwl_mvm_sta *mvmsta, + u8 tid, int successes) +{ + struct iwl_mvm_tid_data *tid_data; + + if (tid >= IWL_MAX_TID_COUNT) + return; + + tid_data = &mvmsta->tid_data[tid]; + + /* + * Measure if there're enough successful transmits per second. + * These statistics are used only to decide if we can start a + * BA session, so it should be updated only when A-MPDU is + * off. 
+ */ + if (tid_data->state != IWL_AGG_OFF) + return; + + if (time_is_before_jiffies(tid_data->tpt_meas_start + HZ) || + (tid_data->tx_count >= IWL_MVM_RS_AGG_START_THRESHOLD)) { + tid_data->tx_count_last = tid_data->tx_count; + tid_data->tx_count = 0; + tid_data->tpt_meas_start = jiffies; + } else { + tid_data->tx_count += successes; + } +} + static int rs_collect_tlc_data(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, + struct iwl_mvm_sta *mvmsta, u8 tid, struct iwl_scale_tbl_info *tbl, int scale_index, int attempts, int successes) { @@ -764,12 +812,14 @@ static int rs_collect_tlc_data(struct iwl_mvm *mvm, return -EINVAL; if (tbl->column != RS_COLUMN_INVALID) { - struct lq_sta_pers *pers = &lq_sta->pers; + struct lq_sta_pers *pers = &mvmsta->lq_sta.pers; pers->tx_stats[tbl->column][scale_index].total += attempts; pers->tx_stats[tbl->column][scale_index].success += successes; } + rs_update_tid_tpt_stats(mvm, mvmsta, tid, successes); + /* Select window for current tx bit rate */ window = &(tbl->win[scale_index]); return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, @@ -1211,12 +1261,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (time_after(jiffies, (unsigned long)(lq_sta->last_tx + (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) { - int t; - IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n"); - for (t = 0; t < IWL_MAX_TID_COUNT; t++) - ieee80211_stop_tx_ba_session(sta, t); - iwl_mvm_rs_rate_init(mvm, sta, info->band, false); return; } @@ -1312,7 +1357,8 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (info->status.ampdu_ack_len == 0) info->status.ampdu_len = 1; - rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index, + rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, + lq_rate.index, info->status.ampdu_len, info->status.ampdu_ack_len); @@ -1351,7 +1397,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, lq_rate.index, 1, i < retries ? 0 : legacy_success, reduced_txp); - rs_collect_tlc_data(mvm, lq_sta, tmp_tbl, + rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl, lq_rate.index, 1, i < retries ? 0 : legacy_success); } @@ -2229,7 +2275,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm, s32 sr; u8 prev_agg = lq_sta->is_agg; struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); - struct iwl_mvm_tid_data *tid_data; struct rs_rate *rate; lq_sta->is_agg = !!mvmsta->agg_tids; @@ -2480,44 +2525,12 @@ lq_update: } } + if (!ndp) + rs_tl_turn_on_agg(mvm, mvmsta, tid, lq_sta, sta); + if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) { - /* If the "active" (non-search) mode was legacy, - * and we've tried switching antennas, - * but we haven't been able to try HT modes (not available), - * stay with best antenna legacy modulation for a while - * before next round of mode comparisons. */ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); - if (is_legacy(&tbl1->rate)) { - IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n"); - - if (tid != IWL_MAX_TID_COUNT) { - tid_data = &mvmsta->tid_data[tid]; - if (tid_data->state != IWL_AGG_OFF) { - IWL_DEBUG_RATE(mvm, - "Stop aggregation on tid %d\n", - tid); - ieee80211_stop_tx_ba_session(sta, tid); - } - } - rs_set_stay_in_table(mvm, 1, lq_sta); - } else { - /* If we're in an HT mode, and all 3 mode switch actions - * have been tried and compared, stay in this best modulation - * mode for a while before next round of mode comparisons. 
*/ - if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) && - (lq_sta->tx_agg_tid_en & (1 << tid)) && - (tid != IWL_MAX_TID_COUNT)) { - tid_data = &mvmsta->tid_data[tid]; - if (tid_data->state == IWL_AGG_OFF && !ndp) { - IWL_DEBUG_RATE(mvm, - "try to aggregate tid %d\n", - tid); - rs_tl_turn_on_agg(mvm, tid, - lq_sta, sta); - } - } - rs_set_stay_in_table(mvm, 0, lq_sta); - } + rs_set_stay_in_table(mvm, is_legacy(&tbl1->rate), lq_sta); } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 005037aa3122..d13893806513 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -316,6 +316,10 @@ enum iwl_mvm_agg_state { * @is_tid_active: has this TID sent traffic in the last * %IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this * field should be ignored. + * @tpt_meas_start: time of the throughput measurements start, is reset every HZ + * @tx_count_last: number of frames transmitted during the last second + * @tx_count: counts the number of frames transmitted since the last reset of + * tpt_meas_start */ struct iwl_mvm_tid_data { struct sk_buff_head deferred_tx_frames; @@ -330,6 +334,9 @@ struct iwl_mvm_tid_data { u16 ssn; u16 tx_time; bool is_tid_active; + unsigned long tpt_meas_start; + u32 tx_count_last; + u32 tx_count; }; struct iwl_mvm_key_pn { -- cgit v1.2.3-55-g7522 From 01a9c948a09348950515bf2abb6113ed83e696d8 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 15 Aug 2017 20:48:41 +0300 Subject: iwlwifi: add workaround to disable wide channels in 5GHz The OTP in some SKUs have erroneously allowed 40MHz and 80MHz channels in the 5.2GHz band. The firmware has been modified to not allow this in those SKUs, so the driver needs to do the same otherwise the firmware will assert when we try to use it. 
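A minimal sketch of the workaround in isolation, using the same bit positions as the NVM_CHANNEL_* flags above for illustration: the SKU is taken from bits 1-4 of the big-endian subsystem ID, and for SKU 5 or 9 the 40/80/160 MHz capability bits are cleared on every 5 GHz channel before the channel map is built.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative channel-flag bits, mirroring NVM_CHANNEL_40/80/160MHZ. */
#define CH_40MHZ   (1u << 9)
#define CH_80MHZ   (1u << 10)
#define CH_160MHZ  (1u << 11)

/* The SKU is carried in bits 1..4 of the subsystem ID. */
static bool no_wide_in_5ghz(uint16_t subsystem_id)
{
	uint8_t sku = (subsystem_id & 0x1e) >> 1;

	return sku == 5 || sku == 9;	/* affected SKUs */
}

static uint16_t fixup_channel_flags(uint16_t ch_flags, bool is_5ghz,
				    bool no_wide)
{
	if (no_wide && is_5ghz)
		ch_flags &= ~(CH_40MHZ | CH_80MHZ | CH_160MHZ);
	return ch_flags;
}

int main(void)
{
	uint16_t subsys = 0x000a;		/* bits 1..4 decode to SKU 5 */
	bool no_wide = no_wide_in_5ghz(subsys);
	uint16_t flags = CH_40MHZ | CH_80MHZ;

	printf("5 GHz flags after fixup: 0x%04x\n",
	       (unsigned int)fixup_channel_flags(flags, true, no_wide));
	return 0;
}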
Cc: stable@vger.kernel.org Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/nvm.c | 3 +- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 62 ++++++++++++++++++---- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 3 +- 3 files changed, 56 insertions(+), 12 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c index ae03d0f5564f..e81f6dd3744e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c @@ -148,7 +148,8 @@ struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) rsp->regulatory.channel_profile, nvm->valid_tx_ant & fwrt->fw->valid_tx_ant, nvm->valid_rx_ant & fwrt->fw->valid_rx_ant, - rsp->regulatory.lar_enabled && lar_fw_supported); + rsp->regulatory.lar_enabled && lar_fw_supported, + false); iwl_free_resp(&hcmd); return nvm; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 1172e4572a82..ea165b3e6dd3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -79,6 +79,7 @@ /* NVM offsets (in words) definitions */ enum wkp_nvm_offsets { /* NVM HW-Section offset (in words) definitions */ + SUBSYSTEM_ID = 0x0A, HW_ADDR = 0x15, /* NVM SW-Section offset (in words) definitions */ @@ -258,13 +259,12 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 * const nvm_ch_flags, - bool lar_supported) + bool lar_supported, bool no_wide_in_5ghz) { int ch_idx; int n_channels = 0; struct ieee80211_channel *channel; u16 ch_flags; - bool is_5ghz; int num_of_ch, num_2ghz_channels; const u8 *nvm_chan; @@ -279,12 +279,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, } for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { + bool is_5ghz = (ch_idx >= num_2ghz_channels); + ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx); - if (ch_idx >= num_2ghz_channels && - !data->sku_cap_band_52GHz_enable) + if (is_5ghz && !data->sku_cap_band_52GHz_enable) continue; + /* workaround to disable wide channels in 5GHz */ + if (no_wide_in_5ghz && is_5ghz) { + ch_flags &= ~(NVM_CHANNEL_40MHZ | + NVM_CHANNEL_80MHZ | + NVM_CHANNEL_160MHZ); + } + if (ch_flags & NVM_CHANNEL_160MHZ) data->vht160_supported = true; @@ -307,8 +315,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, n_channels++; channel->hw_value = nvm_chan[ch_idx]; - channel->band = (ch_idx < num_2ghz_channels) ? - NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; + channel->band = is_5ghz ? 
+ NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; channel->center_freq = ieee80211_channel_to_frequency( channel->hw_value, channel->band); @@ -320,7 +328,6 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, * is not used in mvm, and is used for backwards compatibility */ channel->max_power = IWL_DEFAULT_MAX_TX_POWER; - is_5ghz = channel->band == NL80211_BAND_5GHZ; /* don't put limitations in case we're using LAR */ if (!lar_supported) @@ -438,14 +445,15 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, - u8 tx_chains, u8 rx_chains, bool lar_supported) + u8 tx_chains, u8 rx_chains, bool lar_supported, + bool no_wide_in_5ghz) { int n_channels; int n_used = 0; struct ieee80211_supported_band *sband; n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags, - lar_supported); + lar_supported, no_wide_in_5ghz); sband = &data->bands[NL80211_BAND_2GHZ]; sband->band = NL80211_BAND_2GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS]; @@ -651,6 +659,39 @@ static int iwl_set_hw_address(struct iwl_trans *trans, return 0; } +static bool +iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg, + const __le16 *nvm_hw) +{ + /* + * Workaround a bug in Indonesia SKUs where the regulatory in + * some 7000-family OTPs erroneously allow wide channels in + * 5GHz. To check for Indonesia, we take the SKU value from + * bits 1-4 in the subsystem ID and check if it is either 5 or + * 9. In those cases, we need to force-disable wide channels + * in 5GHz otherwise the FW will throw a sysassert when we try + * to use them. + */ + if (cfg->device_family == IWL_DEVICE_FAMILY_7000) { + /* + * Unlike the other sections in the NVM, the hw + * section uses big-endian. 
+ */ + u16 subsystem_id = be16_to_cpup((const __be16 *)nvm_hw + + SUBSYSTEM_ID); + u8 sku = (subsystem_id & 0x1e) >> 1; + + if (sku == 5 || sku == 9) { + IWL_DEBUG_EEPROM(dev, + "disabling wide channels in 5GHz (0x%0x %d)\n", + subsystem_id, sku); + return true; + } + } + + return false; +} + struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __le16 *nvm_hw, const __le16 *nvm_sw, @@ -661,6 +702,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct device *dev = trans->dev; struct iwl_nvm_data *data; bool lar_enabled; + bool no_wide_in_5ghz = iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw); u32 sku, radio_cfg; u16 lar_config; const __le16 *ch_section; @@ -731,7 +773,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, } iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains, - lar_fw_supported && lar_enabled); + lar_fw_supported && lar_enabled, no_wide_in_5ghz); data->calib_version = 255; return data; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 3fd6506a02ab..50d9b3eaa4f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -93,7 +93,8 @@ void iwl_set_hw_address_from_csr(struct iwl_trans *trans, */ void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, - u8 tx_chains, u8 rx_chains, bool lar_supported); + u8 tx_chains, u8 rx_chains, bool lar_supported, + bool no_wide_in_5ghz); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW -- cgit v1.2.3-55-g7522 From 8a0d53ce10053ce39b9512a054ab4c0bef2e2cc3 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 15 Aug 2017 23:12:39 +0300 Subject: iwlwifi: fw: fix lar_enabled endian problem in iwl_fw_get_nvm We read the regulatory.lar_enabled field in iwl_fw_get_nvm() and store it in nvm->lar_enabled, taking care of endianness. But then later we read it again to pass the value to iwl_init_sbands() without handling endianness. To solve this, simply reuse nvm->lar_enabled when calling that function. Fixes: e9e1ba3dbf00 ("iwlwifi: mvm: support getting nvm data from firmware") Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/fw/nvm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c index e81f6dd3744e..bd2e1fb43f5a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/nvm.c @@ -148,8 +148,7 @@ struct iwl_nvm_data *iwl_fw_get_nvm(struct iwl_fw_runtime *fwrt) rsp->regulatory.channel_profile, nvm->valid_tx_ant & fwrt->fw->valid_tx_ant, nvm->valid_rx_ant & fwrt->fw->valid_rx_ant, - rsp->regulatory.lar_enabled && lar_fw_supported, - false); + nvm->lar_enabled, false); iwl_free_resp(&hcmd); return nvm; -- cgit v1.2.3-55-g7522 From 5bd1d2c1ea9d0f8ca7ad0457c3a24fb91c9ca756 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 15 Aug 2017 18:46:44 +0300 Subject: iwlwifi: mvm: remove useless argument in iwl_nvm_init() We always call iwl_nvm_init() with read_nvm_from_nic == true, so this argument is useless. Remove it. 
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 5 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 98 ++++++++++++++-------------- 3 files changed, 51 insertions(+), 54 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 90ae50f7768a..3d65ab49a8a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -388,7 +388,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) } if (IWL_MVM_PARSE_NVM && read_nvm) { - ret = iwl_nvm_init(mvm, true); + ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); goto error; @@ -486,8 +486,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) /* Read the NVM only at driver load time, no need to do this twice */ if (read_nvm) { - /* Read nvm */ - ret = iwl_nvm_init(mvm, true); + ret = iwl_nvm_init(mvm); if (ret) { IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); goto remove_notif; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 74fdd33fd9fb..83303bac0e4b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1373,7 +1373,7 @@ int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear); void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm); /* NVM */ -int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic); +int iwl_nvm_init(struct iwl_mvm *mvm); int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm); int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 08020386c3d4..5a6916f0b9ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -546,7 +546,7 @@ int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm) return ret; } -int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) +int iwl_nvm_init(struct iwl_mvm *mvm) { int ret, section; u32 size_read = 0; @@ -557,63 +557,61 @@ int iwl_nvm_init(struct iwl_mvm *mvm, bool read_nvm_from_nic) return -EINVAL; /* load NVM values from nic */ - if (read_nvm_from_nic) { - /* Read From FW NVM */ - IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); - - nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, - GFP_KERNEL); - if (!nvm_buffer) - return -ENOMEM; - for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) { - /* we override the constness for initial read */ - ret = iwl_nvm_read_section(mvm, section, nvm_buffer, - size_read); - if (ret < 0) - continue; - size_read += ret; - temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); - if (!temp) { - ret = -ENOMEM; - break; - } + /* Read From FW NVM */ + IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n"); + + nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size, + GFP_KERNEL); + if (!nvm_buffer) + return -ENOMEM; + for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) { + /* we override the constness for initial read */ + ret = iwl_nvm_read_section(mvm, section, nvm_buffer, + size_read); + if (ret < 0) + continue; + size_read += ret; + temp = kmemdup(nvm_buffer, ret, GFP_KERNEL); + if (!temp) { + ret = -ENOMEM; + break; + } - iwl_mvm_nvm_fixups(mvm, section, temp, ret); + iwl_mvm_nvm_fixups(mvm, section, temp, ret); - mvm->nvm_sections[section].data = temp; - mvm->nvm_sections[section].length = ret; + 
mvm->nvm_sections[section].data = temp; + mvm->nvm_sections[section].length = ret; #ifdef CONFIG_IWLWIFI_DEBUGFS - switch (section) { - case NVM_SECTION_TYPE_SW: - mvm->nvm_sw_blob.data = temp; - mvm->nvm_sw_blob.size = ret; - break; - case NVM_SECTION_TYPE_CALIBRATION: - mvm->nvm_calib_blob.data = temp; - mvm->nvm_calib_blob.size = ret; - break; - case NVM_SECTION_TYPE_PRODUCTION: - mvm->nvm_prod_blob.data = temp; - mvm->nvm_prod_blob.size = ret; - break; - case NVM_SECTION_TYPE_PHY_SKU: - mvm->nvm_phy_sku_blob.data = temp; - mvm->nvm_phy_sku_blob.size = ret; + switch (section) { + case NVM_SECTION_TYPE_SW: + mvm->nvm_sw_blob.data = temp; + mvm->nvm_sw_blob.size = ret; + break; + case NVM_SECTION_TYPE_CALIBRATION: + mvm->nvm_calib_blob.data = temp; + mvm->nvm_calib_blob.size = ret; + break; + case NVM_SECTION_TYPE_PRODUCTION: + mvm->nvm_prod_blob.data = temp; + mvm->nvm_prod_blob.size = ret; + break; + case NVM_SECTION_TYPE_PHY_SKU: + mvm->nvm_phy_sku_blob.data = temp; + mvm->nvm_phy_sku_blob.size = ret; + break; + default: + if (section == mvm->cfg->nvm_hw_section_num) { + mvm->nvm_hw_blob.data = temp; + mvm->nvm_hw_blob.size = ret; break; - default: - if (section == mvm->cfg->nvm_hw_section_num) { - mvm->nvm_hw_blob.data = temp; - mvm->nvm_hw_blob.size = ret; - break; - } } -#endif } - if (!size_read) - IWL_ERR(mvm, "OTP is blank\n"); - kfree(nvm_buffer); +#endif } + if (!size_read) + IWL_ERR(mvm, "OTP is blank\n"); + kfree(nvm_buffer); /* Only if PNVM selected in the mod param - load external NVM */ if (mvm->nvm_file_name) { -- cgit v1.2.3-55-g7522 From ee4fe54024f071976120dce71aefe056dfab4eb7 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 15 Aug 2017 19:24:59 +0300 Subject: iwlwifi: mvm: remove useless check for mvm->cfg in iwl_parse_nvm_section() At this point we have already copied the cfg pointer to mvm and we have been dereferencing this pointer many times before, so it will never be NULL or we would have crashed. Remove the useless check. Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 5a6916f0b9ec..b05673e4a193 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -326,9 +326,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) } } - if (WARN_ON(!mvm->cfg)) - return NULL; - hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; -- cgit v1.2.3-55-g7522 From 8fe34b060a4b11be35ffd9383acdd0bc7bee72e2 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Thu, 17 Aug 2017 18:51:56 +0300 Subject: iwlwifi: use big-endian for the hw section of the nvm Unlike the other sections of the NVM, the hw section is in big-endian. To read a value from it, we had to cast it to __be16. Fix that by using __be16 * for the entire section. 
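A short portable sketch of why the pointer type matters, assuming nothing beyond what the text above says: the same two bytes decode to different values depending on the assumed byte order, so typing the hw section as __be16 * keeps the required conversion (be16_to_cpup in the kernel) explicit at every access.

#include <stdint.h>
#include <stdio.h>

/* Read a 16-bit value stored big-endian. */
static uint16_t read_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Read a 16-bit value stored little-endian. */
static uint16_t read_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* Two raw bytes as they might sit in the big-endian hw section. */
	const uint8_t raw[2] = { 0x12, 0x34 };

	/* Only the big-endian read recovers the intended 0x1234. */
	printf("as big-endian:    0x%04x\n", (unsigned int)read_be16(raw));
	printf("as little-endian: 0x%04x\n", (unsigned int)read_le16(raw));
	return 0;
}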
Signed-off-by: Luca Coelho --- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 11 +++++------ drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 5 +++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index ea165b3e6dd3..aa382f719988 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -582,7 +582,7 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *mac_override, - const __le16 *nvm_hw) + const __be16 *nvm_hw) { const u8 *hw_addr; @@ -629,7 +629,7 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, static int iwl_set_hw_address(struct iwl_trans *trans, const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, const __le16 *nvm_hw, + struct iwl_nvm_data *data, const __be16 *nvm_hw, const __le16 *mac_override) { if (cfg->mac_addr_from_csr) { @@ -661,7 +661,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans, static bool iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg, - const __le16 *nvm_hw) + const __be16 *nvm_hw) { /* * Workaround a bug in Indonesia SKUs where the regulatory in @@ -677,8 +677,7 @@ iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg, * Unlike the other sections in the NVM, the hw * section uses big-endian. */ - u16 subsystem_id = be16_to_cpup((const __be16 *)nvm_hw - + SUBSYSTEM_ID); + u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID); u8 sku = (subsystem_id & 0x1e) >> 1; if (sku == 5 || sku == 9) { @@ -694,7 +693,7 @@ iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, - const __le16 *nvm_hw, const __le16 *nvm_sw, + const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 50d9b3eaa4f8..2d1a24dd8410 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -77,7 +77,7 @@ */ struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, - const __le16 *nvm_hw, const __le16 *nvm_sw, + const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index b05673e4a193..422aa6be9932 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -292,7 +292,8 @@ static struct iwl_nvm_data * iwl_parse_nvm_sections(struct iwl_mvm *mvm) { struct iwl_nvm_section *sections = mvm->nvm_sections; - const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; + const __be16 *hw; + const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; bool lar_enabled; /* Checking for required sections */ @@ -326,7 +327,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) } } - hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; + hw = 
(const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; -- cgit v1.2.3-55-g7522 From 70535350e26f9bf8c21de0300728f17f61cdcf77 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Thu, 17 Aug 2017 23:11:25 -0700 Subject: liquidio: with embedded f/w, don't reload f/w, issue pf flr at exit 1. Add support for PF FLR when exiting (enables CORE_DRV_ACTIVE upon next driver init) 2. Skip some initialization (don't try to load f/w, activate consoles). Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 70 ++++++++++++++++--------- 1 file changed, 46 insertions(+), 24 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 0eea6a2d0200..bd67980b5462 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1121,6 +1121,33 @@ static bool fw_type_is_none(void) sizeof(LIO_FW_NAME_TYPE_NONE)) == 0; } +/** + * \brief PCI FLR for each Octeon device. + * @param oct octeon device + */ +static void octeon_pci_flr(struct octeon_device *oct) +{ + int rc; + + pci_save_state(oct->pci_dev); + + pci_cfg_access_lock(oct->pci_dev); + + /* Quiesce the device completely */ + pci_write_config_word(oct->pci_dev, PCI_COMMAND, + PCI_COMMAND_INTX_DISABLE); + + rc = __pci_reset_function_locked(oct->pci_dev); + + if (rc != 0) + dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n", + rc, oct->pf_num); + + pci_cfg_access_unlock(oct->pci_dev); + + pci_restore_state(oct->pci_dev); +} + /** *\brief Destroy resources associated with octeon device * @param pdev PCI device structure @@ -1269,14 +1296,16 @@ static void octeon_destroy_resources(struct octeon_device *oct) case OCT_DEV_PCI_MAP_DONE: refcount = octeon_deregister_device(oct); - if (!fw_type_is_none()) { - /* Soft reset the octeon device before exiting. - * Implementation note: here, we reset the device - * if it is a CN6XXX OR the last CN23XX device. - */ - if (OCTEON_CN6XXX(oct) || !refcount) - oct->fn_list.soft_reset(oct); - } + /* Soft reset the octeon device before exiting. + * However, if fw was loaded from card (i.e. autoboot), + * perform an FLR instead. + * Implementation note: only soft-reset the device + * if it is a CN6XXX OR the LAST CN23XX device. + */ + if (fw_type_is_none()) + octeon_pci_flr(oct); + else if (OCTEON_CN6XXX(oct) || !refcount) + oct->fn_list.soft_reset(oct); octeon_unmap_pci_barx(oct, 0); octeon_unmap_pci_barx(oct, 1); @@ -1912,11 +1941,6 @@ static int load_firmware(struct octeon_device *oct) char fw_name[LIO_MAX_FW_FILENAME_LEN]; char *tmp_fw_type; - if (fw_type_is_none()) { - dev_info(&oct->pci_dev->dev, "Skipping firmware load\n"); - return ret; - } - if (fw_type[0] == '\0') tmp_fw_type = LIO_FW_NAME_TYPE_NIC; else @@ -3900,18 +3924,16 @@ static int octeon_device_init(struct octeon_device *octeon_dev) octeon_dev->app_mode = CVM_DRV_INVALID_APP; if (OCTEON_CN23XX_PF(octeon_dev)) { - if (!cn23xx_fw_loaded(octeon_dev)) { + if (!cn23xx_fw_loaded(octeon_dev) && !fw_type_is_none()) { fw_loaded = 0; - if (!fw_type_is_none()) { - /* Do a soft reset of the Octeon device. 
*/ - if (octeon_dev->fn_list.soft_reset(octeon_dev)) - return 1; - /* things might have changed */ - if (!cn23xx_fw_loaded(octeon_dev)) - fw_loaded = 0; - else - fw_loaded = 1; - } + /* Do a soft reset of the Octeon device. */ + if (octeon_dev->fn_list.soft_reset(octeon_dev)) + return 1; + /* things might have changed */ + if (!cn23xx_fw_loaded(octeon_dev)) + fw_loaded = 0; + else + fw_loaded = 1; } else { fw_loaded = 1; } -- cgit v1.2.3-55-g7522 From 3c57f61501c4006f558fc804e0bed11705a3ed45 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Thu, 17 Aug 2017 23:11:30 -0700 Subject: liquidio: with embedded f/w, issue droq credits before enablement 1. Issue credits BEFORE enabling DROQ's; this prevents PKTPF_ERR interrupt. Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index bd67980b5462..268ba5215bdd 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -4049,6 +4049,18 @@ static int octeon_device_init(struct octeon_device *octeon_dev) atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE); + /* Send Credit for Octeon Output queues. Credits are always sent BEFORE + * the output queue is enabled. + * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in + * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. + * Otherwise, it is possible that the DRV_ACTIVE message will be sent + * before any credits have been issued, causing the ring to be reset + * (and the f/w appear to never have started). + */ + for (j = 0; j < octeon_dev->num_oqs; j++) + writel(octeon_dev->droq[j]->max_count, + octeon_dev->droq[j]->pkts_credit_reg); + /* Enable the input and output queues for this Octeon device */ ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); if (ret) { @@ -4133,14 +4145,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev) atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); - /* Send Credit for Octeon Output queues. Credits are always sent after - * the output queue is enabled. - */ - for (j = 0; j < octeon_dev->num_oqs; j++) - writel(octeon_dev->droq[j]->max_count, - octeon_dev->droq[j]->pkts_credit_reg); - - /* Packets can start arriving on the output queues from this point. */ return 0; } -- cgit v1.2.3-55-g7522 From ae2b27b859a144f503d382580320873c0beb09c7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 18 Aug 2017 10:27:02 +0300 Subject: bpf: fix a return in sockmap_get_from_fd() "map" is a valid pointer. We wanted to return "err" instead. Also let's return a zero literal at the end. Fixes: 174a79ff9515 ("bpf: sockmap with sk redirect support") Signed-off-by: Dan Carpenter Acked-by: John Fastabend Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- kernel/bpf/syscall.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index d2f2bdf71ffa..b8cb1b3c9bfb 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1125,11 +1125,11 @@ static int sockmap_get_from_fd(const union bpf_attr *attr, int ptype) fdput(f); bpf_prog_put(prog1); bpf_prog_put(prog2); - return PTR_ERR(map); + return err; } fdput(f); - return err; + return 0; } static int bpf_prog_attach(const union bpf_attr *attr) -- cgit v1.2.3-55-g7522 From 7286384bcf982242fd439d27a071e8cba7b2cfd6 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Fri, 18 Aug 2017 10:19:04 +0200 Subject: s390/qeth: split L2 xmit paths l2_hard_start_xmit() actually doesn't contain much shared code, and having device-specific paths makes isolated changes a lot easier. So split it into three routines for IQD, OSN and OSD/OSM/OSX. No functional change. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_l2_main.c | 225 ++++++++++++++++++++++------------------ 1 file changed, 123 insertions(+), 102 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 438a7f29e99f..310bfa225e20 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -676,143 +676,164 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) qeth_promisc_to_bridge(card); } -static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev) +static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int cast_type) { + unsigned int data_offset = ETH_HLEN; + struct qeth_hdr *hdr; int rc; - struct qeth_hdr *hdr = NULL; - int elements = 0; - struct qeth_card *card = dev->ml_priv; - struct sk_buff *new_skb = skb; - int cast_type = qeth_l2_get_cast_type(card, skb); - struct qeth_qdio_out_q *queue; - int tx_bytes = skb->len; - int data_offset = -1; - int elements_needed = 0; - int hd_len = 0; - unsigned int nr_frags; - if (card->qdio.do_prio_queueing || (cast_type && - card->info.is_multicast_different)) - queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb, - qeth_get_ip_version(skb), cast_type)]; - else - queue = card->qdio.out_qs[card->qdio.default_out_queue]; + hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); + if (!hdr) + return -ENOMEM; + qeth_l2_fill_header(card, hdr, skb, cast_type); + hdr->hdr.l2.pkt_length = skb->len; + skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr), + data_offset); - if ((card->state != CARD_STATE_UP) || !card->lan_online) { - card->stats.tx_carrier_errors++; - goto tx_drop; + if (!qeth_get_elements_no(card, skb, 1, data_offset)) { + rc = -E2BIG; + goto out; } + rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset, + data_offset); +out: + if (rc) + kmem_cache_free(qeth_core_header_cache, hdr); + return rc; +} - if ((card->info.type == QETH_CARD_TYPE_OSN) && - (skb->protocol == htons(ETH_P_IPV6))) - goto tx_drop; - - if (card->options.performance_stats) { - card->perf_stats.outbound_cnt++; - card->perf_stats.outbound_start_time = qeth_get_micros(); - } - netif_stop_queue(dev); +static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int cast_type) +{ + unsigned int elements, nr_frags; + struct sk_buff *skb_copy; + struct qeth_hdr *hdr; + int rc; /* fix hardware limitation: as long as we do not have sbal * chaining we can not send long frag lists */ - if ((card->info.type 
!= QETH_CARD_TYPE_IQD) && - !qeth_get_elements_no(card, new_skb, 0, 0)) { - int lin_rc = skb_linearize(new_skb); + if (!qeth_get_elements_no(card, skb, 0, 0)) { + rc = skb_linearize(skb); if (card->options.performance_stats) { - if (lin_rc) + if (rc) card->perf_stats.tx_linfail++; else card->perf_stats.tx_lin++; } - if (lin_rc) - goto tx_drop; + if (rc) + return rc; } - nr_frags = skb_shinfo(new_skb)->nr_frags; + nr_frags = skb_shinfo(skb)->nr_frags; - if (card->info.type == QETH_CARD_TYPE_OSN) - hdr = (struct qeth_hdr *)skb->data; - else { - if (card->info.type == QETH_CARD_TYPE_IQD) { - new_skb = skb; - data_offset = ETH_HLEN; - hd_len = ETH_HLEN; - hdr = kmem_cache_alloc(qeth_core_header_cache, - GFP_ATOMIC); - if (!hdr) - goto tx_drop; - elements_needed++; - qeth_l2_fill_header(card, hdr, new_skb, cast_type); - hdr->hdr.l2.pkt_length = new_skb->len; - skb_copy_from_linear_data(new_skb, - ((char *)hdr) + sizeof(*hdr), - ETH_HLEN); - } else { - /* create a clone with writeable headroom */ - new_skb = skb_realloc_headroom(skb, - sizeof(struct qeth_hdr)); - if (!new_skb) - goto tx_drop; - hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); - qeth_l2_fill_header(card, hdr, new_skb, cast_type); - if (new_skb->ip_summed == CHECKSUM_PARTIAL) - qeth_l2_hdr_csum(card, hdr, new_skb); - } - } + /* create a copy with writeable headroom */ + skb_copy = skb_realloc_headroom(skb, sizeof(struct qeth_hdr)); + if (!skb_copy) + return -ENOMEM; + hdr = skb_push(skb_copy, sizeof(struct qeth_hdr)); + qeth_l2_fill_header(card, hdr, skb_copy, cast_type); + if (skb_copy->ip_summed == CHECKSUM_PARTIAL) + qeth_l2_hdr_csum(card, hdr, skb_copy); - elements = qeth_get_elements_no(card, new_skb, elements_needed, - (data_offset > 0) ? data_offset : 0); + elements = qeth_get_elements_no(card, skb_copy, 0, 0); if (!elements) { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); - goto tx_drop; + rc = -E2BIG; + goto out; } - - if (card->info.type != QETH_CARD_TYPE_IQD) { - if (qeth_hdr_chk_and_bounce(new_skb, &hdr, - sizeof(struct qeth_hdr_layer2))) - goto tx_drop; - rc = qeth_do_send_packet(card, queue, new_skb, hdr, - elements); - } else - rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, - data_offset, hd_len); + if (qeth_hdr_chk_and_bounce(skb_copy, &hdr, sizeof(*hdr))) { + rc = -EINVAL; + goto out; + } + rc = qeth_do_send_packet(card, queue, skb_copy, hdr, elements); +out: if (!rc) { - card->stats.tx_packets++; - card->stats.tx_bytes += tx_bytes; + /* tx success, free dangling original */ + dev_kfree_skb_any(skb); if (card->options.performance_stats && nr_frags) { card->perf_stats.sg_skbs_sent++; /* nr_frags + skb->data */ card->perf_stats.sg_frags_sent += nr_frags + 1; } - if (new_skb != skb) - dev_kfree_skb_any(skb); - rc = NETDEV_TX_OK; } else { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); + /* tx fail, free copy */ + dev_kfree_skb_any(skb_copy); + } + return rc; +} - if (rc == -EBUSY) { - if (new_skb != skb) - dev_kfree_skb_any(new_skb); - return NETDEV_TX_BUSY; - } else - goto tx_drop; +static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue) +{ + unsigned int elements; + struct qeth_hdr *hdr; + + if (skb->protocol == htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + + hdr = (struct qeth_hdr *)skb->data; + elements = qeth_get_elements_no(card, skb, 0, 0); + if (!elements) + return -E2BIG; + if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr))) + return -EINVAL; + return qeth_do_send_packet(card, queue, skb, hdr, 
elements); +} + +static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + int cast_type = qeth_l2_get_cast_type(card, skb); + struct qeth_qdio_out_q *queue; + int tx_bytes = skb->len; + int rc; + + if (card->qdio.do_prio_queueing || (cast_type && + card->info.is_multicast_different)) + queue = card->qdio.out_qs[qeth_get_priority_queue(card, skb, + qeth_get_ip_version(skb), cast_type)]; + else + queue = card->qdio.out_qs[card->qdio.default_out_queue]; + + if ((card->state != CARD_STATE_UP) || !card->lan_online) { + card->stats.tx_carrier_errors++; + goto tx_drop; } - netif_wake_queue(dev); - if (card->options.performance_stats) - card->perf_stats.outbound_time += qeth_get_micros() - - card->perf_stats.outbound_start_time; - return rc; + if (card->options.performance_stats) { + card->perf_stats.outbound_cnt++; + card->perf_stats.outbound_start_time = qeth_get_micros(); + } + netif_stop_queue(dev); + + switch (card->info.type) { + case QETH_CARD_TYPE_OSN: + rc = qeth_l2_xmit_osn(card, skb, queue); + break; + case QETH_CARD_TYPE_IQD: + rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type); + break; + default: + rc = qeth_l2_xmit_osa(card, skb, queue, cast_type); + } + + if (!rc) { + card->stats.tx_packets++; + card->stats.tx_bytes += tx_bytes; + if (card->options.performance_stats) + card->perf_stats.outbound_time += qeth_get_micros() - + card->perf_stats.outbound_start_time; + netif_wake_queue(dev); + return NETDEV_TX_OK; + } else if (rc == -EBUSY) { + return NETDEV_TX_BUSY; + } /* else fall through */ tx_drop: card->stats.tx_dropped++; card->stats.tx_errors++; - if ((new_skb != skb) && new_skb) - dev_kfree_skb_any(new_skb); dev_kfree_skb_any(skb); netif_wake_queue(dev); return NETDEV_TX_OK; -- cgit v1.2.3-55-g7522 From ae79fe03aed71db0771bcb8397425b9de502cf6c Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Fri, 18 Aug 2017 10:19:05 +0200 Subject: s390/qeth: pass full data length to l2_fill_header() For IQD we already need to fix up the qeth_hdr's length field, and future changes will require more flexibility for OSA as well. The device-specific path knows best what header length it requires, so just pass it from there. While at it, remove the unused qeth_card parameter. No functional change. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_l2_main.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 310bfa225e20..3f5b852408d3 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -259,13 +259,14 @@ static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr, card->perf_stats.tx_csum++; } -static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int cast_type) +static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, + int cast_type, unsigned int data_len) { struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb); memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; + hdr->hdr.l2.pkt_length = data_len; /* set byte byte 3 to casting flags */ if (cast_type == RTN_MULTICAST) @@ -275,7 +276,6 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, else hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST; - hdr->hdr.l2.pkt_length = skb->len - sizeof(struct qeth_hdr); /* VSWITCH relies on the VLAN * information to be present in * the QDIO header */ @@ -686,8 +686,7 @@ static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb, hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!hdr) return -ENOMEM; - qeth_l2_fill_header(card, hdr, skb, cast_type); - hdr->hdr.l2.pkt_length = skb->len; + qeth_l2_fill_header(hdr, skb, cast_type, skb->len); skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr), data_offset); @@ -733,7 +732,8 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, if (!skb_copy) return -ENOMEM; hdr = skb_push(skb_copy, sizeof(struct qeth_hdr)); - qeth_l2_fill_header(card, hdr, skb_copy, cast_type); + qeth_l2_fill_header(hdr, skb_copy, cast_type, + skb_copy->len - sizeof(*hdr)); if (skb_copy->ip_summed == CHECKSUM_PARTIAL) qeth_l2_hdr_csum(card, hdr, skb_copy); -- cgit v1.2.3-55-g7522 From 13ddacb526ff17f45b09d6e02d7270e6bb3acaf8 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Fri, 18 Aug 2017 10:19:06 +0200 Subject: s390/qeth: pass TSO header length to fill_buffer() The TSO code already calculates the length of its header element, no need to duplicate this in the low-level code again. Use this opportunity to make hd_len unsigned, and for TSO match its calculation to what tso_fill_header() does. No functional change. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core.h | 7 ++++--- drivers/s390/net/qeth_core_main.c | 18 ++++++++---------- drivers/s390/net/qeth_l2_main.c | 4 ++-- drivers/s390/net/qeth_l3_main.c | 15 +++++++++------ 4 files changed, 23 insertions(+), 21 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 4a4ca5cb37a0..2f5673812810 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -949,9 +949,10 @@ int qeth_get_elements_for_frags(struct sk_buff *); int qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned int offset, - int hd_len); -int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, - struct sk_buff *, struct qeth_hdr *, int); + unsigned int hd_len); +int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int hd_len, int elements); int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); int qeth_core_get_sset_count(struct net_device *, int); void qeth_core_get_ethtool_stats(struct net_device *, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 415424e618ad..6cafeceea3ce 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3956,11 +3956,11 @@ static void __qeth_fill_buffer(struct sk_buff *skb, static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int offset, int hd_len) + unsigned int offset, unsigned int hd_len) { struct qdio_buffer *buffer; - int flush_cnt = 0, hdr_len; bool is_first_elem = true; + int flush_cnt = 0; buffer = buf->buffer; refcount_inc(&skb->users); @@ -3970,14 +3970,12 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, int element = buf->next_element_to_fill; is_first_elem = false; - hdr_len = sizeof(struct qeth_hdr_tso) + - ((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len; /*fill first buffer entry only with header information */ buffer->element[element].addr = skb->data; - buffer->element[element].length = hdr_len; + buffer->element[element].length = hd_len; buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; buf->next_element_to_fill++; - skb_pull(skb, hdr_len); + skb_pull(skb, hd_len); } /* IQD */ @@ -4020,7 +4018,7 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, int qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned int offset, - int hd_len) + unsigned int hd_len) { struct qeth_qdio_out_buffer *buffer; int index; @@ -4050,8 +4048,8 @@ out: EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, - struct sk_buff *skb, struct qeth_hdr *hdr, - int elements_needed) + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int hd_len, int elements_needed) { struct qeth_qdio_out_buffer *buffer; int start_index; @@ -4100,7 +4098,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, } } } - tmp = qeth_fill_buffer(queue, buffer, skb, hdr, 0, 0); + tmp = qeth_fill_buffer(queue, buffer, skb, hdr, 0, hd_len); queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % QDIO_MAX_BUFFERS_PER_Q; flush_count += tmp; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 3f5b852408d3..c78d9fadb9c8 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ 
b/drivers/s390/net/qeth_l2_main.c @@ -746,7 +746,7 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, rc = -EINVAL; goto out; } - rc = qeth_do_send_packet(card, queue, skb_copy, hdr, elements); + rc = qeth_do_send_packet(card, queue, skb_copy, hdr, 0, elements); out: if (!rc) { /* tx success, free dangling original */ @@ -778,7 +778,7 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, return -E2BIG; if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr))) return -EINVAL; - return qeth_do_send_packet(card, queue, skb, hdr, elements); + return qeth_do_send_packet(card, queue, skb, hdr, 0, elements); } static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 0a3dc14a1381..fa8b638e3842 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2637,6 +2637,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, qeth_get_priority_queue(card, skb, ipv, cast_type) : card->qdio.default_out_queue]; int tx_bytes = skb->len; + unsigned int hd_len = 0; bool use_tso; int data_offset = -1; unsigned int nr_frags; @@ -2756,16 +2757,18 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (card->info.type != QETH_CARD_TYPE_IQD) { int len; - if (use_tso) - len = ((unsigned long)tcp_hdr(new_skb) + - tcp_hdrlen(new_skb)) - - (unsigned long)new_skb->data; - else + if (use_tso) { + hd_len = sizeof(struct qeth_hdr_tso) + + ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); + len = hd_len; + } else { len = sizeof(struct qeth_hdr_layer3); + } if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) goto tx_drop; - rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements); + rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, + elements); } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, data_offset, 0); -- cgit v1.2.3-55-g7522 From 9c3bfda999fb0e4f284021134e5040bed5c82ac5 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Fri, 18 Aug 2017 10:19:07 +0200 Subject: s390/qeth: pass TSO data offset to fill_buffer() For TSO we need to skip the skb's qeth/IP/TCP headers when mapping it into buffer elements. Instead of (mis)using skb_pull(), pass a corresponding offset to fill_buffer() like we already do for IQDs. No actual change in the resulting TSO buffers. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core.h | 2 +- drivers/s390/net/qeth_core_main.c | 10 ++++------ drivers/s390/net/qeth_l2_main.c | 4 ++-- drivers/s390/net/qeth_l3_main.c | 2 +- 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 2f5673812810..5753fbc485d5 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -952,7 +952,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card, unsigned int hd_len); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int hd_len, int elements); + unsigned int hd_len, unsigned int offset, int elements); int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); int qeth_core_get_sset_count(struct net_device *, int); void qeth_core_get_ethtool_stats(struct net_device *, diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 6cafeceea3ce..4a5c3028dfb6 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3975,11 +3975,8 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, buffer->element[element].length = hd_len; buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; buf->next_element_to_fill++; - skb_pull(skb, hd_len); - } - /* IQD */ - if (offset > 0) { + } else if (offset) { int element = buf->next_element_to_fill; is_first_elem = false; @@ -4049,7 +4046,8 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned int hd_len, int elements_needed) + unsigned int offset, unsigned int hd_len, + int elements_needed) { struct qeth_qdio_out_buffer *buffer; int start_index; @@ -4098,7 +4096,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, } } } - tmp = qeth_fill_buffer(queue, buffer, skb, hdr, 0, hd_len); + tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % QDIO_MAX_BUFFERS_PER_Q; flush_count += tmp; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index c78d9fadb9c8..a6233ab562f0 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -746,7 +746,7 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, rc = -EINVAL; goto out; } - rc = qeth_do_send_packet(card, queue, skb_copy, hdr, 0, elements); + rc = qeth_do_send_packet(card, queue, skb_copy, hdr, 0, 0, elements); out: if (!rc) { /* tx success, free dangling original */ @@ -778,7 +778,7 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, return -E2BIG; if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr))) return -EINVAL; - return qeth_do_send_packet(card, queue, skb, hdr, 0, elements); + return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements); } static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index fa8b638e3842..02400bbcb610 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2768,7 +2768,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) goto tx_drop; rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, - elements); + hd_len, elements); } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, 
data_offset, 0); -- cgit v1.2.3-55-g7522 From f1588177b259e75a9fabcf3d4d0be4d6b0981d24 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Fri, 18 Aug 2017 10:19:08 +0200 Subject: s390/qeth: pass full IQD header length to fill_buffer() This is a prerequisite for unifying the code to build header elements. The TSO header has a different size, so we can no longer rely on implicitly adding the size of a normal qeth_hdr. No functional change. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 3 +-- drivers/s390/net/qeth_l2_main.c | 2 +- drivers/s390/net/qeth_l3_main.c | 3 ++- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 4a5c3028dfb6..cef9f54d0eb9 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3981,8 +3981,7 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, is_first_elem = false; buffer->element[element].addr = hdr; - buffer->element[element].length = sizeof(struct qeth_hdr) + - hd_len; + buffer->element[element].length = hd_len; buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; buf->is_header[element] = 1; buf->next_element_to_fill++; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index a6233ab562f0..c85fadf21b38 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -695,7 +695,7 @@ static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb, goto out; } rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset, - data_offset); + sizeof(*hdr) + data_offset); out: if (rc) kmem_cache_free(qeth_core_header_cache, hdr); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 02400bbcb610..ab661a431f7c 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2670,6 +2670,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (card->info.type == QETH_CARD_TYPE_IQD) { new_skb = skb; data_offset = ETH_HLEN; + hd_len = sizeof(*hdr); hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!hdr) goto tx_drop; @@ -2771,7 +2772,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, hd_len, elements); } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, - data_offset, 0); + data_offset, hd_len); if (!rc) { card->stats.tx_packets++; -- cgit v1.2.3-55-g7522 From eaf3cc087f6e15073dd4b02b7ad2f1301702bd74 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Fri, 18 Aug 2017 10:19:09 +0200 Subject: s390/qeth: unify code to build header elements After plenty of refactoring, use hd_len as single indication that the skb needs a dedicated header element. This preserves existing behaviour for TSO, as 'hdr' always points to skb->data. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index cef9f54d0eb9..ffefdd97abca 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3953,37 +3953,38 @@ static void __qeth_fill_buffer(struct sk_buff *skb, buf->next_element_to_fill = element; } +/** + * qeth_fill_buffer() - map skb into an output buffer + * @queue: QDIO queue to submit the buffer on + * @buf: buffer to transport the skb + * @skb: skb to map into the buffer + * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated + * from qeth_core_header_cache. + * @offset: when mapping the skb, start at skb->data + offset + * @hd_len: if > 0, build a dedicated header element of this size + */ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned int offset, unsigned int hd_len) { - struct qdio_buffer *buffer; + struct qdio_buffer *buffer = buf->buffer; bool is_first_elem = true; int flush_cnt = 0; - buffer = buf->buffer; refcount_inc(&skb->users); skb_queue_tail(&buf->skb_list, skb); - if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) { - int element = buf->next_element_to_fill; - is_first_elem = false; - - /*fill first buffer entry only with header information */ - buffer->element[element].addr = skb->data; - buffer->element[element].length = hd_len; - buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; - buf->next_element_to_fill++; - /* IQD */ - } else if (offset) { + /* build dedicated header element */ + if (hd_len) { int element = buf->next_element_to_fill; is_first_elem = false; buffer->element[element].addr = hdr; buffer->element[element].length = hd_len; buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; - buf->is_header[element] = 1; + /* remember to free cache-allocated qeth_hdr: */ + buf->is_header[element] = ((void *)hdr != skb->data); buf->next_element_to_fill++; } -- cgit v1.2.3-55-g7522 From 0d6f02d37531ff6fad15c211162f3974fadc9ede Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Fri, 18 Aug 2017 10:19:10 +0200 Subject: s390/qeth: use skb_cow_head() for L2 OSA xmit Taking a full copy via skb_realloc_headroom() on every xmit is overkill and wastes CPU time; all we actually need is to push on the qeth_hdr. So rework the L2 OSA TX path to avoid the copy. Minor complications arise because struct qeth_hdr must not cross a page boundary. So add a new helper qeth_push_hdr() that catches this, and falls back to the hdr cache that we already use for IQDs. This change uncovered that qeth's TX completion takes rather long. Now that we no longer free the original skb straight away and thus call skb->destructor later than before, throughput regresses significantly. For now, restore old behaviour by adding an explicit skb_orphan(), and a big TODO to improve the TX completion time. Tested-by: Nils Hoppmann Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core.h | 1 + drivers/s390/net/qeth_core_main.c | 28 +++++++++++++++++++ drivers/s390/net/qeth_l2_main.c | 58 ++++++++++++++++++++++++--------------- 3 files changed, 65 insertions(+), 22 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 5753fbc485d5..59e09854c4f7 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -985,6 +985,7 @@ int qeth_set_features(struct net_device *, netdev_features_t); int qeth_recover_features(struct net_device *); netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); int qeth_vm_request_mac(struct qeth_card *card); +int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index ffefdd97abca..bae7440abc01 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3890,6 +3890,34 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) } EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); +/** + * qeth_push_hdr() - push a qeth_hdr onto an skb. + * @skb: skb that the qeth_hdr should be pushed onto. + * @hdr: double pointer to a qeth_hdr. When returning with >= 0, + * it contains a valid pointer to a qeth_hdr. + * @len: length of the hdr that needs to be pushed on. + * + * Returns the pushed length. If the header can't be pushed on + * (eg. because it would cross a page boundary), it is allocated from + * the cache instead and 0 is returned. + * Error to create the hdr is indicated by returning with < 0. + */ +int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len) +{ + if (skb_headroom(skb) >= len && + qeth_get_elements_for_range((addr_t)skb->data - len, + (addr_t)skb->data) == 1) { + *hdr = skb_push(skb, len); + return len; + } + /* fall back */ + *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); + if (!*hdr) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL_GPL(qeth_push_hdr); + static void __qeth_fill_buffer(struct sk_buff *skb, struct qeth_qdio_out_buffer *buf, bool is_first_elem, unsigned int offset) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index c85fadf21b38..760b023eae95 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -705,9 +705,11 @@ out: static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, struct qeth_qdio_out_q *queue, int cast_type) { + int push_len = sizeof(struct qeth_hdr); unsigned int elements, nr_frags; - struct sk_buff *skb_copy; - struct qeth_hdr *hdr; + unsigned int hdr_elements = 0; + struct qeth_hdr *hdr = NULL; + unsigned int hd_len = 0; int rc; /* fix hardware limitation: as long as we do not have sbal @@ -727,38 +729,44 @@ static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, } nr_frags = skb_shinfo(skb)->nr_frags; - /* create a copy with writeable headroom */ - skb_copy = skb_realloc_headroom(skb, sizeof(struct qeth_hdr)); - if (!skb_copy) - return -ENOMEM; - hdr = skb_push(skb_copy, sizeof(struct qeth_hdr)); - qeth_l2_fill_header(hdr, skb_copy, cast_type, - skb_copy->len - sizeof(*hdr)); - if (skb_copy->ip_summed == CHECKSUM_PARTIAL) - qeth_l2_hdr_csum(card, hdr, skb_copy); - - elements = qeth_get_elements_no(card, skb_copy, 0, 0); + rc = skb_cow_head(skb, push_len); + if (rc) + return rc; + push_len = qeth_push_hdr(skb, &hdr, push_len); + 
if (push_len < 0) + return push_len; + if (!push_len) { + /* hdr was allocated from cache */ + hd_len = sizeof(*hdr); + hdr_elements = 1; + } + qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len); + if (skb->ip_summed == CHECKSUM_PARTIAL) + qeth_l2_hdr_csum(card, hdr, skb); + + elements = qeth_get_elements_no(card, skb, hdr_elements, 0); if (!elements) { rc = -E2BIG; goto out; } - if (qeth_hdr_chk_and_bounce(skb_copy, &hdr, sizeof(*hdr))) { - rc = -EINVAL; - goto out; - } - rc = qeth_do_send_packet(card, queue, skb_copy, hdr, 0, 0, elements); + elements += hdr_elements; + + /* TODO: remove the skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements); out: if (!rc) { - /* tx success, free dangling original */ - dev_kfree_skb_any(skb); if (card->options.performance_stats && nr_frags) { card->perf_stats.sg_skbs_sent++; /* nr_frags + skb->data */ card->perf_stats.sg_frags_sent += nr_frags + 1; } } else { - /* tx fail, free copy */ - dev_kfree_skb_any(skb_copy); + if (hd_len) + kmem_cache_free(qeth_core_header_cache, hdr); + if (rc == -EBUSY) + /* roll back to ETH header */ + skb_pull(skb, push_len); } return rc; } @@ -1011,6 +1019,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->vlan_features |= NETIF_F_RXCSUM; } } + if (card->info.type != QETH_CARD_TYPE_OSN && + card->info.type != QETH_CARD_TYPE_IQD) { + card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; + card->dev->needed_headroom = sizeof(struct qeth_hdr); + } + card->info.broadcast_capable = 1; qeth_l2_request_initial_mac(card); card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * -- cgit v1.2.3-55-g7522 From 7f3b39dafc6234dc1565fafe6adb15a6c4932182 Mon Sep 17 00:00:00 2001 From: Chris Mi Date: Fri, 18 Aug 2017 07:24:20 -0400 Subject: net/sched: Fix the logic error to decide the ingress qdisc The offending commit used a newly added helper function. But the logic is wrong. Without this fix, the affected NICs can't do HW offload. Error -EOPNOTSUPP will be returned directly. Fixes: a2e8da9378cc ("net/sched: use newly added classid identity helpers") Signed-off-by: Chris Mi Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- drivers/net/ethernet/netronome/nfp/bpf/main.c | 2 +- drivers/net/ethernet/netronome/nfp/flower/offload.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 77538cd8184a..e55a9299547a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2892,7 +2892,7 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate) static int cxgb_setup_tc_cls_u32(struct net_device *dev, struct tc_cls_u32_offload *cls_u32) { - if (is_classid_clsact_ingress(cls_u32->common.classid) || + if (!is_classid_clsact_ingress(cls_u32->common.classid) || cls_u32->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f9fd8d8f1bef..56d7ef014d0d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9230,7 +9230,7 @@ static int ixgbe_setup_tc_cls_u32(struct net_device *dev, { struct ixgbe_adapter *adapter = netdev_priv(dev); - if (is_classid_clsact_ingress(cls_u32->common.classid) || + if (!is_classid_clsact_ingress(cls_u32->common.classid) || cls_u32->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8633ca5af6ed..2fc3832bc2f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3031,7 +3031,7 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); - if (is_classid_clsact_ingress(cls_flower->common.classid) || + if (!is_classid_clsact_ingress(cls_flower->common.classid) || cls_flower->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index f34c00fbf78c..7a9f53f74976 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -657,7 +657,7 @@ mlx5e_rep_setup_tc_cls_flower(struct net_device *dev, { struct mlx5e_priv *priv = netdev_priv(dev); - if (is_classid_clsact_ingress(cls_flower->common.classid) || + if (!is_classid_clsact_ingress(cls_flower->common.classid) || cls_flower->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 0e6864922d5c..f4de3a7377b0 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -127,7 +127,7 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, struct nfp_net *nn = netdev_priv(netdev); if (type != TC_SETUP_CLSBPF || !nfp_net_ebpf_capable(nn) || - is_classid_clsact_ingress(cls_bpf->common.classid) || + !is_classid_clsact_ingress(cls_bpf->common.classid) || cls_bpf->common.protocol != htons(ETH_P_ALL) || cls_bpf->common.chain_index) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 3ad5aaa210a4..d868a5700e01 100644 --- 
a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -390,7 +390,7 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, struct tc_cls_flower_offload *cls_flower = type_data; if (type != TC_SETUP_CLSFLOWER || - is_classid_clsact_ingress(cls_flower->common.classid) || + !is_classid_clsact_ingress(cls_flower->common.classid) || !eth_proto_is_802_3(cls_flower->common.protocol) || cls_flower->common.chain_index) return -EOPNOTSUPP; -- cgit v1.2.3-55-g7522 From d36d36ceac11e514f1c313ea6ca7568e2463b6f2 Mon Sep 17 00:00:00 2001 From: Salil Date: Fri, 18 Aug 2017 12:31:37 +0100 Subject: net: hns3: Fixes the missing u64_stats_fetch_begin_irq in 64-bit stats fetch This patch fixes the missing u64_stats_fetch_begin_irq() while trying to atomically do 64-bit RX/TX fetch. We did not get any error during test as our SoC is 64-bit so all of these seq/lock operations results in NOOP. As such, this seq lock supports has been added for the sake of completion if this code ever runs on 32-bit platform and we are trying to do 64-bit stats fetch. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Reported-by: Dan Carpenter Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index 9589b7e1d24c..b12730a23c25 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1054,6 +1054,7 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) /* fetch the tx stats */ ring = priv->ring_data[idx].ring; do { + start = u64_stats_fetch_begin_irq(&ring->syncp); tx_bytes += ring->stats.tx_bytes; tx_pkts += ring->stats.tx_pkts; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); @@ -1061,6 +1062,7 @@ hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) /* fetch the rx stats */ ring = priv->ring_data[idx + queue_num].ring; do { + start = u64_stats_fetch_begin_irq(&ring->syncp); rx_bytes += ring->stats.rx_bytes; rx_pkts += ring->stats.rx_pkts; } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); -- cgit v1.2.3-55-g7522 From 9db85f33c0d724ce9251ad80dff49aa7bdbd95af Mon Sep 17 00:00:00 2001 From: Salil Date: Fri, 18 Aug 2017 12:31:38 +0100 Subject: net: hns3: Fixes the static checker error warning in hns3_get_link_ksettings() This patch fixes the static check error warning in hns3_get_link_ksettings() function by re-arranging the code. Fixes: 496d03e960ae ("net: hns3: Add Ethtool support to HNS3 Driver") Reported-by: Dan Carpenter Signed-off-by: Salil Mehta Signed-off-by: David S. 
Miller --- .../ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 85 ++++++++++++---------- 1 file changed, 48 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c index 53cab3ad4cda..d636399232fb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c @@ -313,7 +313,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev, if (!h->ae_algo || !h->ae_algo->ops) return -EOPNOTSUPP; - /* 1.auto_neg&speed&duplex from cmd */ + /* 1.auto_neg & speed & duplex from cmd */ if (h->ae_algo->ops->get_ksettings_an_result) { h->ae_algo->ops->get_ksettings_an_result(h, &auto_neg, &speed, &duplex); @@ -329,50 +329,61 @@ static int hns3_get_link_ksettings(struct net_device *netdev, } /* 2.media_type get from bios parameter block */ - if (h->ae_algo->ops->get_media_type) + if (h->ae_algo->ops->get_media_type) { h->ae_algo->ops->get_media_type(h, &media_type); - switch (media_type) { - case HNAE3_MEDIA_TYPE_FIBER: - cmd->base.port = PORT_FIBRE; - supported_caps = HNS3_LM_FIBRE_BIT | HNS3_LM_AUTONEG_BIT | - HNS3_LM_PAUSE_BIT | HNS3_LM_1000BASET_FULL_BIT; + switch (media_type) { + case HNAE3_MEDIA_TYPE_FIBER: + cmd->base.port = PORT_FIBRE; + supported_caps = HNS3_LM_FIBRE_BIT | + HNS3_LM_AUTONEG_BIT | + HNS3_LM_PAUSE_BIT | + HNS3_LM_1000BASET_FULL_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_COPPER: + cmd->base.port = PORT_TP; + supported_caps = HNS3_LM_TP_BIT | + HNS3_LM_AUTONEG_BIT | + HNS3_LM_PAUSE_BIT | + HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | + HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | + HNS3_LM_10BASET_HALF_BIT; + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_BACKPLANE: + cmd->base.port = PORT_NONE; + supported_caps = HNS3_LM_BACKPLANE_BIT | + HNS3_LM_PAUSE_BIT | + HNS3_LM_AUTONEG_BIT | + HNS3_LM_1000BASET_FULL_BIT | + HNS3_LM_100BASET_FULL_BIT | + HNS3_LM_100BASET_HALF_BIT | + HNS3_LM_10BASET_FULL_BIT | + HNS3_LM_10BASET_HALF_BIT; + + advertised_caps = supported_caps; + break; + case HNAE3_MEDIA_TYPE_UNKNOWN: + default: + cmd->base.port = PORT_OTHER; + supported_caps = 0; + advertised_caps = 0; + break; + } - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_COPPER: - cmd->base.port = PORT_TP; - supported_caps = HNS3_LM_TP_BIT | HNS3_LM_AUTONEG_BIT | - HNS3_LM_PAUSE_BIT | HNS3_LM_1000BASET_FULL_BIT | - HNS3_LM_100BASET_FULL_BIT | HNS3_LM_100BASET_HALF_BIT | - HNS3_LM_10BASET_FULL_BIT | HNS3_LM_10BASET_HALF_BIT; - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_BACKPLANE: - cmd->base.port = PORT_NONE; - supported_caps = HNS3_LM_BACKPLANE_BIT | HNS3_LM_PAUSE_BIT | - HNS3_LM_AUTONEG_BIT | HNS3_LM_1000BASET_FULL_BIT | - HNS3_LM_100BASET_FULL_BIT | HNS3_LM_100BASET_HALF_BIT | - HNS3_LM_10BASET_FULL_BIT | HNS3_LM_10BASET_HALF_BIT; - - advertised_caps = supported_caps; - break; - case HNAE3_MEDIA_TYPE_UNKNOWN: - default: - cmd->base.port = PORT_OTHER; - supported_caps = 0; - advertised_caps = 0; - break; + /* now, map driver link modes to ethtool link modes */ + hns3_driv_to_eth_caps(supported_caps, cmd, false); + hns3_driv_to_eth_caps(advertised_caps, cmd, true); } - /* now, map driver link modes to ethtool link modes */ - hns3_driv_to_eth_caps(supported_caps, cmd, false); - hns3_driv_to_eth_caps(advertised_caps, cmd, true); - /* 3.mdix_ctrl&mdix get from phy reg */ if 
(h->ae_algo->ops->get_mdix_mode) h->ae_algo->ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl, - &cmd->base.eth_tp_mdix); + &cmd->base.eth_tp_mdix); /* 4.mdio_support */ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22; -- cgit v1.2.3-55-g7522 From 1898d4e404a5f0095071fa3ae178a1d066360fbb Mon Sep 17 00:00:00 2001 From: Salil Date: Fri, 18 Aug 2017 12:31:39 +0100 Subject: net: hns3: Fixes the static check warning due to missing unsupp L3 proto check This patch fixes the static check warning due to missing handling leg of unsupported L3 protocol type in the hns3_get_l4_protocol() function. Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC") Reported-by: Dan Carpenter Signed-off-by: Salil Mehta Signed-off-by: David S. Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index b12730a23c25..e731f87f3c46 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -436,8 +436,8 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen, return 0; } -static void hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, - u8 *il4_proto) +static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, + u8 *il4_proto) { union { struct iphdr *v4; @@ -461,6 +461,8 @@ static void hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, &l4_proto_tmp, &frag_off); } else if (skb->protocol == htons(ETH_P_IP)) { l4_proto_tmp = l3.v4->protocol; + } else { + return -EINVAL; } *ol4_proto = l4_proto_tmp; @@ -468,7 +470,7 @@ static void hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, /* tunnel packet */ if (!skb->encapsulation) { *il4_proto = 0; - return; + return 0; } /* find inner header point */ @@ -486,6 +488,8 @@ static void hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, } *il4_proto = l4_proto_tmp; + + return 0; } static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, @@ -757,7 +761,9 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, protocol = vlan_get_protocol(skb); skb->protocol = protocol; } - hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); + if (ret) + return ret; hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, &type_cs_vlan_tso, &ol_type_vlan_len_msec); -- cgit v1.2.3-55-g7522 From 401481e06099533892b3d8f1db498cbc480b5b24 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 18 Aug 2017 13:34:22 +0200 Subject: ipv6: fix false-postive maybe-uninitialized warning Adding a lock around one of the assignments prevents gcc from tracking the state of the local 'fibmatch' variable, so it can no longer prove that 'dst' is always initialized, leading to a bogus warning: net/ipv6/route.c: In function 'inet6_rtm_getroute': net/ipv6/route.c:3659:2: error: 'dst' may be used uninitialized in this function [-Werror=maybe-uninitialized] This moves the other assignment into the same lock to shut up the warning. Fixes: 121622dba8da ("ipv6: route: make rtm_getroute not assume rtnl is locked") Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index dc021ed6dd37..bec12ae3e6b7 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3624,6 +3624,8 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, if (!fibmatch) dst = ip6_route_input_lookup(net, dev, &fl6, flags); + else + dst = ip6_route_lookup(net, &fl6, 0); rcu_read_unlock(); } else { @@ -3631,10 +3633,10 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, if (!fibmatch) dst = ip6_route_output(net, NULL, &fl6); + else + dst = ip6_route_lookup(net, &fl6, 0); } - if (fibmatch) - dst = ip6_route_lookup(net, &fl6, 0); rt = container_of(dst, struct rt6_info, dst); if (rt->dst.error) { -- cgit v1.2.3-55-g7522 From 9620fef27ed2cdb37bf6fd028f32bea2ef5119a8 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 Aug 2017 12:08:07 -0700 Subject: ipv4: convert dst_metrics.refcnt from atomic_t to refcount_t refcount_t type and corresponding API should be used instead of atomic_t when the variable is used as a reference counter. This allows to avoid accidental refcounter overflows that might lead to use-after-free situations. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/dst.h | 3 ++- net/core/dst.c | 6 +++--- net/ipv4/fib_semantics.c | 4 ++-- net/ipv4/route.c | 4 ++-- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/include/net/dst.h b/include/net/dst.h index f73611ec4017..93568bd0a352 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -107,7 +108,7 @@ struct dst_entry { struct dst_metrics { u32 metrics[RTAX_MAX]; - atomic_t refcnt; + refcount_t refcnt; }; extern const struct dst_metrics dst_default_metrics; diff --git a/net/core/dst.c b/net/core/dst.c index 00aa972ad1a1..d6ead757c258 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -55,7 +55,7 @@ const struct dst_metrics dst_default_metrics = { * We really want to avoid false sharing on this variable, and catch * any writes on it. 
*/ - .refcnt = ATOMIC_INIT(1), + .refcnt = REFCOUNT_INIT(1), }; void dst_init(struct dst_entry *dst, struct dst_ops *ops, @@ -213,7 +213,7 @@ u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old); unsigned long prev, new; - atomic_set(&p->refcnt, 1); + refcount_set(&p->refcnt, 1); memcpy(p->metrics, old_p->metrics, sizeof(p->metrics)); new = (unsigned long) p; @@ -225,7 +225,7 @@ u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) if (prev & DST_METRICS_READ_ONLY) p = NULL; } else if (prev & DST_METRICS_REFCOUNTED) { - if (atomic_dec_and_test(&old_p->refcnt)) + if (refcount_dec_and_test(&old_p->refcnt)) kfree(old_p); } } diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index d521caf57385..394d800db50c 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -220,7 +220,7 @@ static void free_fib_info_rcu(struct rcu_head *head) } endfor_nexthops(fi); m = fi->fib_metrics; - if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt)) + if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt)) kfree(m); kfree(fi); } @@ -1090,7 +1090,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg, kfree(fi); return ERR_PTR(err); } - atomic_set(&fi->fib_metrics->refcnt, 1); + refcount_set(&fi->fib_metrics->refcnt, 1); } else { fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index d400c0543106..872b4cb136d3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1398,7 +1398,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst) struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst); struct rtable *rt = (struct rtable *) dst; - if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt)) + if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt)) kfree(p); if (!list_empty(&rt->rt_uncached)) { @@ -1456,7 +1456,7 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr, dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true); if (fi->fib_metrics != &dst_default_metrics) { rt->dst._metrics |= DST_METRICS_REFCOUNTED; - atomic_inc(&fi->fib_metrics->refcnt); + refcount_inc(&fi->fib_metrics->refcnt); } #ifdef CONFIG_IP_ROUTE_CLASSID rt->dst.tclassid = nh->nh_tclassid; -- cgit v1.2.3-55-g7522 From d2896116dbc7be7cabd5db414e008aef4a5e0a00 Mon Sep 17 00:00:00 2001 From: Intiyaz Basha Date: Fri, 18 Aug 2017 13:07:19 -0700 Subject: liquidio: fix Smatch error Fix Smatch error by not dereferencing iq pointer if it's NULL. See http://marc.info/?l=kernel-janitors&m=150296723301129&w=2 Also, remove unnecessary parentheses. Fixes: d314ac222829 ("liquidio: moved liquidio_napi_poll to lio_core.c") Reported-by: Dan Carpenter Signed-off-by: Intiyaz Basha Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/liquidio/lio_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c index d4f0646084b7..0e7896cdb295 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_core.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -668,8 +668,8 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget) #define MAX_REG_CNT 2000000U /* force enable interrupt if reg cnts are high to avoid wraparound */ - if (((work_done < budget) && (tx_done)) || - (iq->pkt_in_done >= MAX_REG_CNT) || + if ((work_done < budget && tx_done) || + (iq && iq->pkt_in_done >= MAX_REG_CNT) || (droq->pkt_count >= MAX_REG_CNT)) { tx_done = 1; napi_complete_done(napi, work_done); -- cgit v1.2.3-55-g7522 From f21e5077010acda73a60d85e416d6887d537e506 Mon Sep 17 00:00:00 2001 From: Dimitris Michailidis Date: Wed, 16 Aug 2017 14:34:46 -0700 Subject: macvlan: add offload features for encapsulation Currently macvlan devices do not set their hw_enc_features making encapsulated Tx packets resort to SW fallbacks. Add encapsulation GSO offloads to ->features as is done for the other GSOs and set ->hw_enc_features. Signed-off-by: Dimitris Michailidis Signed-off-by: David S. Miller --- drivers/net/macvlan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index ca35c6ba7947..d2aea961e0f4 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -835,7 +835,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; #define ALWAYS_ON_OFFLOADS \ (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \ - NETIF_F_GSO_ROBUST) + NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL) #define ALWAYS_ON_FEATURES (ALWAYS_ON_OFFLOADS | NETIF_F_LLTX) @@ -874,6 +874,7 @@ static int macvlan_init(struct net_device *dev) dev->hw_features |= NETIF_F_LRO; dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->vlan_features |= ALWAYS_ON_OFFLOADS; + dev->hw_enc_features |= dev->features; dev->gso_max_size = lowerdev->gso_max_size; dev->gso_max_segs = lowerdev->gso_max_segs; dev->hard_header_len = lowerdev->hard_header_len; -- cgit v1.2.3-55-g7522 From 0888e372c37fa31882c8ed89fb2f8188b08b6718 Mon Sep 17 00:00:00 2001 From: Levin, Alexander (Sasha Levin) Date: Thu, 17 Aug 2017 00:35:11 +0000 Subject: net: inet: diag: expose sockets cgroup classid This is useful for directly looking up a task based on class id rather than having to scan through all open file descriptors. Signed-off-by: Sasha Levin Signed-off-by: David S. 
Miller --- include/uapi/linux/inet_diag.h | 1 + net/ipv4/inet_diag.c | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index bbe201047df6..678496897a68 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -142,6 +142,7 @@ enum { INET_DIAG_PAD, INET_DIAG_MARK, INET_DIAG_BBRINFO, + INET_DIAG_CLASS_ID, __INET_DIAG_MAX, }; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 3828b3a805cd..67325d5832d7 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -274,6 +274,17 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, goto errout; } + if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) { + u32 classid = 0; + +#ifdef CONFIG_SOCK_CGROUP_DATA + classid = sock_cgroup_classid(&sk->sk_cgrp_data); +#endif + + if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) + goto errout; + } + out: nlmsg_end(skb, nlh); return 0; -- cgit v1.2.3-55-g7522 From 3de42f5617dca6d2e5c8bbc4a07b4cfe270f8764 Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Wed, 16 Aug 2017 18:30:13 -0700 Subject: liquidio: remove support for deprecated f/w cmd OCTNET_CMD_RESET_PF Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_main.c | 9 --------- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 1 - 2 files changed, 10 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 268ba5215bdd..89d4bbc81707 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1438,15 +1438,6 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) liquidio_stop(netdev); - if (fw_type_is_none()) { - struct octnic_ctrl_pkt nctrl; - - memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); - nctrl.ncmd.s.cmd = OCTNET_CMD_RESET_PF; - nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; - octnet_send_nic_ctrl_pkt(oct, &nctrl); - } - if (oct->props[lio->ifidx].napi_enabled == 1) { list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) napi_disable(napi); diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 18d29550e2f8..906e30aadadc 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -189,7 +189,6 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_Q 0 /* NIC Command types */ -#define OCTNET_CMD_RESET_PF 0x0 #define OCTNET_CMD_CHANGE_MTU 0x1 #define OCTNET_CMD_CHANGE_MACADDR 0x2 #define OCTNET_CMD_CHANGE_DEVFLAGS 0x3 -- cgit v1.2.3-55-g7522 From d2cee2e5d0106abb5da25f393b3c50e0bb01f7f9 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 17 Aug 2017 18:22:32 +0200 Subject: ixgbe: change ndo_xdp_xmit return code on xmit errors Use errno -ENOSPC ("No space left on device") when the XDP xmit have no space left on the TX ring buffer, instead of -ENOMEM. Signed-off-by: Jesper Dangaard Brouer Acked-by: John Fastabend Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 56d7ef014d0d..8d3224ad6434 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -9860,7 +9860,7 @@ static int ixgbe_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp) err = ixgbe_xmit_xdp_ring(adapter, xdp); if (err != IXGBE_XDP_TX) - return -ENOMEM; + return -ENOSPC; return 0; } -- cgit v1.2.3-55-g7522 From 4c03bdd7b5c084c3c6973cb2419edac5363c051f Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Thu, 17 Aug 2017 18:22:37 +0200 Subject: xdp: adjust xdp redirect tracepoint to include return error code The return error code need to be included in the tracepoint xdp:xdp_redirect, else its not possible to distinguish successful or failed XDP_REDIRECT transmits. XDP have no queuing mechanism. Thus, it is fairly easily to overrun a NIC transmit queue. The eBPF program invoking helpers (bpf_redirect or bpf_redirect_map) to redirect a packet doesn't get any feedback whether the packet was actually transmitted. Info on failed transmits in the tracepoint xdp:xdp_redirect, is interesting as this opens for providing a feedback-loop to the receiving XDP program. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- include/trace/events/xdp.h | 11 +++++++---- net/core/filter.c | 19 ++++++++++++------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index 7b1eb7b4be41..0e42e69f773b 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -53,15 +53,16 @@ TRACE_EVENT(xdp_redirect, TP_PROTO(const struct net_device *from, const struct net_device *to, - const struct bpf_prog *xdp, u32 act), + const struct bpf_prog *xdp, u32 act, int err), - TP_ARGS(from, to, xdp, act), + TP_ARGS(from, to, xdp, act, err), TP_STRUCT__entry( __string(name_from, from->name) __string(name_to, to->name) __array(u8, prog_tag, 8) __field(u32, act) + __field(int, err) ), TP_fast_assign( @@ -70,12 +71,14 @@ TRACE_EVENT(xdp_redirect, __assign_str(name_from, from->name); __assign_str(name_to, to->name); __entry->act = act; + __entry->err = err; ), - TP_printk("prog=%s from=%s to=%s action=%s", + TP_printk("prog=%s from=%s to=%s action=%s err=%d", __print_hex_str(__entry->prog_tag, 8), __get_str(name_from), __get_str(name_to), - __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB)) + __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), + __entry->err) ); #endif /* _TRACE_XDP_H */ diff --git a/net/core/filter.c b/net/core/filter.c index 0f4df86d936a..fa2115695037 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2535,14 +2535,16 @@ int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, struct bpf_map *map = ri->map; u32 index = ri->ifindex; struct net_device *fwd; - int err = -EINVAL; + int err; ri->ifindex = 0; ri->map = NULL; fwd = __dev_map_lookup_elem(map, index); - if (!fwd) + if (!fwd) { + err = -EINVAL; goto out; + } if (ri->map_to_flush && (ri->map_to_flush != map)) xdp_do_flush_map(); @@ -2552,7 +2554,7 @@ int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, ri->map_to_flush = map; out: - trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT); + trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT, err); return err; } @@ -2562,6 +2564,7 @@ int xdp_do_redirect(struct net_device *dev, struct 
xdp_buff *xdp, struct redirect_info *ri = this_cpu_ptr(&redirect_info); struct net_device *fwd; u32 index = ri->ifindex; + int err; if (ri->map) return xdp_do_redirect_map(dev, xdp, xdp_prog); @@ -2570,12 +2573,14 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, ri->ifindex = 0; if (unlikely(!fwd)) { bpf_warn_invalid_xdp_redirect(index); - return -EINVAL; + err = -EINVAL; + goto out; } - trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT); - - return __bpf_tx_xdp(fwd, NULL, xdp, 0); + err = __bpf_tx_xdp(fwd, NULL, xdp, 0); +out: + trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT, err); + return err; } EXPORT_SYMBOL_GPL(xdp_do_redirect); -- cgit v1.2.3-55-g7522 From 16a4362573782115096799aebd9862f8bb140169 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Thu, 17 Aug 2017 18:14:43 -0700 Subject: bpf: Fix map-in-map checking in the verifier In check_map_func_compatibility(), a 'break' has been accidentally removed for the BPF_MAP_TYPE_ARRAY_OF_MAPS and BPF_MAP_TYPE_HASH_OF_MAPS cases. This patch adds it back. Fixes: 174a79ff9515 ("bpf: sockmap with sk redirect support") Cc: John Fastabend Signed-off-by: Martin KaFai Lau Acked-by: Alexei Starovoitov Acked-by: John Fastabend Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- kernel/bpf/verifier.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 40f669ddb571..4f6e7eb42ba0 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1523,6 +1523,7 @@ static int check_map_func_compatibility(struct bpf_map *map, int func_id) case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; + break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && -- cgit v1.2.3-55-g7522 From 1547f538c14581346c2eb82f8af0071ee6ec2b30 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 18 Aug 2017 14:49:25 +0100 Subject: mlx5: ensure 0 is returned when vport is zero Currently, if vport is zero then then an uninialized return status in err is returned. Since the only return status at the end of the function esw_add_uc_addr is zero for the current set of return paths we may as well just return 0 rather than err to fix this issue. Detected by CoverityScan, CID#1452698 ("Uninitialized scalar variable") Fixes: eeb66cdb6826 ("net/mlx5: Separate between E-Switch and MPFS") Signed-off-by: Colin Ian King Reviewed-by: Leon Romanovsky Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index fd51f0ea8df9..6b84c1113301 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -401,7 +401,7 @@ fdb_add: esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n", vport, mac, vaddr->flow_rule); - return err; + return 0; } static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) -- cgit v1.2.3-55-g7522 From 3b1ded4e0b5118040d44b3b85391aa8865b0d05c Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:02:18 -0500 Subject: amd-xgbe: Set the MDIO mode for 10000Base-T configuration Currently the MDIO mode is set to none for the 10000Base-T, which is incorrect. The MDIO mode should for this configuration should be clause 45. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 04b5c149caca..81c45fa65e0a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -2921,7 +2921,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) phy_data->start_mode = XGBE_MODE_KR; } - phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; + phy_data->phydev_mode = XGBE_MDIO_MODE_CL45; break; /* 10GBase-R support */ -- cgit v1.2.3-55-g7522 From f087b506ff5c38e0f2873651dba4e2b73a1b2d67 Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:02:27 -0500 Subject: amd-xgbe: Set the MII control width for the MAC interface When running in SGMII mode at speeds below 1000Mbps, the auto-negotition control register must set the MII control width for the MAC interface to be 8-bits wide. By default the width is 4-bits. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 1 + drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 2 ++ 2 files changed, 3 insertions(+) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 9795419aac2d..d07edf9eaa69 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1339,6 +1339,7 @@ #define XGBE_AN_CL37_PCS_MODE_BASEX 0x00 #define XGBE_AN_CL37_PCS_MODE_SGMII 0x04 #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 +#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 /* Bit setting and getting macros * The get macro will extract the current bit field value from within diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 80684914dd8a..2222bbf8d0a4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -982,6 +982,8 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata) break; } + reg |= XGBE_AN_CL37_MII_CTRL_8BIT; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n", -- cgit v1.2.3-55-g7522 From 25ff96a9dc9c6a5a21778c36cb1e7175128d2071 Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:02:40 -0500 Subject: amd-xgbe: Be sure driver shuts down cleanly on module removal Sometimes when the driver is being unloaded while the devices are still up the driver can issue errors. This is based on timing and the double invocation of some routines. The phy_exit() call needs to be run after the network device has been closed and unregistered from the system. Also, the phy_exit() does not need to invoke phy_stop() since that will be called as part of the device closing, so remove that call. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
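A minimal sketch of the corrected teardown order described above, using the names from the patch that follows (not a complete function):

        /* 1. Take the netdev down first; the close path already stops the PHY
         *    state machine, which is why phy_exit() can drop its phy_stop() call.
         */
        unregister_netdev(netdev);

        /* 2. Only once the device is closed and unregistered is it safe to tear
         *    down the PHY layer itself.
         */
        pdata->phy_if.phy_exit(pdata);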
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 4 ++-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 500147d9e3c8..53a425cc7a2d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -458,6 +458,8 @@ void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata) if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK)) xgbe_ptp_unregister(pdata); + unregister_netdev(netdev); + pdata->phy_if.phy_exit(pdata); flush_workqueue(pdata->an_workqueue); @@ -465,8 +467,6 @@ void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata) flush_workqueue(pdata->dev_workqueue); destroy_workqueue(pdata->dev_workqueue); - - unregister_netdev(netdev); } static int __init xgbe_mod_init(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 2222bbf8d0a4..24092020f119 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -1533,8 +1533,6 @@ static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata) static void xgbe_phy_exit(struct xgbe_prv_data *pdata) { - xgbe_phy_stop(pdata); - pdata->phy_if.phy_impl.exit(pdata); } -- cgit v1.2.3-55-g7522 From 1b631424e8244722419ebf6ba1fa5a7d72e675c2 Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:02:49 -0500 Subject: amd-xgbe: Update TSO packet statistics accuracy When transmitting a TSO packet, the driver only increments the TSO packet statistic by one rather than the number of total packets that were sent. Update the driver to record the total number of packets that resulted from TSO transmit. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 06f953e1e9b2..bb60507d3d9b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1740,7 +1740,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, packet->tcp_header_len / 4); - pdata->ext_stats.tx_tso_packets++; + pdata->ext_stats.tx_tso_packets += packet->tx_packets; } else { /* Enable CRC and Pad Insertion */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); -- cgit v1.2.3-55-g7522 From efbaa828330aed8cfa9529451abfea4a465303ec Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:02:57 -0500 Subject: amd-xgbe: Add support to handle device renaming Many of the names used by the driver are based upon the name of the device found during device probe. Move the formatting of the names into the device open function so that any renaming that occurs before the device is brought up will be accounted for. This also means moving the creation of some named workqueues into the device open path. Add support to register for net events so that if a device is renamed the corresponding debugfs directory can be renamed. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
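The rename handling below is driven by a netdevice notifier. A hedged, generic sketch of that pattern (is_our_netdev() and drv_rename_debugfs() are placeholders, not symbols from this driver):

        #include <linux/netdevice.h>
        #include <linux/notifier.h>

        static int drv_netdev_event(struct notifier_block *nb, unsigned long event,
                                    void *data)
        {
                struct net_device *netdev = netdev_notifier_info_to_dev(data);

                /* ignore devices that belong to other drivers */
                if (event == NETDEV_CHANGENAME && is_our_netdev(netdev))
                        drv_rename_debugfs(netdev);

                return NOTIFY_DONE;
        }

        static struct notifier_block drv_netdev_notifier = {
                .notifier_call = drv_netdev_event,
        };

        /* register_netdevice_notifier(&drv_netdev_notifier) from module init,
         * unregister_netdevice_notifier() from module exit, as the patch does.
         */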
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 25 ++++++++++ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 44 +++++++++++++++-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 72 ++++++++++++---------------- drivers/net/ethernet/amd/xgbe/xgbe.h | 5 +- 4 files changed, 100 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 7546b660d6b5..7d128be61310 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -527,3 +527,28 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) debugfs_remove_recursive(pdata->xgbe_debugfs); pdata->xgbe_debugfs = NULL; } + +void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) +{ + struct dentry *pfile; + char *buf; + + if (!pdata->xgbe_debugfs) + return; + + buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name); + if (!buf) + return; + + if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf)) + goto out; + + pfile = debugfs_rename(pdata->xgbe_debugfs->d_parent, + pdata->xgbe_debugfs, + pdata->xgbe_debugfs->d_parent, buf); + if (!pfile) + netdev_err(pdata->netdev, "debugfs_rename failed\n"); + +out: + kfree(buf); +} diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 2fd9b80b39b0..d6d29d8c58a7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -887,7 +887,7 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata) (unsigned long)pdata); ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, - netdev->name, pdata); + netdev_name(netdev), pdata); if (ret) { netdev_alert(netdev, "error requesting irq %d\n", pdata->dev_irq); @@ -1589,16 +1589,42 @@ static int xgbe_open(struct net_device *netdev) DBGPR("-->xgbe_open\n"); + /* Create the various names based on netdev name */ + snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", + netdev_name(netdev)); + + snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc", + netdev_name(netdev)); + + snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c", + netdev_name(netdev)); + + /* Create workqueues */ + pdata->dev_workqueue = + create_singlethread_workqueue(netdev_name(netdev)); + if (!pdata->dev_workqueue) { + netdev_err(netdev, "device workqueue creation failed\n"); + return -ENOMEM; + } + + pdata->an_workqueue = + create_singlethread_workqueue(pdata->an_name); + if (!pdata->an_workqueue) { + netdev_err(netdev, "phy workqueue creation failed\n"); + ret = -ENOMEM; + goto err_dev_wq; + } + /* Reset the phy settings */ ret = xgbe_phy_reset(pdata); if (ret) - return ret; + goto err_an_wq; /* Enable the clocks */ ret = clk_prepare_enable(pdata->sysclk); if (ret) { netdev_alert(netdev, "dma clk_prepare_enable failed\n"); - return ret; + goto err_an_wq; } ret = clk_prepare_enable(pdata->ptpclk); @@ -1651,6 +1677,12 @@ err_ptpclk: err_sysclk: clk_disable_unprepare(pdata->sysclk); +err_an_wq: + destroy_workqueue(pdata->an_workqueue); + +err_dev_wq: + destroy_workqueue(pdata->dev_workqueue); + return ret; } @@ -1674,6 +1706,12 @@ static int xgbe_close(struct net_device *netdev) clk_disable_unprepare(pdata->ptpclk); clk_disable_unprepare(pdata->sysclk); + flush_workqueue(pdata->an_workqueue); + destroy_workqueue(pdata->an_workqueue); + + flush_workqueue(pdata->dev_workqueue); + destroy_workqueue(pdata->dev_workqueue); + set_bit(XGBE_DOWN, &pdata->dev_state); DBGPR("<--xgbe_close\n"); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c 
b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 53a425cc7a2d..c5ff385d51ae 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -120,6 +120,7 @@ #include #include #include +#include #include "xgbe.h" #include "xgbe-common.h" @@ -399,35 +400,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) return ret; } - /* Create the PHY/ANEG name based on netdev name */ - snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", - netdev_name(netdev)); - - /* Create the ECC name based on netdev name */ - snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc", - netdev_name(netdev)); - - /* Create the I2C name based on netdev name */ - snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c", - netdev_name(netdev)); - - /* Create workqueues */ - pdata->dev_workqueue = - create_singlethread_workqueue(netdev_name(netdev)); - if (!pdata->dev_workqueue) { - netdev_err(netdev, "device workqueue creation failed\n"); - ret = -ENOMEM; - goto err_netdev; - } - - pdata->an_workqueue = - create_singlethread_workqueue(pdata->an_name); - if (!pdata->an_workqueue) { - netdev_err(netdev, "phy workqueue creation failed\n"); - ret = -ENOMEM; - goto err_wq; - } - if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK)) xgbe_ptp_register(pdata); @@ -439,14 +411,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) pdata->rx_ring_count); return 0; - -err_wq: - destroy_workqueue(pdata->dev_workqueue); - -err_netdev: - unregister_netdev(netdev); - - return ret; } void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata) @@ -461,18 +425,42 @@ void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata) unregister_netdev(netdev); pdata->phy_if.phy_exit(pdata); +} - flush_workqueue(pdata->an_workqueue); - destroy_workqueue(pdata->an_workqueue); +static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(data); + struct xgbe_prv_data *pdata = netdev_priv(netdev); - flush_workqueue(pdata->dev_workqueue); - destroy_workqueue(pdata->dev_workqueue); + if (netdev->netdev_ops != xgbe_get_netdev_ops()) + goto out; + + switch (event) { + case NETDEV_CHANGENAME: + xgbe_debugfs_rename(pdata); + break; + + default: + break; + } + +out: + return NOTIFY_DONE; } +static struct notifier_block xgbe_netdev_notifier = { + .notifier_call = xgbe_netdev_event, +}; + static int __init xgbe_mod_init(void) { int ret; + ret = register_netdevice_notifier(&xgbe_netdev_notifier); + if (ret) + return ret; + ret = xgbe_platform_init(); if (ret) return ret; @@ -489,6 +477,8 @@ static void __exit xgbe_mod_exit(void) xgbe_pci_exit(); xgbe_platform_exit(); + + unregister_netdevice_notifier(&xgbe_netdev_notifier); } module_init(xgbe_mod_init); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index e9282c924621..9a80f20adc1c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -130,6 +130,7 @@ #include #include #include +#include #define XGBE_DRV_NAME "amd-xgbe" #define XGBE_DRV_VERSION "1.0.3" @@ -1172,7 +1173,6 @@ struct xgbe_prv_data { struct tasklet_struct tasklet_i2c; struct tasklet_struct tasklet_an; -#ifdef CONFIG_DEBUG_FS struct dentry *xgbe_debugfs; unsigned int debugfs_xgmac_reg; @@ -1183,7 +1183,6 @@ struct xgbe_prv_data { unsigned int debugfs_xprop_reg; unsigned int debugfs_xi2c_reg; -#endif }; /* Function prototypes*/ @@ -1232,9 +1231,11 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *); #ifdef CONFIG_DEBUG_FS void 
xgbe_debugfs_init(struct xgbe_prv_data *); void xgbe_debugfs_exit(struct xgbe_prv_data *); +void xgbe_debugfs_rename(struct xgbe_prv_data *pdata); #else static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {} static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {} +static inline void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) {} #endif /* CONFIG_DEBUG_FS */ /* NOTE: Uncomment for function trace log messages in KERNEL LOG */ -- cgit v1.2.3-55-g7522 From 40452f0ec84a3b7082a934404783a121d378b990 Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:03:08 -0500 Subject: amd-xgbe: Add additional dynamic debug messages Add some additional dynamic debug message to the driver. The new messages will provide additional information about the PCS window calculation. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 1e56ad7bd9a5..3e5833cf1fab 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -292,6 +292,10 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7); pdata->xpcs_window_mask = pdata->xpcs_window_size - 1; if (netif_msg_probe(pdata)) { + dev_dbg(dev, "xpcs window def = %#010x\n", + pdata->xpcs_window_def_reg); + dev_dbg(dev, "xpcs window sel = %#010x\n", + pdata->xpcs_window_sel_reg); dev_dbg(dev, "xpcs window = %#010x\n", pdata->xpcs_window); dev_dbg(dev, "xpcs window size = %#010x\n", -- cgit v1.2.3-55-g7522 From caa575afad73ce1a40848543461667c57a1ad989 Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:03:17 -0500 Subject: amd-xgbe: Optimize DMA channel interrupt enablement Currently whenever the driver needs to enable or disable interrupts for a DMA channel it reads the interrupt enable register (IER), updates the value and then writes the new value back to the IER. Since the hardware does not change the IER, software can track this value and elimiate the need to read it each time. Add the IER value to the channel related data structure and use that as the base for enabling and disabling interrupts, thus removing the need for the MMIO read. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
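The idea in isolation, a short sketch using the field names from the patch below (channel->curr_ier mirrors the hardware IER, so enabling an interrupt becomes a cached update plus one write):

        /* before: dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);  -- MMIO read
         * after:  the cached channel->curr_ier is the source of truth
         */
        XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);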
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 77 +++++++++++++++----------------- drivers/net/ethernet/amd/xgbe/xgbe.h | 4 +- 2 files changed, 37 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index bb60507d3d9b..75a479caac03 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -605,7 +605,6 @@ static void xgbe_config_flow_control(struct xgbe_prv_data *pdata) static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) { struct xgbe_channel *channel; - unsigned int dma_ch_isr, dma_ch_ier; unsigned int i; /* Set the interrupt mode if supported */ @@ -617,20 +616,20 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) channel = pdata->channel[i]; /* Clear all the interrupts which are set */ - dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); - XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); + XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, + XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); /* Clear all interrupt enable bits */ - dma_ch_ier = 0; + channel->curr_ier = 0; /* Enable following interrupts * NIE - Normal Interrupt Summary Enable * AIE - Abnormal Interrupt Summary Enable * FBEE - Fatal Bus Error Enable */ - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); if (channel->tx_ring) { /* Enable the following Tx interrupts @@ -639,7 +638,8 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) * mode) */ if (!pdata->per_channel_irq || pdata->channel_irq_mode) - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(channel->curr_ier, + DMA_CH_IER, TIE, 1); } if (channel->rx_ring) { /* Enable following Rx interrupts @@ -648,12 +648,13 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) * per channel interrupts in edge triggered * mode) */ - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); if (!pdata->per_channel_irq || pdata->channel_irq_mode) - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + XGMAC_SET_BITS(channel->curr_ier, + DMA_CH_IER, RIE, 1); } - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); } } @@ -1964,44 +1965,40 @@ static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc) static int xgbe_enable_int(struct xgbe_channel *channel, enum xgbe_int int_id) { - unsigned int dma_ch_ier; - - dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); - switch (int_id) { case XGMAC_INT_DMA_CH_SR_TI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); break; case XGMAC_INT_DMA_CH_SR_TPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1); break; case XGMAC_INT_DMA_CH_SR_TBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1); break; case XGMAC_INT_DMA_CH_SR_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); break; case XGMAC_INT_DMA_CH_SR_RBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1); break; case XGMAC_INT_DMA_CH_SR_RPS: - 
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1); break; case XGMAC_INT_DMA_CH_SR_TI_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1); break; case XGMAC_INT_DMA_CH_SR_FBE: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1); break; case XGMAC_INT_DMA_ALL: - dma_ch_ier |= channel->saved_ier; + channel->curr_ier |= channel->saved_ier; break; default: return -1; } - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); return 0; } @@ -2009,45 +2006,41 @@ static int xgbe_enable_int(struct xgbe_channel *channel, static int xgbe_disable_int(struct xgbe_channel *channel, enum xgbe_int int_id) { - unsigned int dma_ch_ier; - - dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER); - switch (int_id) { case XGMAC_INT_DMA_CH_SR_TI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); break; case XGMAC_INT_DMA_CH_SR_TPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0); break; case XGMAC_INT_DMA_CH_SR_TBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0); break; case XGMAC_INT_DMA_CH_SR_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); break; case XGMAC_INT_DMA_CH_SR_RBU: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0); break; case XGMAC_INT_DMA_CH_SR_RPS: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0); break; case XGMAC_INT_DMA_CH_SR_TI_RI: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0); - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0); break; case XGMAC_INT_DMA_CH_SR_FBE: - XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0); + XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0); break; case XGMAC_INT_DMA_ALL: - channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK; - dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK; + channel->saved_ier = channel->curr_ier; + channel->curr_ier = 0; break; default: return -1; } - XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier); + XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier); return 0; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 9a80f20adc1c..58bb455bf1a1 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -182,8 +182,6 @@ #define XGBE_IRQ_MODE_EDGE 0 #define XGBE_IRQ_MODE_LEVEL 1 -#define XGBE_DMA_INTERRUPT_MASK 0x31c7 - #define XGMAC_MIN_PACKET 60 #define XGMAC_STD_PACKET_MTU 1500 #define XGMAC_MAX_STD_PACKET 1518 @@ -462,6 +460,8 @@ struct xgbe_channel { /* Netdev related settings */ struct napi_struct napi; + /* Per channel interrupt enablement tracker */ + unsigned int curr_ier; unsigned int saved_ier; unsigned int tx_timer_active; -- cgit v1.2.3-55-g7522 From 3be95872e89eeabc83ddd6011c988d85c94df050 Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:03:26 -0500 Subject: amd-xgbe: Add hardware features debug output Use the dynamic debug support to 
output information about the hardware features reported by the device. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 78 ++++++++++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index d6d29d8c58a7..7498bb81f918 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -732,8 +732,6 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) unsigned int mac_hfr0, mac_hfr1, mac_hfr2; struct xgbe_hw_features *hw_feat = &pdata->hw_feat; - DBGPR("-->xgbe_get_all_hw_features\n"); - mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R); mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R); mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R); @@ -828,7 +826,81 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7); hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7); - DBGPR("<--xgbe_get_all_hw_features\n"); + if (netif_msg_probe(pdata)) { + dev_dbg(pdata->dev, "Hardware features:\n"); + + /* Hardware feature register 0 */ + dev_dbg(pdata->dev, " 1GbE support : %s\n", + hw_feat->gmii ? "yes" : "no"); + dev_dbg(pdata->dev, " VLAN hash filter : %s\n", + hw_feat->vlhash ? "yes" : "no"); + dev_dbg(pdata->dev, " MDIO interface : %s\n", + hw_feat->sma ? "yes" : "no"); + dev_dbg(pdata->dev, " Wake-up packet support : %s\n", + hw_feat->rwk ? "yes" : "no"); + dev_dbg(pdata->dev, " Magic packet support : %s\n", + hw_feat->mgk ? "yes" : "no"); + dev_dbg(pdata->dev, " Management counters : %s\n", + hw_feat->mmc ? "yes" : "no"); + dev_dbg(pdata->dev, " ARP offload : %s\n", + hw_feat->aoe ? "yes" : "no"); + dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n", + hw_feat->ts ? "yes" : "no"); + dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n", + hw_feat->eee ? "yes" : "no"); + dev_dbg(pdata->dev, " TX checksum offload : %s\n", + hw_feat->tx_coe ? "yes" : "no"); + dev_dbg(pdata->dev, " RX checksum offload : %s\n", + hw_feat->rx_coe ? "yes" : "no"); + dev_dbg(pdata->dev, " Additional MAC addresses : %u\n", + hw_feat->addn_mac); + dev_dbg(pdata->dev, " Timestamp source : %s\n", + (hw_feat->ts_src == 1) ? "internal" : + (hw_feat->ts_src == 2) ? "external" : + (hw_feat->ts_src == 3) ? "internal/external" : "n/a"); + dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n", + hw_feat->sa_vlan_ins ? "yes" : "no"); + + /* Hardware feature register 1 */ + dev_dbg(pdata->dev, " RX fifo size : %u\n", + hw_feat->rx_fifo_size); + dev_dbg(pdata->dev, " TX fifo size : %u\n", + hw_feat->tx_fifo_size); + dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n", + hw_feat->adv_ts_hi ? "yes" : "no"); + dev_dbg(pdata->dev, " DMA width : %u\n", + hw_feat->dma_width); + dev_dbg(pdata->dev, " Data Center Bridging : %s\n", + hw_feat->dcb ? "yes" : "no"); + dev_dbg(pdata->dev, " Split header : %s\n", + hw_feat->sph ? "yes" : "no"); + dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n", + hw_feat->tso ? "yes" : "no"); + dev_dbg(pdata->dev, " Debug memory interface : %s\n", + hw_feat->dma_debug ? "yes" : "no"); + dev_dbg(pdata->dev, " Receive Side Scaling : %s\n", + hw_feat->rss ? 
"yes" : "no"); + dev_dbg(pdata->dev, " Traffic Class count : %u\n", + hw_feat->tc_cnt); + dev_dbg(pdata->dev, " Hash table size : %u\n", + hw_feat->hash_table_size); + dev_dbg(pdata->dev, " L3/L4 Filters : %u\n", + hw_feat->l3l4_filter_num); + + /* Hardware feature register 2 */ + dev_dbg(pdata->dev, " RX queue count : %u\n", + hw_feat->rx_q_cnt); + dev_dbg(pdata->dev, " TX queue count : %u\n", + hw_feat->tx_q_cnt); + dev_dbg(pdata->dev, " RX DMA channel count : %u\n", + hw_feat->rx_ch_cnt); + dev_dbg(pdata->dev, " TX DMA channel count : %u\n", + hw_feat->rx_ch_cnt); + dev_dbg(pdata->dev, " PPS outputs : %u\n", + hw_feat->pps_out_num); + dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n", + hw_feat->aux_snap_num); + } } static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) -- cgit v1.2.3-55-g7522 From 80a788c94e6c0aa29dfbb43eae4a0d4144992c5d Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:03:35 -0500 Subject: amd-xgbe: Add per queue Tx and Rx statistics Add per queue Tx and Rx packet and byte counts. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 23 ++++++++++++++++------- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 26 +++++++++++++++++++++++++- drivers/net/ethernet/amd/xgbe/xgbe.h | 5 +++++ 3 files changed, 46 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 75a479caac03..a9784084202a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1609,6 +1609,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) struct xgbe_ring_data *rdata; struct xgbe_ring_desc *rdesc; struct xgbe_packet_data *packet = &ring->packet_data; + unsigned int tx_packets, tx_bytes; unsigned int csum, tso, vlan; unsigned int tso_context, vlan_context; unsigned int tx_set_ic; @@ -1618,6 +1619,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) DBGPR("-->xgbe_dev_xmit\n"); + tx_packets = packet->tx_packets; + tx_bytes = packet->tx_bytes; + csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, CSUM_ENABLE); tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, @@ -1645,13 +1649,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) * - Addition of Tx frame count to the frame count since the * last interrupt was set does not exceed the frame count setting */ - ring->coalesce_count += packet->tx_packets; + ring->coalesce_count += tx_packets; if (!pdata->tx_frames) tx_set_ic = 0; - else if (packet->tx_packets > pdata->tx_frames) + else if (tx_packets > pdata->tx_frames) tx_set_ic = 1; - else if ((ring->coalesce_count % pdata->tx_frames) < - packet->tx_packets) + else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets) tx_set_ic = 1; else tx_set_ic = 0; @@ -1741,7 +1744,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN, packet->tcp_header_len / 4); - pdata->ext_stats.tx_tso_packets += packet->tx_packets; + pdata->ext_stats.tx_tso_packets += tx_packets; } else { /* Enable CRC and Pad Insertion */ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0); @@ -1789,8 +1792,11 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1); /* Save the Tx info to report back during cleanup */ - rdata->tx.packets = packet->tx_packets; - rdata->tx.bytes = packet->tx_bytes; + rdata->tx.packets = tx_packets; + 
rdata->tx.bytes = tx_bytes; + + pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets; + pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes; /* In case the Tx DMA engine is running, make sure everything * is written to the descriptor(s) before setting the OWN bit @@ -1944,6 +1950,9 @@ static int xgbe_dev_read(struct xgbe_channel *channel) FRAME, 1); } + pdata->ext_stats.rxq_packets[channel->queue_index]++; + pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len; + DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name, ring->cur & (ring->rdesc_count - 1), ring->cur); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 67a2e52ad25d..f80b186779b7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -186,6 +186,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = { static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct xgbe_prv_data *pdata = netdev_priv(netdev); int i; switch (stringset) { @@ -195,6 +196,18 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data) ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } + for (i = 0; i < pdata->tx_ring_count; i++) { + sprintf(data, "txq_%u_packets", i); + data += ETH_GSTRING_LEN; + sprintf(data, "txq_%u_bytes", i); + data += ETH_GSTRING_LEN; + } + for (i = 0; i < pdata->rx_ring_count; i++) { + sprintf(data, "rxq_%u_packets", i); + data += ETH_GSTRING_LEN; + sprintf(data, "rxq_%u_bytes", i); + data += ETH_GSTRING_LEN; + } break; } } @@ -211,15 +224,26 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev, stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset; *data++ = *(u64 *)stat; } + for (i = 0; i < pdata->tx_ring_count; i++) { + *data++ = pdata->ext_stats.txq_packets[i]; + *data++ = pdata->ext_stats.txq_bytes[i]; + } + for (i = 0; i < pdata->rx_ring_count; i++) { + *data++ = pdata->ext_stats.rxq_packets[i]; + *data++ = pdata->ext_stats.rxq_bytes[i]; + } } static int xgbe_get_sset_count(struct net_device *netdev, int stringset) { + struct xgbe_prv_data *pdata = netdev_priv(netdev); int ret; switch (stringset) { case ETH_SS_STATS: - ret = XGBE_STATS_COUNT; + ret = XGBE_STATS_COUNT + + (pdata->tx_ring_count * 2) + + (pdata->rx_ring_count * 2); break; default: diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 58bb455bf1a1..0e93155bc2d5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -668,6 +668,11 @@ struct xgbe_ext_stats { u64 tx_tso_packets; u64 rx_split_header_packets; u64 rx_buffer_unavailable; + + u64 txq_packets[XGBE_MAX_DMA_CHANNELS]; + u64 txq_bytes[XGBE_MAX_DMA_CHANNELS]; + u64 rxq_packets[XGBE_MAX_DMA_CHANNELS]; + u64 rxq_bytes[XGBE_MAX_DMA_CHANNELS]; }; struct xgbe_hw_if { -- cgit v1.2.3-55-g7522 From 606c07f3086017a8c2d7ce0843807e81b541edcc Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:03:44 -0500 Subject: net: ethtool: Add macro to clear a link mode setting There are currently macros to set and test an ETHTOOL_LINK_MODE_ setting, but not to clear one. Add a macro to clear an ETHTOOL_LINK_MODE_ setting. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
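A hedged usage sketch of the new macro next to its existing zero/add/test counterparts (the Pause/Asym_Pause juggling mirrors what the xgbe conversion later in this series does):

        struct ethtool_link_ksettings ks;
        bool pause;

        ethtool_link_ksettings_zero_link_mode(&ks, advertising);
        ethtool_link_ksettings_add_link_mode(&ks, advertising, Pause);
        ethtool_link_ksettings_add_link_mode(&ks, advertising, Asym_Pause);

        /* new: clear one mode without touching the rest of the mask */
        ethtool_link_ksettings_del_link_mode(&ks, advertising, Asym_Pause);

        pause = ethtool_link_ksettings_test_link_mode(&ks, advertising, Pause);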
Miller --- include/linux/ethtool.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index afdbb701fdb4..4587a4c36923 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -136,6 +136,17 @@ struct ethtool_link_ksettings { #define ethtool_link_ksettings_add_link_mode(ptr, name, mode) \ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) +/** + * ethtool_link_ksettings_del_link_mode - clear bit in link_ksettings + * link mode mask + * @ptr : pointer to struct ethtool_link_ksettings + * @name : one of supported/advertising/lp_advertising + * @mode : one of the ETHTOOL_LINK_MODE_*_BIT + * (not atomic, no bound checking) + */ +#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \ + __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name) + /** * ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask * @ptr : pointer to struct ethtool_link_ksettings -- cgit v1.2.3-55-g7522 From 85f9feb64bd5cfd6412258be15f90b517b4b40fc Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:03:55 -0500 Subject: amd-xgbe: Convert to using the new link mode settings Convert from using the old u32 supported, advertising, etc. link settings to the new link mode settings that support bit positions / settings greater than 32 bits. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 56 +++-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 77 +++--- drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c | 54 +++-- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 350 +++++++++++++++------------ drivers/net/ethernet/amd/xgbe/xgbe.h | 50 +++- 5 files changed, 345 insertions(+), 242 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index f80b186779b7..cea25acd5de2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -267,6 +267,7 @@ static int xgbe_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct ethtool_link_ksettings *lks = &pdata->phy.lks; int ret = 0; if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) { @@ -279,16 +280,21 @@ static int xgbe_set_pauseparam(struct net_device *netdev, pdata->phy.tx_pause = pause->tx_pause; pdata->phy.rx_pause = pause->rx_pause; - pdata->phy.advertising &= ~ADVERTISED_Pause; - pdata->phy.advertising &= ~ADVERTISED_Asym_Pause; + XGBE_CLR_ADV(lks, Pause); + XGBE_CLR_ADV(lks, Asym_Pause); if (pause->rx_pause) { - pdata->phy.advertising |= ADVERTISED_Pause; - pdata->phy.advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_ADV(lks, Pause); + XGBE_SET_ADV(lks, Asym_Pause); } - if (pause->tx_pause) - pdata->phy.advertising ^= ADVERTISED_Asym_Pause; + if (pause->tx_pause) { + /* Equivalent to XOR of Asym_Pause */ + if (XGBE_ADV(lks, Asym_Pause)) + XGBE_CLR_ADV(lks, Asym_Pause); + else + XGBE_SET_ADV(lks, Asym_Pause); + } if (netif_running(netdev)) ret = pdata->phy_if.phy_config_aneg(pdata); @@ -300,22 +306,20 @@ static int xgbe_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct ethtool_link_ksettings *lks = &pdata->phy.lks; cmd->base.phy_address = pdata->phy.address; - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - pdata->phy.supported); - 
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - pdata->phy.advertising); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, - pdata->phy.lp_advertising); - cmd->base.autoneg = pdata->phy.autoneg; cmd->base.speed = pdata->phy.speed; cmd->base.duplex = pdata->phy.duplex; cmd->base.port = PORT_NONE; + XGBE_LM_COPY(cmd, supported, lks, supported); + XGBE_LM_COPY(cmd, advertising, lks, advertising); + XGBE_LM_COPY(cmd, lp_advertising, lks, lp_advertising); + return 0; } @@ -323,7 +327,8 @@ static int xgbe_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - u32 advertising; + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); u32 speed; int ret; @@ -355,15 +360,17 @@ static int xgbe_set_link_ksettings(struct net_device *netdev, } } - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - netif_dbg(pdata, link, netdev, - "requested advertisement %#x, phy supported %#x\n", - advertising, pdata->phy.supported); + "requested advertisement 0x%*pb, phy supported 0x%*pb\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported); + + bitmap_and(advertising, + cmd->link_modes.advertising, lks->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS); - advertising &= pdata->phy.supported; - if ((cmd->base.autoneg == AUTONEG_ENABLE) && !advertising) { + if ((cmd->base.autoneg == AUTONEG_ENABLE) && + bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) { netdev_err(netdev, "unsupported requested advertisement\n"); return -EINVAL; @@ -373,12 +380,13 @@ static int xgbe_set_link_ksettings(struct net_device *netdev, pdata->phy.autoneg = cmd->base.autoneg; pdata->phy.speed = speed; pdata->phy.duplex = cmd->base.duplex; - pdata->phy.advertising = advertising; + bitmap_copy(lks->link_modes.advertising, advertising, + __ETHTOOL_LINK_MODE_MASK_NBITS); if (cmd->base.autoneg == AUTONEG_ENABLE) - pdata->phy.advertising |= ADVERTISED_Autoneg; + XGBE_SET_ADV(lks, Autoneg); else - pdata->phy.advertising &= ~ADVERTISED_Autoneg; + XGBE_CLR_ADV(lks, Autoneg); if (netif_running(netdev)) ret = pdata->phy_if.phy_config_aneg(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 24092020f119..072b9f664597 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -615,12 +615,14 @@ static enum xgbe_an xgbe_an73_page_received(struct xgbe_prv_data *pdata) static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + /* Be sure we aren't looping trying to negotiate */ if (xgbe_in_kr_mode(pdata)) { pdata->kr_state = XGBE_RX_ERROR; - if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) && - !(pdata->phy.advertising & ADVERTISED_2500baseX_Full)) + if (!XGBE_ADV(lks, 1000baseKX_Full) && + !XGBE_ADV(lks, 2500baseX_Full)) return XGBE_AN_NO_LINK; if (pdata->kx_state != XGBE_RX_BPA) @@ -628,7 +630,7 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) } else { pdata->kx_state = XGBE_RX_ERROR; - if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full)) + if (!XGBE_ADV(lks, 10000baseKR_Full)) return XGBE_AN_NO_LINK; if (pdata->kr_state != XGBE_RX_BPA) @@ -944,18 +946,19 @@ static void xgbe_an_state_machine(struct work_struct *work) static void 
xgbe_an37_init(struct xgbe_prv_data *pdata) { - unsigned int advertising, reg; + struct ethtool_link_ksettings lks; + unsigned int reg; - advertising = pdata->phy_if.phy_impl.an_advertising(pdata); + pdata->phy_if.phy_impl.an_advertising(pdata, &lks); /* Set up Advertisement register */ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE); - if (advertising & ADVERTISED_Pause) + if (XGBE_ADV(&lks, Pause)) reg |= 0x100; else reg &= ~0x100; - if (advertising & ADVERTISED_Asym_Pause) + if (XGBE_ADV(&lks, Asym_Pause)) reg |= 0x80; else reg &= ~0x80; @@ -992,13 +995,14 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata) static void xgbe_an73_init(struct xgbe_prv_data *pdata) { - unsigned int advertising, reg; + struct ethtool_link_ksettings lks; + unsigned int reg; - advertising = pdata->phy_if.phy_impl.an_advertising(pdata); + pdata->phy_if.phy_impl.an_advertising(pdata, &lks); /* Set up Advertisement register 3 first */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); - if (advertising & ADVERTISED_10000baseR_FEC) + if (XGBE_ADV(&lks, 10000baseR_FEC)) reg |= 0xc000; else reg &= ~0xc000; @@ -1007,13 +1011,13 @@ static void xgbe_an73_init(struct xgbe_prv_data *pdata) /* Set up Advertisement register 2 next */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); - if (advertising & ADVERTISED_10000baseKR_Full) + if (XGBE_ADV(&lks, 10000baseKR_Full)) reg |= 0x80; else reg &= ~0x80; - if ((advertising & ADVERTISED_1000baseKX_Full) || - (advertising & ADVERTISED_2500baseX_Full)) + if (XGBE_ADV(&lks, 1000baseKX_Full) || + XGBE_ADV(&lks, 2500baseX_Full)) reg |= 0x20; else reg &= ~0x20; @@ -1022,12 +1026,12 @@ static void xgbe_an73_init(struct xgbe_prv_data *pdata) /* Set up Advertisement register 1 last */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); - if (advertising & ADVERTISED_Pause) + if (XGBE_ADV(&lks, Pause)) reg |= 0x400; else reg &= ~0x400; - if (advertising & ADVERTISED_Asym_Pause) + if (XGBE_ADV(&lks, Asym_Pause)) reg |= 0x800; else reg &= ~0x800; @@ -1283,9 +1287,10 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata) static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; - pdata->phy.lp_advertising = 0; + XGBE_ZERO_LP_ADV(lks); if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect) mode = xgbe_cur_mode(pdata); @@ -1515,17 +1520,21 @@ static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata) static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata) { - if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + + if (XGBE_ADV(lks, 10000baseKR_Full)) return SPEED_10000; - else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full) + else if (XGBE_ADV(lks, 10000baseT_Full)) return SPEED_10000; - else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full) + else if (XGBE_ADV(lks, 2500baseX_Full)) return SPEED_2500; - else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full) + else if (XGBE_ADV(lks, 2500baseT_Full)) + return SPEED_2500; + else if (XGBE_ADV(lks, 1000baseKX_Full)) return SPEED_1000; - else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full) + else if (XGBE_ADV(lks, 1000baseT_Full)) return SPEED_1000; - else if (pdata->phy.advertising & ADVERTISED_100baseT_Full) + else if (XGBE_ADV(lks, 100baseT_Full)) return SPEED_100; return SPEED_UNKNOWN; @@ -1538,6 +1547,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata) 
static int xgbe_phy_init(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; int ret; mutex_init(&pdata->an_mutex); @@ -1555,11 +1565,13 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) ret = pdata->phy_if.phy_impl.init(pdata); if (ret) return ret; - pdata->phy.advertising = pdata->phy.supported; + + /* Copy supported link modes to advertising link modes */ + XGBE_LM_COPY(lks, advertising, lks, supported); pdata->phy.address = 0; - if (pdata->phy.advertising & ADVERTISED_Autoneg) { + if (XGBE_ADV(lks, Autoneg)) { pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.speed = SPEED_UNKNOWN; pdata->phy.duplex = DUPLEX_UNKNOWN; @@ -1576,16 +1588,21 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) pdata->phy.rx_pause = pdata->rx_pause; /* Fix up Flow Control advertising */ - pdata->phy.advertising &= ~ADVERTISED_Pause; - pdata->phy.advertising &= ~ADVERTISED_Asym_Pause; + XGBE_CLR_ADV(lks, Pause); + XGBE_CLR_ADV(lks, Asym_Pause); if (pdata->rx_pause) { - pdata->phy.advertising |= ADVERTISED_Pause; - pdata->phy.advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_ADV(lks, Pause); + XGBE_SET_ADV(lks, Asym_Pause); } - if (pdata->tx_pause) - pdata->phy.advertising ^= ADVERTISED_Asym_Pause; + if (pdata->tx_pause) { + /* Equivalent to XOR of Asym_Pause */ + if (XGBE_ADV(lks, Asym_Pause)) + XGBE_CLR_ADV(lks, Asym_Pause); + else + XGBE_SET_ADV(lks, Asym_Pause); + } if (netif_msg_drv(pdata)) xgbe_dump_phy_registers(pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c index c75edcac5e0a..d16eae415f72 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c @@ -231,20 +231,21 @@ static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; enum xgbe_mode mode; unsigned int ad_reg, lp_reg; - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_Backplane; + XGBE_SET_LP_ADV(lks, Autoneg); + XGBE_SET_LP_ADV(lks, Backplane); /* Compare Advertisement and Link Partner register 1 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); if (lp_reg & 0x400) - pdata->phy.lp_advertising |= ADVERTISED_Pause; + XGBE_SET_LP_ADV(lks, Pause); if (lp_reg & 0x800) - pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_LP_ADV(lks, Asym_Pause); if (pdata->phy.pause_autoneg) { /* Set flow control based on auto-negotiation result */ @@ -266,12 +267,12 @@ static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (lp_reg & 0x80) - pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_LP_ADV(lks, 10000baseKR_Full); if (lp_reg & 0x20) { if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000) - pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full; + XGBE_SET_LP_ADV(lks, 2500baseX_Full); else - pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_LP_ADV(lks, 1000baseKX_Full); } ad_reg &= lp_reg; @@ -290,14 +291,17 @@ static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); if (lp_reg & 0xc000) - 
pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + XGBE_SET_LP_ADV(lks, 10000baseR_FEC); return mode; } -static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata) +static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, + struct ethtool_link_ksettings *dlks) { - return pdata->phy.advertising; + struct ethtool_link_ksettings *slks = &pdata->phy.lks; + + XGBE_LM_COPY(dlks, advertising, slks, advertising); } static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) @@ -565,11 +569,10 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) } static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata, - enum xgbe_mode mode, u32 advert) + enum xgbe_mode mode, bool advert) { if (pdata->phy.autoneg == AUTONEG_ENABLE) { - if (pdata->phy.advertising & advert) - return true; + return advert; } else { enum xgbe_mode cur_mode; @@ -583,16 +586,18 @@ static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + switch (mode) { case XGBE_MODE_KX_1000: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseKX_Full); + XGBE_ADV(lks, 1000baseKX_Full)); case XGBE_MODE_KX_2500: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_2500baseX_Full); + XGBE_ADV(lks, 2500baseX_Full)); case XGBE_MODE_KR: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_10000baseKR_Full); + XGBE_ADV(lks, 10000baseKR_Full)); default: return false; } @@ -672,6 +677,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata) static int xgbe_phy_init(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data; int ret; @@ -790,21 +796,23 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) } /* Initialize supported features */ - pdata->phy.supported = SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_Backplane; - pdata->phy.supported |= SUPPORTED_10000baseKR_Full; + XGBE_ZERO_SUP(lks); + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, Backplane); + XGBE_SET_SUP(lks, 10000baseKR_Full); switch (phy_data->speed_set) { case XGBE_SPEEDSET_1000_10000: - pdata->phy.supported |= SUPPORTED_1000baseKX_Full; + XGBE_SET_SUP(lks, 1000baseKX_Full); break; case XGBE_SPEEDSET_2500_10000: - pdata->phy.supported |= SUPPORTED_2500baseX_Full; + XGBE_SET_SUP(lks, 2500baseX_Full); break; } if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) - pdata->phy.supported |= SUPPORTED_10000baseR_FEC; + XGBE_SET_SUP(lks, 10000baseR_FEC); pdata->phy_data = phy_data; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 81c45fa65e0a..3304a291aa96 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -709,18 +709,13 @@ static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg) static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed) return; - pdata->phy.supported &= ~SUPPORTED_Autoneg; - pdata->phy.supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause); - pdata->phy.supported &= ~SUPPORTED_TP; - pdata->phy.supported &= ~SUPPORTED_FIBRE; - pdata->phy.supported &= 
~SUPPORTED_100baseT_Full; - pdata->phy.supported &= ~SUPPORTED_1000baseT_Full; - pdata->phy.supported &= ~SUPPORTED_10000baseT_Full; + XGBE_ZERO_SUP(lks); if (phy_data->sfp_mod_absent) { pdata->phy.speed = SPEED_UNKNOWN; @@ -728,18 +723,13 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.pause_autoneg = AUTONEG_ENABLE; - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; - pdata->phy.supported |= SUPPORTED_FIBRE; - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) - pdata->phy.supported |= SUPPORTED_100baseT_Full; - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) - pdata->phy.supported |= SUPPORTED_1000baseT_Full; - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) - pdata->phy.supported |= SUPPORTED_10000baseT_Full; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, TP); + XGBE_SET_SUP(lks, FIBRE); - pdata->phy.advertising = pdata->phy.supported; + XGBE_LM_COPY(lks, advertising, lks, supported); return; } @@ -753,8 +743,18 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.pause_autoneg = AUTONEG_ENABLE; - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) { + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) + XGBE_SET_SUP(lks, 100baseT_Full); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) + XGBE_SET_SUP(lks, 1000baseT_Full); + } else { + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) + XGBE_SET_SUP(lks, 1000baseX_Full); + } break; case XGBE_SFP_BASE_10000_SR: case XGBE_SFP_BASE_10000_LR: @@ -765,6 +765,27 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) pdata->phy.duplex = DUPLEX_FULL; pdata->phy.autoneg = AUTONEG_DISABLE; pdata->phy.pause_autoneg = AUTONEG_DISABLE; + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { + switch (phy_data->sfp_base) { + case XGBE_SFP_BASE_10000_SR: + XGBE_SET_SUP(lks, 10000baseSR_Full); + break; + case XGBE_SFP_BASE_10000_LR: + XGBE_SET_SUP(lks, 10000baseLR_Full); + break; + case XGBE_SFP_BASE_10000_LRM: + XGBE_SET_SUP(lks, 10000baseLRM_Full); + break; + case XGBE_SFP_BASE_10000_ER: + XGBE_SET_SUP(lks, 10000baseER_Full); + break; + case XGBE_SFP_BASE_10000_CR: + XGBE_SET_SUP(lks, 10000baseCR_Full); + break; + default: + break; + } + } break; default: pdata->phy.speed = SPEED_UNKNOWN; @@ -778,38 +799,14 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) case XGBE_SFP_BASE_1000_T: case XGBE_SFP_BASE_1000_CX: case XGBE_SFP_BASE_10000_CR: - pdata->phy.supported |= SUPPORTED_TP; + XGBE_SET_SUP(lks, TP); break; default: - pdata->phy.supported |= SUPPORTED_FIBRE; - } - - switch (phy_data->sfp_speed) { - case XGBE_SFP_SPEED_100_1000: - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) - pdata->phy.supported |= SUPPORTED_100baseT_Full; - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) - pdata->phy.supported |= SUPPORTED_1000baseT_Full; - break; - case XGBE_SFP_SPEED_1000: - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + XGBE_SET_SUP(lks, FIBRE); break; - case XGBE_SFP_SPEED_10000: - if 
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) - pdata->phy.supported |= SUPPORTED_10000baseT_Full; - break; - default: - /* Choose the fastest supported speed */ - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) - pdata->phy.supported |= SUPPORTED_10000baseT_Full; - else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) - pdata->phy.supported |= SUPPORTED_1000baseT_Full; - else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) - pdata->phy.supported |= SUPPORTED_100baseT_Full; } - pdata->phy.advertising = pdata->phy.supported; + XGBE_LM_COPY(lks, advertising, lks, supported); } static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom, @@ -886,8 +883,10 @@ static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata) static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; struct phy_device *phydev; + u32 advertising; int ret; /* If we already have a PHY, just return */ @@ -943,7 +942,10 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) phy_data->phydev = phydev; xgbe_phy_external_phy_quirks(pdata); - phydev->advertising &= pdata->phy.advertising; + + ethtool_convert_link_mode_to_legacy_u32(&advertising, + lks->link_modes.advertising); + phydev->advertising &= advertising; phy_start_aneg(phy_data->phydev); @@ -1277,6 +1279,7 @@ put: static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; u16 lcl_adv = 0, rmt_adv = 0; u8 fc; @@ -1293,11 +1296,11 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) lcl_adv |= ADVERTISE_PAUSE_ASYM; if (phy_data->phydev->pause) { - pdata->phy.lp_advertising |= ADVERTISED_Pause; + XGBE_SET_LP_ADV(lks, Pause); rmt_adv |= LPA_PAUSE_CAP; } if (phy_data->phydev->asym_pause) { - pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_LP_ADV(lks, Asym_Pause); rmt_adv |= LPA_PAUSE_ASYM; } @@ -1310,10 +1313,11 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_TP; + XGBE_SET_LP_ADV(lks, Autoneg); + XGBE_SET_LP_ADV(lks, TP); /* Use external PHY to determine flow control */ if (pdata->phy.pause_autoneg) @@ -1322,21 +1326,21 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata) switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) { case XGBE_SGMII_AN_LINK_SPEED_100: if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) { - pdata->phy.lp_advertising |= ADVERTISED_100baseT_Full; + XGBE_SET_LP_ADV(lks, 100baseT_Full); mode = XGBE_MODE_SGMII_100; } else { /* Half-duplex not supported */ - pdata->phy.lp_advertising |= ADVERTISED_100baseT_Half; + XGBE_SET_LP_ADV(lks, 100baseT_Half); mode = XGBE_MODE_UNKNOWN; } break; case XGBE_SGMII_AN_LINK_SPEED_1000: if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) { - pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full; + XGBE_SET_LP_ADV(lks, 1000baseT_Full); mode = XGBE_MODE_SGMII_1000; } else { /* Half-duplex not supported */ - pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Half; + XGBE_SET_LP_ADV(lks, 1000baseT_Half); mode = XGBE_MODE_UNKNOWN; } break; @@ -1349,19 +1353,20 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct 
xgbe_prv_data *pdata) static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; unsigned int ad_reg, lp_reg; - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_FIBRE; + XGBE_SET_LP_ADV(lks, Autoneg); + XGBE_SET_LP_ADV(lks, FIBRE); /* Compare Advertisement and Link Partner register */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_LP_ABILITY); if (lp_reg & 0x100) - pdata->phy.lp_advertising |= ADVERTISED_Pause; + XGBE_SET_LP_ADV(lks, Pause); if (lp_reg & 0x80) - pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_LP_ADV(lks, Asym_Pause); if (pdata->phy.pause_autoneg) { /* Set flow control based on auto-negotiation result */ @@ -1379,10 +1384,8 @@ static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata) } } - if (lp_reg & 0x40) - pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Half; if (lp_reg & 0x20) - pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full; + XGBE_SET_LP_ADV(lks, 1000baseX_Full); /* Half duplex is not supported */ ad_reg &= lp_reg; @@ -1393,12 +1396,13 @@ static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata) static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; enum xgbe_mode mode; unsigned int ad_reg, lp_reg; - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_Backplane; + XGBE_SET_LP_ADV(lks, Autoneg); + XGBE_SET_LP_ADV(lks, Backplane); /* Use external PHY to determine flow control */ if (pdata->phy.pause_autoneg) @@ -1408,9 +1412,9 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (lp_reg & 0x80) - pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_LP_ADV(lks, 10000baseKR_Full); if (lp_reg & 0x20) - pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_LP_ADV(lks, 1000baseKX_Full); ad_reg &= lp_reg; if (ad_reg & 0x80) { @@ -1463,26 +1467,27 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); if (lp_reg & 0xc000) - pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + XGBE_SET_LP_ADV(lks, 10000baseR_FEC); return mode; } static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; enum xgbe_mode mode; unsigned int ad_reg, lp_reg; - pdata->phy.lp_advertising |= ADVERTISED_Autoneg; - pdata->phy.lp_advertising |= ADVERTISED_Backplane; + XGBE_SET_LP_ADV(lks, Autoneg); + XGBE_SET_LP_ADV(lks, Backplane); /* Compare Advertisement and Link Partner register 1 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); if (lp_reg & 0x400) - pdata->phy.lp_advertising |= ADVERTISED_Pause; + XGBE_SET_LP_ADV(lks, Pause); if (lp_reg & 0x800) - pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause; + XGBE_SET_LP_ADV(lks, Asym_Pause); if (pdata->phy.pause_autoneg) { /* Set flow control based on auto-negotiation result */ @@ -1504,9 +1509,9 @@ static enum xgbe_mode xgbe_phy_an73_outcome(struct 
xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (lp_reg & 0x80) - pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_LP_ADV(lks, 10000baseKR_Full); if (lp_reg & 0x20) - pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_LP_ADV(lks, 1000baseKX_Full); ad_reg &= lp_reg; if (ad_reg & 0x80) @@ -1520,7 +1525,7 @@ static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata) ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); if (lp_reg & 0xc000) - pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC; + XGBE_SET_LP_ADV(lks, 10000baseR_FEC); return mode; } @@ -1541,41 +1546,43 @@ static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) } } -static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata) +static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, + struct ethtool_link_ksettings *dlks) { + struct ethtool_link_ksettings *slks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int advertising; + + XGBE_LM_COPY(dlks, advertising, slks, advertising); /* Without a re-driver, just return current advertising */ if (!phy_data->redrv) - return pdata->phy.advertising; + return; /* With the KR re-driver we need to advertise a single speed */ - advertising = pdata->phy.advertising; - advertising &= ~ADVERTISED_1000baseKX_Full; - advertising &= ~ADVERTISED_10000baseKR_Full; + XGBE_CLR_ADV(dlks, 1000baseKX_Full); + XGBE_CLR_ADV(dlks, 10000baseKR_Full); switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: - advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_ADV(dlks, 10000baseKR_Full); break; case XGBE_PORT_MODE_BACKPLANE_2500: - advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_ADV(dlks, 1000baseKX_Full); break; case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_NBASE_T: - advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_ADV(dlks, 1000baseKX_Full); break; case XGBE_PORT_MODE_10GBASE_T: if (phy_data->phydev && (phy_data->phydev->speed == SPEED_10000)) - advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_ADV(dlks, 10000baseKR_Full); else - advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_ADV(dlks, 1000baseKX_Full); break; case XGBE_PORT_MODE_10GBASE_R: - advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_ADV(dlks, 10000baseKR_Full); break; case XGBE_PORT_MODE_SFP: switch (phy_data->sfp_base) { @@ -1583,24 +1590,24 @@ static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata) case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: - advertising |= ADVERTISED_1000baseKX_Full; + XGBE_SET_ADV(dlks, 1000baseKX_Full); break; default: - advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_ADV(dlks, 10000baseKR_Full); break; } break; default: - advertising |= ADVERTISED_10000baseKR_Full; + XGBE_SET_ADV(dlks, 10000baseKR_Full); break; } - - return advertising; } static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; + u32 advertising; int ret; ret = xgbe_phy_find_phy_device(pdata); @@ -1610,9 +1617,12 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) if (!phy_data->phydev) return 0; + ethtool_convert_link_mode_to_legacy_u32(&advertising, + lks->link_modes.advertising); + phy_data->phydev->autoneg = 
pdata->phy.autoneg; phy_data->phydev->advertising = phy_data->phydev->supported & - pdata->phy.advertising; + advertising; if (pdata->phy.autoneg != AUTONEG_ENABLE) { phy_data->phydev->speed = pdata->phy.speed; @@ -2073,11 +2083,10 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) } static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata, - enum xgbe_mode mode, u32 advert) + enum xgbe_mode mode, bool advert) { if (pdata->phy.autoneg == AUTONEG_ENABLE) { - if (pdata->phy.advertising & advert) - return true; + return advert; } else { enum xgbe_mode cur_mode; @@ -2092,13 +2101,15 @@ static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + switch (mode) { case XGBE_MODE_X: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseT_Full); + XGBE_ADV(lks, 1000baseX_Full)); case XGBE_MODE_KR: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_10000baseT_Full); + XGBE_ADV(lks, 10000baseKR_Full)); default: return false; } @@ -2107,19 +2118,21 @@ static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + switch (mode) { case XGBE_MODE_SGMII_100: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_100baseT_Full); + XGBE_ADV(lks, 100baseT_Full)); case XGBE_MODE_SGMII_1000: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseT_Full); + XGBE_ADV(lks, 1000baseT_Full)); case XGBE_MODE_KX_2500: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_2500baseX_Full); + XGBE_ADV(lks, 2500baseT_Full)); case XGBE_MODE_KR: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_10000baseT_Full); + XGBE_ADV(lks, 10000baseT_Full)); default: return false; } @@ -2128,6 +2141,7 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; switch (mode) { @@ -2135,22 +2149,26 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) return false; return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseT_Full); + XGBE_ADV(lks, 1000baseX_Full)); case XGBE_MODE_SGMII_100: if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return false; return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_100baseT_Full); + XGBE_ADV(lks, 100baseT_Full)); case XGBE_MODE_SGMII_1000: if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return false; return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseT_Full); + XGBE_ADV(lks, 1000baseT_Full)); case XGBE_MODE_SFI: if (phy_data->sfp_mod_absent) return true; return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_10000baseT_Full); + XGBE_ADV(lks, 10000baseSR_Full) || + XGBE_ADV(lks, 10000baseLR_Full) || + XGBE_ADV(lks, 10000baseLRM_Full) || + XGBE_ADV(lks, 10000baseER_Full) || + XGBE_ADV(lks, 10000baseCR_Full)); default: return false; } @@ -2159,10 +2177,12 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + switch (mode) { case XGBE_MODE_KX_2500: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_2500baseX_Full); + XGBE_ADV(lks, 
2500baseX_Full)); default: return false; } @@ -2171,13 +2191,15 @@ static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata, static bool xgbe_phy_use_bp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; + switch (mode) { case XGBE_MODE_KX_1000: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_1000baseKX_Full); + XGBE_ADV(lks, 1000baseKX_Full)); case XGBE_MODE_KR: return xgbe_phy_check_mode(pdata, mode, - ADVERTISED_10000baseKR_Full); + XGBE_ADV(lks, 10000baseKR_Full)); default: return false; } @@ -2744,6 +2766,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata) static int xgbe_phy_init(struct xgbe_prv_data *pdata) { + struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data; struct mii_bus *mii; unsigned int reg; @@ -2823,32 +2846,33 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) phy_data->cur_mode = XGBE_MODE_UNKNOWN; /* Initialize supported features */ - pdata->phy.supported = 0; + XGBE_ZERO_SUP(lks); switch (phy_data->port_mode) { /* Backplane support */ case XGBE_PORT_MODE_BACKPLANE: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_Backplane; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, Backplane); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { - pdata->phy.supported |= SUPPORTED_1000baseKX_Full; + XGBE_SET_SUP(lks, 1000baseKX_Full); phy_data->start_mode = XGBE_MODE_KX_1000; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { - pdata->phy.supported |= SUPPORTED_10000baseKR_Full; + XGBE_SET_SUP(lks, 10000baseKR_Full); if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) - pdata->phy.supported |= - SUPPORTED_10000baseR_FEC; + XGBE_SET_SUP(lks, 10000baseR_FEC); phy_data->start_mode = XGBE_MODE_KR; } phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; break; case XGBE_PORT_MODE_BACKPLANE_2500: - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_Backplane; - pdata->phy.supported |= SUPPORTED_2500baseX_Full; + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, Backplane); + XGBE_SET_SUP(lks, 2500baseX_Full); phy_data->start_mode = XGBE_MODE_KX_2500; phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; @@ -2856,15 +2880,16 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* MDIO 1GBase-T support */ case XGBE_PORT_MODE_1000BASE_T: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, TP); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { - pdata->phy.supported |= SUPPORTED_100baseT_Full; + XGBE_SET_SUP(lks, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + XGBE_SET_SUP(lks, 1000baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_1000; } @@ -2873,10 +2898,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* MDIO Base-X support */ case XGBE_PORT_MODE_1000BASE_X: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_FIBRE; - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + XGBE_SET_SUP(lks, 
Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, FIBRE); + XGBE_SET_SUP(lks, 1000baseX_Full); phy_data->start_mode = XGBE_MODE_X; phy_data->phydev_mode = XGBE_MDIO_MODE_CL22; @@ -2884,19 +2910,20 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* MDIO NBase-T support */ case XGBE_PORT_MODE_NBASE_T: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, TP); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { - pdata->phy.supported |= SUPPORTED_100baseT_Full; + XGBE_SET_SUP(lks, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + XGBE_SET_SUP(lks, 1000baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_1000; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) { - pdata->phy.supported |= SUPPORTED_2500baseX_Full; + XGBE_SET_SUP(lks, 2500baseT_Full); phy_data->start_mode = XGBE_MODE_KX_2500; } @@ -2905,19 +2932,20 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* 10GBase-T support */ case XGBE_PORT_MODE_10GBASE_T: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, TP); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { - pdata->phy.supported |= SUPPORTED_100baseT_Full; + XGBE_SET_SUP(lks, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + XGBE_SET_SUP(lks, 1000baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_1000; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { - pdata->phy.supported |= SUPPORTED_10000baseT_Full; + XGBE_SET_SUP(lks, 10000baseT_Full); phy_data->start_mode = XGBE_MODE_KR; } @@ -2926,12 +2954,16 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* 10GBase-R support */ case XGBE_PORT_MODE_10GBASE_R: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; - pdata->phy.supported |= SUPPORTED_10000baseT_Full; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, FIBRE); + XGBE_SET_SUP(lks, 10000baseSR_Full); + XGBE_SET_SUP(lks, 10000baseLR_Full); + XGBE_SET_SUP(lks, 10000baseLRM_Full); + XGBE_SET_SUP(lks, 10000baseER_Full); if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) - pdata->phy.supported |= SUPPORTED_10000baseR_FEC; + XGBE_SET_SUP(lks, 10000baseR_FEC); phy_data->start_mode = XGBE_MODE_SFI; phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; @@ -2939,22 +2971,17 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) /* SFP support */ case XGBE_PORT_MODE_SFP: - pdata->phy.supported |= SUPPORTED_Autoneg; - pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - pdata->phy.supported |= SUPPORTED_TP; - pdata->phy.supported |= SUPPORTED_FIBRE; - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { - pdata->phy.supported |= SUPPORTED_100baseT_Full; + XGBE_SET_SUP(lks, Autoneg); + XGBE_SET_SUP(lks, Pause); + XGBE_SET_SUP(lks, Asym_Pause); + XGBE_SET_SUP(lks, TP); + XGBE_SET_SUP(lks, 
FIBRE); + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) phy_data->start_mode = XGBE_MODE_SGMII_100; - } - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { - pdata->phy.supported |= SUPPORTED_1000baseT_Full; + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) phy_data->start_mode = XGBE_MODE_SGMII_1000; - } - if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { - pdata->phy.supported |= SUPPORTED_10000baseT_Full; + if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) phy_data->start_mode = XGBE_MODE_SFI; - } phy_data->phydev_mode = XGBE_MDIO_MODE_CL22; @@ -2965,8 +2992,9 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) } if (netif_msg_probe(pdata)) - dev_dbg(pdata->dev, "phy supported=%#x\n", - pdata->phy.supported); + dev_dbg(pdata->dev, "phy supported=0x%*pb\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, + lks->link_modes.supported); if ((phy_data->conn_type & XGBE_CONN_TYPE_MDIO) && (phy_data->phydev_mode != XGBE_MDIO_MODE_NONE)) { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 0e93155bc2d5..48a46a70bf5e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -131,6 +131,7 @@ #include #include #include +#include #define XGBE_DRV_NAME "amd-xgbe" #define XGBE_DRV_VERSION "1.0.3" @@ -296,6 +297,48 @@ /* MDIO port types */ #define XGMAC_MAX_C22_PORT 3 +/* Link mode bit operations */ +#define XGBE_ZERO_SUP(_ls) \ + ethtool_link_ksettings_zero_link_mode((_ls), supported) + +#define XGBE_SET_SUP(_ls, _mode) \ + ethtool_link_ksettings_add_link_mode((_ls), supported, _mode) + +#define XGBE_CLR_SUP(_ls, _mode) \ + ethtool_link_ksettings_del_link_mode((_ls), supported, _mode) + +#define XGBE_IS_SUP(_ls, _mode) \ + ethtool_link_ksettings_test_link_mode((_ls), supported, _mode) + +#define XGBE_ZERO_ADV(_ls) \ + ethtool_link_ksettings_zero_link_mode((_ls), advertising) + +#define XGBE_SET_ADV(_ls, _mode) \ + ethtool_link_ksettings_add_link_mode((_ls), advertising, _mode) + +#define XGBE_CLR_ADV(_ls, _mode) \ + ethtool_link_ksettings_del_link_mode((_ls), advertising, _mode) + +#define XGBE_ADV(_ls, _mode) \ + ethtool_link_ksettings_test_link_mode((_ls), advertising, _mode) + +#define XGBE_ZERO_LP_ADV(_ls) \ + ethtool_link_ksettings_zero_link_mode((_ls), lp_advertising) + +#define XGBE_SET_LP_ADV(_ls, _mode) \ + ethtool_link_ksettings_add_link_mode((_ls), lp_advertising, _mode) + +#define XGBE_CLR_LP_ADV(_ls, _mode) \ + ethtool_link_ksettings_del_link_mode((_ls), lp_advertising, _mode) + +#define XGBE_LP_ADV(_ls, _mode) \ + ethtool_link_ksettings_test_link_mode((_ls), lp_advertising, _mode) + +#define XGBE_LM_COPY(_dst, _dname, _src, _sname) \ + bitmap_copy((_dst)->link_modes._dname, \ + (_src)->link_modes._sname, \ + __ETHTOOL_LINK_MODE_MASK_NBITS) + struct xgbe_prv_data; struct xgbe_packet_data { @@ -563,9 +606,7 @@ enum xgbe_mdio_mode { }; struct xgbe_phy { - u32 supported; - u32 advertising; - u32 lp_advertising; + struct ethtool_link_ksettings lks; int address; @@ -817,7 +858,8 @@ struct xgbe_phy_impl_if { int (*an_config)(struct xgbe_prv_data *); /* Set/override auto-negotiation advertisement settings */ - unsigned int (*an_advertising)(struct xgbe_prv_data *); + void (*an_advertising)(struct xgbe_prv_data *, + struct ethtool_link_ksettings *); /* Process results of auto-negotiation */ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); -- cgit v1.2.3-55-g7522 From 1a510ccf5869a95c0ff324c61518e42e95712844 Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:04:04 
-0500 Subject: amd-xgbe: Add support for VXLAN offload capabilities The hardware has the capability to perform checksum offload support (both Tx and Rx) and TSO support for VXLAN packets. Add the support required to enable this. The hardware can only support a single VXLAN port for offload. If more than one VXLAN port is added then the offload capabilities have to be disabled and can no longer be advertised. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 24 ++ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 92 ++++++- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 365 +++++++++++++++++++++++++++- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 23 ++ drivers/net/ethernet/amd/xgbe/xgbe.h | 22 ++ 5 files changed, 520 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index d07edf9eaa69..943133034879 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -298,6 +298,7 @@ #define MAC_RWKPFR 0x00c4 #define MAC_LPICSR 0x00d0 #define MAC_LPITCR 0x00d4 +#define MAC_TIR 0x00e0 #define MAC_VR 0x0110 #define MAC_DR 0x0114 #define MAC_HWF0R 0x011c @@ -364,6 +365,8 @@ #define MAC_HWF0R_TXCOESEL_WIDTH 1 #define MAC_HWF0R_VLHASH_INDEX 4 #define MAC_HWF0R_VLHASH_WIDTH 1 +#define MAC_HWF0R_VXN_INDEX 29 +#define MAC_HWF0R_VXN_WIDTH 1 #define MAC_HWF1R_ADDR64_INDEX 14 #define MAC_HWF1R_ADDR64_WIDTH 2 #define MAC_HWF1R_ADVTHWORD_INDEX 13 @@ -448,6 +451,8 @@ #define MAC_PFR_PR_WIDTH 1 #define MAC_PFR_VTFE_INDEX 16 #define MAC_PFR_VTFE_WIDTH 1 +#define MAC_PFR_VUCC_INDEX 22 +#define MAC_PFR_VUCC_WIDTH 1 #define MAC_PMTCSR_MGKPKTEN_INDEX 1 #define MAC_PMTCSR_MGKPKTEN_WIDTH 1 #define MAC_PMTCSR_PWRDWN_INDEX 0 @@ -510,6 +515,12 @@ #define MAC_TCR_SS_WIDTH 2 #define MAC_TCR_TE_INDEX 0 #define MAC_TCR_TE_WIDTH 1 +#define MAC_TCR_VNE_INDEX 24 +#define MAC_TCR_VNE_WIDTH 1 +#define MAC_TCR_VNM_INDEX 25 +#define MAC_TCR_VNM_WIDTH 1 +#define MAC_TIR_TNID_INDEX 0 +#define MAC_TIR_TNID_WIDTH 16 #define MAC_TSCR_AV8021ASMEN_INDEX 28 #define MAC_TSCR_AV8021ASMEN_WIDTH 1 #define MAC_TSCR_SNAPTYPSEL_INDEX 16 @@ -1153,11 +1164,17 @@ #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 #define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7 #define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_TNP_INDEX 8 +#define RX_PACKET_ATTRIBUTES_TNP_WIDTH 1 +#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_INDEX 9 +#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_WIDTH 1 #define RX_NORMAL_DESC0_OVT_INDEX 0 #define RX_NORMAL_DESC0_OVT_WIDTH 16 #define RX_NORMAL_DESC2_HL_INDEX 0 #define RX_NORMAL_DESC2_HL_WIDTH 10 +#define RX_NORMAL_DESC2_TNP_INDEX 11 +#define RX_NORMAL_DESC2_TNP_WIDTH 1 #define RX_NORMAL_DESC3_CDA_INDEX 27 #define RX_NORMAL_DESC3_CDA_WIDTH 1 #define RX_NORMAL_DESC3_CTXT_INDEX 30 @@ -1184,9 +1201,11 @@ #define RX_DESC3_L34T_IPV4_TCP 1 #define RX_DESC3_L34T_IPV4_UDP 2 #define RX_DESC3_L34T_IPV4_ICMP 3 +#define RX_DESC3_L34T_IPV4_UNKNOWN 7 #define RX_DESC3_L34T_IPV6_TCP 9 #define RX_DESC3_L34T_IPV6_UDP 10 #define RX_DESC3_L34T_IPV6_ICMP 11 +#define RX_DESC3_L34T_IPV6_UNKNOWN 15 #define RX_CONTEXT_DESC3_TSA_INDEX 4 #define RX_CONTEXT_DESC3_TSA_WIDTH 1 @@ -1201,6 +1220,8 @@ #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 #define TX_PACKET_ATTRIBUTES_PTP_INDEX 3 #define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1 +#define TX_PACKET_ATTRIBUTES_VXLAN_INDEX 4 +#define TX_PACKET_ATTRIBUTES_VXLAN_WIDTH 1 #define TX_CONTEXT_DESC2_MSS_INDEX 0 #define TX_CONTEXT_DESC2_MSS_WIDTH 15 
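Each of the *_INDEX/*_WIDTH pairs added above describes one bit field inside a register or descriptor word; the XGMAC_*_BITS accessors used in the xgbe-dev.c hunk below are generated from them. A simplified, generic sketch of that pattern (the helper names here are illustrative, not the driver's exact macros):

#include <linux/types.h>

/* Extract a field of 'width' bits starting at bit 'index'. */
static inline u32 field_get(u32 reg, unsigned int index, unsigned int width)
{
	return (reg >> index) & ((1U << width) - 1);
}

/* Replace the same field with 'val', leaving the other bits intact. */
static inline u32 field_set(u32 reg, unsigned int index, unsigned int width,
			    u32 val)
{
	u32 mask = ((1U << width) - 1) << index;

	return (reg & ~mask) | ((val << index) & mask);
}

/* e.g. programming the 16-bit VXLAN port field (MAC_TIR_TNID_INDEX 0,
 * MAC_TIR_TNID_WIDTH 16) boils down to field_set(reg, 0, 16, port).
 */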
@@ -1241,8 +1262,11 @@ #define TX_NORMAL_DESC3_TCPPL_WIDTH 18 #define TX_NORMAL_DESC3_TSE_INDEX 18 #define TX_NORMAL_DESC3_TSE_WIDTH 1 +#define TX_NORMAL_DESC3_VNP_INDEX 23 +#define TX_NORMAL_DESC3_VNP_WIDTH 3 #define TX_NORMAL_DESC2_VLAN_INSERT 0x2 +#define TX_NORMAL_DESC3_VXLAN_PACKET 0x3 /* MDIO undefined or vendor specific registers */ #ifndef MDIO_PMA_10GBR_PMD_CTRL diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index a9784084202a..1bf671e02c0a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -479,6 +479,50 @@ static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata, return false; } +static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata) +{ + /* Program the VXLAN port */ + XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port); + + netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n", + pdata->vxlan_port); +} + +static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata) +{ + if (!pdata->hw_feat.vxn) + return; + + /* Program the VXLAN port */ + xgbe_set_vxlan_id(pdata); + + /* Allow for IPv6/UDP zero-checksum VXLAN packets */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1); + + /* Enable VXLAN tunneling mode */ + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0); + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1); + + netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n"); +} + +static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata) +{ + if (!pdata->hw_feat.vxn) + return; + + /* Disable tunneling mode */ + XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0); + + /* Clear IPv6/UDP zero-checksum VXLAN packets setting */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0); + + /* Clear the VXLAN port */ + XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0); + + netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n"); +} + static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) { unsigned int max_q_count, q_count; @@ -1610,7 +1654,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) struct xgbe_ring_desc *rdesc; struct xgbe_packet_data *packet = &ring->packet_data; unsigned int tx_packets, tx_bytes; - unsigned int csum, tso, vlan; + unsigned int csum, tso, vlan, vxlan; unsigned int tso_context, vlan_context; unsigned int tx_set_ic; int start_index = ring->cur; @@ -1628,6 +1672,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) TSO_ENABLE); vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VLAN_CTAG); + vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + VXLAN); if (tso && (packet->mss != ring->tx.cur_mss)) tso_context = 1; @@ -1759,6 +1805,10 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) packet->length); } + if (vxlan) + XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP, + TX_NORMAL_DESC3_VXLAN_PACKET); + for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) { cur_index++; rdata = XGBE_GET_DESC_DATA(ring, cur_index); @@ -1920,9 +1970,27 @@ static int xgbe_dev_read(struct xgbe_channel *channel) rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); /* Set checksum done indicator as appropriate */ - if (netdev->features & NETIF_F_RXCSUM) + if (netdev->features & NETIF_F_RXCSUM) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE, 1); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNPCSUM_DONE, 1); + } + + /* Set the tunneled packet indicator */ + if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) { + 
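For orientation, the TNP and TNPCSUM_DONE packet attributes set in this Rx-descriptor path are consumed later in the xgbe-drv.c receive-path hunk further down (at the skip_data label). Condensed from that hunk, the mapping onto the skb for a tunnelled frame looks like this:

	/* Condensed excerpt, not new code: descriptor attributes to
	 * skb checksum state for a VXLAN-encapsulated receive.
	 */
	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, TNP)) {
		skb->encapsulation = 1;		/* outer headers parsed */

		if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
				   TNPCSUM_DONE))
			skb->csum_level = 1;	/* inner checksum verified too */
	}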
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNP, 1); + + l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); + switch (l34t) { + case RX_DESC3_L34T_IPV4_UNKNOWN: + case RX_DESC3_L34T_IPV6_UNKNOWN: + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNPCSUM_DONE, 0); + break; + } + } /* Check for errors (only valid in last descriptor) */ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES); @@ -1942,12 +2010,23 @@ static int xgbe_dev_read(struct xgbe_channel *channel) packet->vlan_ctag); } } else { - if ((etlt == 0x05) || (etlt == 0x06)) + unsigned int tnp = XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, TNP); + + if ((etlt == 0x05) || (etlt == 0x06)) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE, 0); - else + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNPCSUM_DONE, 0); + } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) { + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + CSUM_DONE, 0); + XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, + TNPCSUM_DONE, 0); + } else { XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, FRAME, 1); + } } pdata->ext_stats.rxq_packets[channel->queue_index]++; @@ -3536,5 +3615,10 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->disable_ecc_ded = xgbe_disable_ecc_ded; hw_if->disable_ecc_sec = xgbe_disable_ecc_sec; + /* For VXLAN */ + hw_if->enable_vxlan = xgbe_enable_vxlan; + hw_if->disable_vxlan = xgbe_disable_vxlan; + hw_if->set_vxlan_id = xgbe_set_vxlan_id; + DBGPR("<--xgbe_init_function_ptrs\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 7498bb81f918..608693d11bd7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -124,6 +124,7 @@ #include #include #include +#include #include "xgbe.h" #include "xgbe-common.h" @@ -756,6 +757,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) ADDMACADRSEL); hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL); hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS); + hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN); /* Hardware feature register 1 */ hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, @@ -860,6 +862,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) (hw_feat->ts_src == 3) ? "internal/external" : "n/a"); dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n", hw_feat->sa_vlan_ins ? "yes" : "no"); + dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n", + hw_feat->vxn ? 
"yes" : "no"); /* Hardware feature register 1 */ dev_dbg(pdata->dev, " RX fifo size : %u\n", @@ -903,6 +907,116 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) } } +static void xgbe_disable_vxlan_offloads(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + + if (!pdata->vxlan_offloads_set) + return; + + netdev_info(netdev, "disabling VXLAN offloads\n"); + + netdev->hw_enc_features &= ~(NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GRO | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM); + + netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM); + + pdata->vxlan_offloads_set = 0; +} + +static void xgbe_disable_vxlan_hw(struct xgbe_prv_data *pdata) +{ + if (!pdata->vxlan_port_set) + return; + + pdata->hw_if.disable_vxlan(pdata); + + pdata->vxlan_port_set = 0; + pdata->vxlan_port = 0; +} + +static void xgbe_disable_vxlan_accel(struct xgbe_prv_data *pdata) +{ + xgbe_disable_vxlan_offloads(pdata); + + xgbe_disable_vxlan_hw(pdata); +} + +static void xgbe_enable_vxlan_offloads(struct xgbe_prv_data *pdata) +{ + struct net_device *netdev = pdata->netdev; + + if (pdata->vxlan_offloads_set) + return; + + netdev_info(netdev, "enabling VXLAN offloads\n"); + + netdev->hw_enc_features |= NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GRO | + pdata->vxlan_features; + + netdev->features |= pdata->vxlan_features; + + pdata->vxlan_offloads_set = 1; +} + +static void xgbe_enable_vxlan_hw(struct xgbe_prv_data *pdata) +{ + struct xgbe_vxlan_data *vdata; + + if (pdata->vxlan_port_set) + return; + + if (list_empty(&pdata->vxlan_ports)) + return; + + vdata = list_first_entry(&pdata->vxlan_ports, + struct xgbe_vxlan_data, list); + + pdata->vxlan_port_set = 1; + pdata->vxlan_port = be16_to_cpu(vdata->port); + + pdata->hw_if.enable_vxlan(pdata); +} + +static void xgbe_enable_vxlan_accel(struct xgbe_prv_data *pdata) +{ + /* VXLAN acceleration desired? */ + if (!pdata->vxlan_features) + return; + + /* VXLAN acceleration possible? 
*/ + if (pdata->vxlan_force_disable) + return; + + xgbe_enable_vxlan_hw(pdata); + + xgbe_enable_vxlan_offloads(pdata); +} + +static void xgbe_reset_vxlan_accel(struct xgbe_prv_data *pdata) +{ + xgbe_disable_vxlan_hw(pdata); + + if (pdata->vxlan_features) + xgbe_enable_vxlan_offloads(pdata); + + pdata->vxlan_force_disable = 0; +} + static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add) { struct xgbe_channel *channel; @@ -1226,6 +1340,8 @@ static int xgbe_start(struct xgbe_prv_data *pdata) hw_if->enable_tx(pdata); hw_if->enable_rx(pdata); + udp_tunnel_get_rx_info(netdev); + netif_tx_start_all_queues(netdev); xgbe_start_timers(pdata); @@ -1267,6 +1383,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) xgbe_stop_timers(pdata); flush_workqueue(pdata->dev_workqueue); + xgbe_reset_vxlan_accel(pdata); + hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); @@ -1555,10 +1673,18 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) if (ret) return ret; - packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb); - packet->tcp_header_len = tcp_hdrlen(skb); + if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) { + packet->header_len = skb_inner_transport_offset(skb) + + inner_tcp_hdrlen(skb); + packet->tcp_header_len = inner_tcp_hdrlen(skb); + } else { + packet->header_len = skb_transport_offset(skb) + + tcp_hdrlen(skb); + packet->tcp_header_len = tcp_hdrlen(skb); + } packet->tcp_payload_len = skb->len - packet->header_len; packet->mss = skb_shinfo(skb)->gso_size; + DBGPR(" packet->header_len=%u\n", packet->header_len); DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n", packet->tcp_header_len, packet->tcp_payload_len); @@ -1573,6 +1699,49 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet) return 0; } +static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb) +{ + struct xgbe_vxlan_data *vdata; + + if (pdata->vxlan_force_disable) + return false; + + if (!skb->encapsulation) + return false; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (skb->protocol) { + case htons(ETH_P_IP): + if (ip_hdr(skb)->protocol != IPPROTO_UDP) + return false; + break; + + case htons(ETH_P_IPV6): + if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP) + return false; + break; + + default: + return false; + } + + /* See if we have the UDP port in our list */ + list_for_each_entry(vdata, &pdata->vxlan_ports, list) { + if ((skb->protocol == htons(ETH_P_IP)) && + (vdata->sa_family == AF_INET) && + (vdata->port == udp_hdr(skb)->dest)) + return true; + else if ((skb->protocol == htons(ETH_P_IPV6)) && + (vdata->sa_family == AF_INET6) && + (vdata->port == udp_hdr(skb)->dest)) + return true; + } + + return false; +} + static int xgbe_is_tso(struct sk_buff *skb) { if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -1621,6 +1790,10 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, CSUM_ENABLE, 1); + if (xgbe_is_vxlan(pdata, skb)) + XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, + VXLAN, 1); + if (skb_vlan_tag_present(skb)) { /* VLAN requires an extra descriptor if tag is different */ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag) @@ -2050,18 +2223,83 @@ static int xgbe_setup_tc(struct net_device *netdev, enum tc_setup_type type, return 0; } +static netdev_features_t xgbe_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + 
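For TSO on an encapsulated frame, the header block that the hardware must replicate per segment runs to the end of the inner TCP header, which is why the xgbe_prep_tso() hunk above switches to the inner_* accessors. A compact sketch of the two cases (the helper below is hypothetical; the accessors are the same ones the patch uses):

#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Hypothetical helper: per-segment header length for TSO, mirroring
 * the xgbe_prep_tso() change above.
 */
static unsigned int example_tso_header_len(const struct sk_buff *skb,
					   bool encapsulated)
{
	if (encapsulated)
		/* outer L2/L3/L4 + tunnel header + inner L2/L3 + inner TCP */
		return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);

	/* plain case: L2/L3 headers + TCP header */
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}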
netdev_features_t vxlan_base, vxlan_mask; + + vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT; + vxlan_mask = vxlan_base | NETIF_F_GSO_UDP_TUNNEL_CSUM; + + pdata->vxlan_features = features & vxlan_mask; + + /* Only fix VXLAN-related features */ + if (!pdata->vxlan_features) + return features; + + /* If VXLAN isn't supported then clear any features: + * This is needed because NETIF_F_RX_UDP_TUNNEL_PORT gets + * automatically set if ndo_udp_tunnel_add is set. + */ + if (!pdata->hw_feat.vxn) + return features & ~vxlan_mask; + + /* VXLAN CSUM requires VXLAN base */ + if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) && + !(features & NETIF_F_GSO_UDP_TUNNEL)) { + netdev_notice(netdev, + "forcing tx udp tunnel support\n"); + features |= NETIF_F_GSO_UDP_TUNNEL; + } + + /* Can't do one without doing the other */ + if ((features & vxlan_base) != vxlan_base) { + netdev_notice(netdev, + "forcing both tx and rx udp tunnel support\n"); + features |= vxlan_base; + } + + if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { + if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) { + netdev_notice(netdev, + "forcing tx udp tunnel checksumming on\n"); + features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + } + } else { + if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) { + netdev_notice(netdev, + "forcing tx udp tunnel checksumming off\n"); + features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM; + } + } + + pdata->vxlan_features = features & vxlan_mask; + + /* Adjust UDP Tunnel based on current state */ + if (pdata->vxlan_force_disable) { + netdev_notice(netdev, + "VXLAN acceleration disabled, turning off udp tunnel features\n"); + features &= ~vxlan_mask; + } + + return features; +} + static int xgbe_set_features(struct net_device *netdev, netdev_features_t features) { struct xgbe_prv_data *pdata = netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter; + netdev_features_t udp_tunnel; int ret = 0; rxhash = pdata->netdev_features & NETIF_F_RXHASH; rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER; + udp_tunnel = pdata->netdev_features & NETIF_F_GSO_UDP_TUNNEL; if ((features & NETIF_F_RXHASH) && !rxhash) ret = hw_if->enable_rss(pdata); @@ -2085,6 +2323,11 @@ static int xgbe_set_features(struct net_device *netdev, else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter) hw_if->disable_rx_vlan_filtering(pdata); + if ((features & NETIF_F_GSO_UDP_TUNNEL) && !udp_tunnel) + xgbe_enable_vxlan_accel(pdata); + else if (!(features & NETIF_F_GSO_UDP_TUNNEL) && udp_tunnel) + xgbe_disable_vxlan_accel(pdata); + pdata->netdev_features = features; DBGPR("<--xgbe_set_features\n"); @@ -2092,6 +2335,111 @@ static int xgbe_set_features(struct net_device *netdev, return 0; } +static void xgbe_udp_tunnel_add(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_vxlan_data *vdata; + + if (!pdata->hw_feat.vxn) + return; + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + pdata->vxlan_port_count++; + + netif_dbg(pdata, drv, netdev, + "adding VXLAN tunnel, family=%hx/port=%hx\n", + ti->sa_family, be16_to_cpu(ti->port)); + + if (pdata->vxlan_force_disable) + return; + + vdata = kzalloc(sizeof(*vdata), GFP_ATOMIC); + if (!vdata) { + /* Can no longer properly track VXLAN ports */ + pdata->vxlan_force_disable = 1; + netif_dbg(pdata, drv, netdev, + "internal error, disabling VXLAN 
accelerations\n"); + + xgbe_disable_vxlan_accel(pdata); + + return; + } + vdata->sa_family = ti->sa_family; + vdata->port = ti->port; + + list_add_tail(&vdata->list, &pdata->vxlan_ports); + + /* First port added? */ + if (pdata->vxlan_port_count == 1) { + xgbe_enable_vxlan_accel(pdata); + + return; + } +} + +static void xgbe_udp_tunnel_del(struct net_device *netdev, + struct udp_tunnel_info *ti) +{ + struct xgbe_prv_data *pdata = netdev_priv(netdev); + struct xgbe_vxlan_data *vdata; + + if (!pdata->hw_feat.vxn) + return; + + if (ti->type != UDP_TUNNEL_TYPE_VXLAN) + return; + + netif_dbg(pdata, drv, netdev, + "deleting VXLAN tunnel, family=%hx/port=%hx\n", + ti->sa_family, be16_to_cpu(ti->port)); + + /* Don't need safe version since loop terminates with deletion */ + list_for_each_entry(vdata, &pdata->vxlan_ports, list) { + if (vdata->sa_family != ti->sa_family) + continue; + + if (vdata->port != ti->port) + continue; + + list_del(&vdata->list); + kfree(vdata); + + break; + } + + pdata->vxlan_port_count--; + if (!pdata->vxlan_port_count) { + xgbe_reset_vxlan_accel(pdata); + + return; + } + + if (pdata->vxlan_force_disable) + return; + + /* See if VXLAN tunnel id needs to be changed */ + vdata = list_first_entry(&pdata->vxlan_ports, + struct xgbe_vxlan_data, list); + if (pdata->vxlan_port == be16_to_cpu(vdata->port)) + return; + + pdata->vxlan_port = be16_to_cpu(vdata->port); + pdata->hw_if.set_vxlan_id(pdata); +} + +static netdev_features_t xgbe_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + features = vlan_features_check(skb, features); + features = vxlan_features_check(skb, features); + + return features; +} + static const struct net_device_ops xgbe_netdev_ops = { .ndo_open = xgbe_open, .ndo_stop = xgbe_close, @@ -2109,7 +2457,11 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_poll_controller = xgbe_poll_controller, #endif .ndo_setup_tc = xgbe_setup_tc, + .ndo_fix_features = xgbe_fix_features, .ndo_set_features = xgbe_set_features, + .ndo_udp_tunnel_add = xgbe_udp_tunnel_add, + .ndo_udp_tunnel_del = xgbe_udp_tunnel_del, + .ndo_features_check = xgbe_features_check, }; const struct net_device_ops *xgbe_get_netdev_ops(void) @@ -2421,6 +2773,15 @@ skip_data: RX_PACKET_ATTRIBUTES, CSUM_DONE)) skb->ip_summed = CHECKSUM_UNNECESSARY; + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, TNP)) { + skb->encapsulation = 1; + + if (XGMAC_GET_BITS(packet->attributes, + RX_PACKET_ATTRIBUTES, TNPCSUM_DONE)) + skb->csum_level = 1; + } + if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, VLAN_CTAG)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index c5ff385d51ae..d91fa595be98 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -193,6 +193,7 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev) mutex_init(&pdata->i2c_mutex); init_completion(&pdata->i2c_complete); init_completion(&pdata->mdio_complete); + INIT_LIST_HEAD(&pdata->vxlan_ports); pdata->msg_enable = netif_msg_init(debug, default_msg_level); @@ -374,6 +375,28 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) if (pdata->hw_feat.rss) netdev->hw_features |= NETIF_F_RXHASH; + if (pdata->hw_feat.vxn) { + netdev->hw_enc_features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_RXCSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_GRO | + NETIF_F_GSO_UDP_TUNNEL | + 
NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_RX_UDP_TUNNEL_PORT; + + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_RX_UDP_TUNNEL_PORT; + + pdata->vxlan_offloads_set = 1; + pdata->vxlan_features = NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | + NETIF_F_RX_UDP_TUNNEL_PORT; + } + netdev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 48a46a70bf5e..db155fe7f74d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -132,6 +132,7 @@ #include #include #include +#include #define XGBE_DRV_NAME "amd-xgbe" #define XGBE_DRV_VERSION "1.0.3" @@ -817,6 +818,11 @@ struct xgbe_hw_if { /* For ECC */ void (*disable_ecc_ded)(struct xgbe_prv_data *); void (*disable_ecc_sec)(struct xgbe_prv_data *, enum xgbe_ecc_sec); + + /* For VXLAN */ + void (*enable_vxlan)(struct xgbe_prv_data *); + void (*disable_vxlan)(struct xgbe_prv_data *); + void (*set_vxlan_id)(struct xgbe_prv_data *); }; /* This structure represents implementation specific routines for an @@ -941,6 +947,7 @@ struct xgbe_hw_features { unsigned int addn_mac; /* Additional MAC Addresses */ unsigned int ts_src; /* Timestamp Source */ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + unsigned int vxn; /* VXLAN/NVGRE */ /* HW Feature Register1 */ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ @@ -979,6 +986,12 @@ struct xgbe_version_data { unsigned int rx_desc_prefetch; }; +struct xgbe_vxlan_data { + struct list_head list; + sa_family_t sa_family; + __be16 port; +}; + struct xgbe_prv_data { struct net_device *netdev; struct pci_dev *pcidev; @@ -1120,6 +1133,15 @@ struct xgbe_prv_data { u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE]; u32 rss_options; + /* VXLAN settings */ + unsigned int vxlan_port_set; + unsigned int vxlan_offloads_set; + unsigned int vxlan_force_disable; + unsigned int vxlan_port_count; + struct list_head vxlan_ports; + u16 vxlan_port; + netdev_features_t vxlan_features; + /* Netdev related settings */ unsigned char mac_addr[ETH_ALEN]; netdev_features_t netdev_features; -- cgit v1.2.3-55-g7522 From 3010608d033c7cae73d1662493c85b4c3b27015a Mon Sep 17 00:00:00 2001 From: Lendacky, Thomas Date: Fri, 18 Aug 2017 09:04:14 -0500 Subject: amd-xgbe: Add additional ethtool statistics Add some additional statistics for tracking VXLAN packets and checksum errors. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
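The counters added below (tx_vxlan_packets, rx_vxlan_packets, rx_csum_errors, rx_vxlan_csum_errors) are plain u64 fields in xgbe_ext_stats; each XGMAC_EXT_STAT() entry pairs a display string with the field's offset so the generic ethtool hooks can copy the values out. A generic sketch of that descriptor-table pattern (struct and function names here are illustrative, not the driver's):

#include <linux/ethtool.h>
#include <linux/kernel.h>

struct example_ext_stats {
	u64 tx_tunnel_packets;
	u64 rx_tunnel_csum_errors;
};

struct example_stat {
	char name[ETH_GSTRING_LEN];
	size_t offset;		/* offset into the driver's stats struct */
};

#define EXAMPLE_STAT(_name, _field) \
	{ _name, offsetof(struct example_ext_stats, _field) }

static const struct example_stat example_stats[] = {
	EXAMPLE_STAT("tx_tunnel_packets", tx_tunnel_packets),
	EXAMPLE_STAT("rx_tunnel_csum_errors", rx_tunnel_csum_errors),
};

/* .get_ethtool_stats would then copy each field by offset: */
static void example_get_stats(struct example_ext_stats *stats, u64 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_stats); i++)
		data[i] = *(u64 *)((u8 *)stats + example_stats[i].offset);
}

From userspace the new counters then appear in ethtool -S <iface> output alongside the MMC hardware counters.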
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 8 +++++++- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 4 ++++ drivers/net/ethernet/amd/xgbe/xgbe.h | 5 +++++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 1bf671e02c0a..671203dbea3d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1805,10 +1805,13 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel) packet->length); } - if (vxlan) + if (vxlan) { XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP, TX_NORMAL_DESC3_VXLAN_PACKET); + pdata->ext_stats.tx_vxlan_packets += packet->tx_packets; + } + for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) { cur_index++; rdata = XGBE_GET_DESC_DATA(ring, cur_index); @@ -1981,6 +1984,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel) if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, TNP, 1); + pdata->ext_stats.rx_vxlan_packets++; l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T); switch (l34t) { @@ -2018,11 +2022,13 @@ static int xgbe_dev_read(struct xgbe_channel *channel) CSUM_DONE, 0); XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, TNPCSUM_DONE, 0); + pdata->ext_stats.rx_csum_errors++; } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) { XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CSUM_DONE, 0); XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, TNPCSUM_DONE, 0); + pdata->ext_stats.rx_vxlan_csum_errors++; } else { XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS, FRAME, 1); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index cea25acd5de2..ff397bb25042 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -146,6 +146,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = { XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb), XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb), XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g), + XGMAC_EXT_STAT("tx_vxlan_packets", tx_vxlan_packets), XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets), XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb), XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb), @@ -162,6 +163,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = { XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g), XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g), XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb), + XGMAC_EXT_STAT("rx_vxlan_packets", rx_vxlan_packets), XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb), XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb), XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb), @@ -177,6 +179,8 @@ static const struct xgbe_stats xgbe_gstring_stats[] = { XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype), XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow), XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror), + XGMAC_EXT_STAT("rx_csum_errors", rx_csum_errors), + XGMAC_EXT_STAT("rx_vxlan_csum_errors", rx_vxlan_csum_errors), XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes), XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets), XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable), diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 
db155fe7f74d..ad102c8bac7b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -715,6 +715,11 @@ struct xgbe_ext_stats { u64 txq_bytes[XGBE_MAX_DMA_CHANNELS]; u64 rxq_packets[XGBE_MAX_DMA_CHANNELS]; u64 rxq_bytes[XGBE_MAX_DMA_CHANNELS]; + + u64 tx_vxlan_packets; + u64 rx_vxlan_packets; + u64 rx_csum_errors; + u64 rx_vxlan_csum_errors; }; struct xgbe_hw_if { -- cgit v1.2.3-55-g7522 From d0d6683716791b2a2761a1bb025c613eb73da6c3 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 18 Aug 2017 13:46:19 -0700 Subject: net: don't decrement kobj reference count on init failure If kobject_init_and_add failed, then the failure path would decrement the reference count of the queue kobject whose reference count was already zero. Fixes: 114cf5802165 ("bql: Byte queue limits") Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/core/net-sysfs.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index b4f9922b6f23..46ff41bf0210 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -917,20 +917,19 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, "rx-%u", index); if (error) - goto exit; + return error; if (dev->sysfs_rx_queue_group) { error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group); - if (error) - goto exit; + if (error) { + kobject_put(kobj); + return error; + } } kobject_uevent(kobj, KOBJ_ADD); dev_hold(queue->dev); - return error; -exit: - kobject_put(kobj); return error; } #endif /* CONFIG_SYSFS */ @@ -1339,21 +1338,20 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, "tx-%u", index); if (error) - goto exit; + return error; #ifdef CONFIG_BQL error = sysfs_create_group(kobj, &dql_group); - if (error) - goto exit; + if (error) { + kobject_put(kobj); + return error; + } #endif kobject_uevent(kobj, KOBJ_ADD); dev_hold(queue->dev); return 0; -exit: - kobject_put(kobj); - return error; } #endif /* CONFIG_SYSFS */ -- cgit v1.2.3-55-g7522 From b793dc5c6edfb106fd57d12ad6aca64bf160b403 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 18 Aug 2017 13:46:20 -0700 Subject: net: constify netdev_class_file These functions are wrapper arount class_create_file which can take a const attribute. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 8 ++++---- net/core/net-sysfs.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0f1c4cb2441e..eaa77bd9cb80 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4013,17 +4013,17 @@ static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_devi return rc; } -int netdev_class_create_file_ns(struct class_attribute *class_attr, +int netdev_class_create_file_ns(const struct class_attribute *class_attr, const void *ns); -void netdev_class_remove_file_ns(struct class_attribute *class_attr, +void netdev_class_remove_file_ns(const struct class_attribute *class_attr, const void *ns); -static inline int netdev_class_create_file(struct class_attribute *class_attr) +static inline int netdev_class_create_file(const struct class_attribute *class_attr) { return netdev_class_create_file_ns(class_attr, NULL); } -static inline void netdev_class_remove_file(struct class_attribute *class_attr) +static inline void netdev_class_remove_file(const struct class_attribute *class_attr) { netdev_class_remove_file_ns(class_attr, NULL); } diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 46ff41bf0210..40937ee63f14 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1618,14 +1618,14 @@ int netdev_register_kobject(struct net_device *ndev) return error; } -int netdev_class_create_file_ns(struct class_attribute *class_attr, +int netdev_class_create_file_ns(const struct class_attribute *class_attr, const void *ns) { return class_create_file_ns(&net_class, class_attr, ns); } EXPORT_SYMBOL(netdev_class_create_file_ns); -void netdev_class_remove_file_ns(struct class_attribute *class_attr, +void netdev_class_remove_file_ns(const struct class_attribute *class_attr, const void *ns) { class_remove_file_ns(&net_class, class_attr, ns); -- cgit v1.2.3-55-g7522 From e6d473e63536ea1989dc63c066f0c0464a969324 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 18 Aug 2017 13:46:21 -0700 Subject: net: make net_class ro_after_init The net_class in sysfs is only modified on init. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/core/net-sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 40937ee63f14..99061b0a1ebd 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1511,7 +1511,7 @@ static const void *net_namespace(struct device *d) return dev_net(dev); } -static struct class net_class = { +static struct class net_class __ro_after_init = { .name = "net", .dev_release = netdev_release, .dev_groups = net_class_groups, -- cgit v1.2.3-55-g7522 From 737aec57c672c1308d602afecd841455c39561e5 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 18 Aug 2017 13:46:22 -0700 Subject: net: constify net_ns_type_operations This can be const. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 2 +- net/core/net-sysfs.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index eaa77bd9cb80..b0c928598dab 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4028,7 +4028,7 @@ static inline void netdev_class_remove_file(const struct class_attribute *class_ netdev_class_remove_file_ns(class_attr, NULL); } -extern struct kobj_ns_type_operations net_ns_type_operations; +extern const struct kobj_ns_type_operations net_ns_type_operations; const char *netdev_drivername(const struct net_device *dev); diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 99061b0a1ebd..2de441692f28 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1461,7 +1461,7 @@ static const void *net_netlink_ns(struct sock *sk) return sock_net(sk); } -struct kobj_ns_type_operations net_ns_type_operations = { +const struct kobj_ns_type_operations net_ns_type_operations = { .type = KOBJ_NS_TYPE_NET, .current_may_mount = net_current_may_mount, .grab_current_ns = net_grab_current_ns, -- cgit v1.2.3-55-g7522 From ec6cc5993c0f0c1e5a7cbb630c7ecc165e6d9e84 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 18 Aug 2017 13:46:23 -0700 Subject: net: make net sysfs attributes ro_after_init The attributes of net devices are immutable. Ideally, attribute groups would contain const attributes but there are too many places that do modifications of list during startup (in other code) to allow that. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/core/net-sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 2de441692f28..76ec74d4a65b 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -508,7 +508,7 @@ static ssize_t phys_switch_id_show(struct device *dev, } static DEVICE_ATTR_RO(phys_switch_id); -static struct attribute *net_class_attrs[] = { +static struct attribute *net_class_attrs[] __ro_after_init = { &dev_attr_netdev_group.attr, &dev_attr_type.attr, &dev_attr_dev_id.attr, @@ -597,7 +597,7 @@ NETSTAT_ENTRY(rx_compressed); NETSTAT_ENTRY(tx_compressed); NETSTAT_ENTRY(rx_nohandler); -static struct attribute *netstat_attrs[] = { +static struct attribute *netstat_attrs[] __ro_after_init = { &dev_attr_rx_packets.attr, &dev_attr_tx_packets.attr, &dev_attr_rx_bytes.attr, -- cgit v1.2.3-55-g7522 From 718ad681eff47d3d04879ff5f5290bdab0b8bad6 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 18 Aug 2017 13:46:24 -0700 Subject: net: drop unused attribute argument from sysfs queue funcs The show and store functions don't need/use the attribute. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
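With the attribute argument dropped, a custom queue attribute only needs the queue pointer and the output buffer. A minimal sketch under the post-patch prototypes (the attribute name below is hypothetical):

#include <linux/netdevice.h>
#include <linux/sysfs.h>

/* Hypothetical read-only rx-queue attribute using the new show()
 * signature: no struct rx_queue_attribute * parameter any more.
 */
static ssize_t queue_index_show(struct netdev_rx_queue *queue, char *buf)
{
	return sprintf(buf, "%u\n", get_netdev_rx_queue_index(queue));
}

static struct rx_queue_attribute queue_index_attribute =
	__ATTR_RO(queue_index);

The diff that follows applies the same simplification to the in-tree users, including virtio_net's mergeable_rx_buffer_size_show().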
Miller --- drivers/net/virtio_net.c | 2 +- include/linux/netdevice.h | 5 ++--- net/core/net-sysfs.c | 37 +++++++++++-------------------------- 3 files changed, 14 insertions(+), 30 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4302f313d9a7..52ae78ca3d38 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2376,7 +2376,7 @@ err: #ifdef CONFIG_SYSFS static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, - struct rx_queue_attribute *attribute, char *buf) + char *buf) { struct virtnet_info *vi = netdev_priv(queue->dev); unsigned int queue_index = get_netdev_rx_queue_index(queue); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b0c928598dab..c5475b37a631 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -694,10 +694,9 @@ struct netdev_rx_queue { */ struct rx_queue_attribute { struct attribute attr; - ssize_t (*show)(struct netdev_rx_queue *queue, - struct rx_queue_attribute *attr, char *buf); + ssize_t (*show)(struct netdev_rx_queue *queue, char *buf); ssize_t (*store)(struct netdev_rx_queue *queue, - struct rx_queue_attribute *attr, const char *buf, size_t len); + const char *buf, size_t len); }; #ifdef CONFIG_XPS diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 76ec74d4a65b..48714c8024f3 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -661,7 +661,7 @@ static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, if (!attribute->show) return -EIO; - return attribute->show(queue, attribute, buf); + return attribute->show(queue, buf); } static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, @@ -673,7 +673,7 @@ static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, if (!attribute->store) return -EIO; - return attribute->store(queue, attribute, buf, count); + return attribute->store(queue, buf, count); } static const struct sysfs_ops rx_queue_sysfs_ops = { @@ -682,8 +682,7 @@ static const struct sysfs_ops rx_queue_sysfs_ops = { }; #ifdef CONFIG_RPS -static ssize_t show_rps_map(struct netdev_rx_queue *queue, - struct rx_queue_attribute *attribute, char *buf) +static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf) { struct rps_map *map; cpumask_var_t mask; @@ -706,8 +705,7 @@ static ssize_t show_rps_map(struct netdev_rx_queue *queue, } static ssize_t store_rps_map(struct netdev_rx_queue *queue, - struct rx_queue_attribute *attribute, - const char *buf, size_t len) + const char *buf, size_t len) { struct rps_map *old_map, *map; cpumask_var_t mask; @@ -765,7 +763,6 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, } static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, - struct rx_queue_attribute *attr, char *buf) { struct rps_dev_flow_table *flow_table; @@ -788,8 +785,7 @@ static void rps_dev_flow_table_release(struct rcu_head *rcu) } static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, - struct rx_queue_attribute *attr, - const char *buf, size_t len) + const char *buf, size_t len) { unsigned long mask, count; struct rps_dev_flow_table *table, *old_table; @@ -975,10 +971,9 @@ net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num) */ struct netdev_queue_attribute { struct attribute attr; - ssize_t (*show)(struct netdev_queue *queue, - struct netdev_queue_attribute *attr, char *buf); + ssize_t (*show)(struct netdev_queue *queue, char *buf); ssize_t (*store)(struct netdev_queue *queue, - 
struct netdev_queue_attribute *attr, const char *buf, size_t len); + const char *buf, size_t len); }; #define to_netdev_queue_attr(_attr) container_of(_attr, \ struct netdev_queue_attribute, attr) @@ -994,7 +989,7 @@ static ssize_t netdev_queue_attr_show(struct kobject *kobj, if (!attribute->show) return -EIO; - return attribute->show(queue, attribute, buf); + return attribute->show(queue, buf); } static ssize_t netdev_queue_attr_store(struct kobject *kobj, @@ -1007,7 +1002,7 @@ static ssize_t netdev_queue_attr_store(struct kobject *kobj, if (!attribute->store) return -EIO; - return attribute->store(queue, attribute, buf, count); + return attribute->store(queue, buf, count); } static const struct sysfs_ops netdev_queue_sysfs_ops = { @@ -1016,7 +1011,6 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = { }; static ssize_t show_trans_timeout(struct netdev_queue *queue, - struct netdev_queue_attribute *attribute, char *buf) { unsigned long trans_timeout; @@ -1040,7 +1034,6 @@ static unsigned int get_netdev_queue_index(struct netdev_queue *queue) } static ssize_t show_traffic_class(struct netdev_queue *queue, - struct netdev_queue_attribute *attribute, char *buf) { struct net_device *dev = queue->dev; @@ -1055,14 +1048,12 @@ static ssize_t show_traffic_class(struct netdev_queue *queue, #ifdef CONFIG_XPS static ssize_t show_tx_maxrate(struct netdev_queue *queue, - struct netdev_queue_attribute *attribute, char *buf) { return sprintf(buf, "%lu\n", queue->tx_maxrate); } static ssize_t set_tx_maxrate(struct netdev_queue *queue, - struct netdev_queue_attribute *attribute, const char *buf, size_t len) { struct net_device *dev = queue->dev; @@ -1130,7 +1121,6 @@ static ssize_t bql_set(const char *buf, const size_t count, } static ssize_t bql_show_hold_time(struct netdev_queue *queue, - struct netdev_queue_attribute *attr, char *buf) { struct dql *dql = &queue->dql; @@ -1139,7 +1129,6 @@ static ssize_t bql_show_hold_time(struct netdev_queue *queue, } static ssize_t bql_set_hold_time(struct netdev_queue *queue, - struct netdev_queue_attribute *attribute, const char *buf, size_t len) { struct dql *dql = &queue->dql; @@ -1160,7 +1149,6 @@ static struct netdev_queue_attribute bql_hold_time_attribute = bql_set_hold_time); static ssize_t bql_show_inflight(struct netdev_queue *queue, - struct netdev_queue_attribute *attr, char *buf) { struct dql *dql = &queue->dql; @@ -1173,14 +1161,12 @@ static struct netdev_queue_attribute bql_inflight_attribute = #define BQL_ATTR(NAME, FIELD) \ static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \ - struct netdev_queue_attribute *attr, \ char *buf) \ { \ return bql_show(buf, queue->dql.FIELD); \ } \ \ static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ - struct netdev_queue_attribute *attr, \ const char *buf, size_t len) \ { \ return bql_set(buf, len, &queue->dql.FIELD); \ @@ -1211,7 +1197,7 @@ static const struct attribute_group dql_group = { #ifdef CONFIG_XPS static ssize_t show_xps_map(struct netdev_queue *queue, - struct netdev_queue_attribute *attribute, char *buf) + char *buf) { struct net_device *dev = queue->dev; int cpu, len, num_tc = 1, tc = 0; @@ -1258,8 +1244,7 @@ static ssize_t show_xps_map(struct netdev_queue *queue, } static ssize_t store_xps_map(struct netdev_queue *queue, - struct netdev_queue_attribute *attribute, - const char *buf, size_t len) + const char *buf, size_t len) { struct net_device *dev = queue->dev; unsigned long index; -- cgit v1.2.3-55-g7522 From 170c658afc43c2d18a167783824f4e122a07abd2 Mon Sep 17 00:00:00 2001 
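The attribute argument can be dropped because the queue show/store dispatch never needed it: the generic sysfs_ops callback already recovers the containing attribute structure with container_of and invokes the typed handler directly, so the handler has everything it needs from the queue pointer alone. A minimal user-space sketch of that idiom, with made-up names rather than the kernel's types:

	#include <stddef.h>
	#include <stdio.h>

	/* Plain "inner" object, analogous to struct attribute. */
	struct attr {
		const char *name;
	};

	/* Richer "outer" object, analogous to struct netdev_queue_attribute. */
	struct queue_attr {
		struct attr attr;
		int (*show)(char *buf, size_t len);
	};

	/* Recover the outer structure from a pointer to its embedded member. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static int hold_time_show(char *buf, size_t len)
	{
		return snprintf(buf, len, "1000\n");
	}

	static struct queue_attr hold_time = {
		.attr = { .name = "hold_time" },
		.show = hold_time_show,
	};

	/* Generic dispatcher: it only ever sees the inner struct attr. */
	static int attr_show(struct attr *a, char *buf, size_t len)
	{
		struct queue_attr *qa = container_of(a, struct queue_attr, attr);

		return qa->show ? qa->show(buf, len) : -1;
	}

	int main(void)
	{
		char buf[16];

		attr_show(&hold_time.attr, buf, sizeof(buf));
		printf("%s: %s", hold_time.attr.name, buf);
		return 0;
	}

Because the typed handler is looked up from the outer structure before it is called, passing the attribute pointer down a second time adds nothing, which is exactly what the patch above removes.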
From: stephen hemminger Date: Fri, 18 Aug 2017 13:46:25 -0700 Subject: net: make BQL sysfs attributes ro_after_init Also fix macro to not have ; at end. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/core/net-sysfs.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 48714c8024f3..a4af5e2ff398 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1144,9 +1144,9 @@ static ssize_t bql_set_hold_time(struct netdev_queue *queue, return len; } -static struct netdev_queue_attribute bql_hold_time_attribute = - __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time, - bql_set_hold_time); +static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init + = __ATTR(hold_time, S_IRUGO | S_IWUSR, + bql_show_hold_time, bql_set_hold_time); static ssize_t bql_show_inflight(struct netdev_queue *queue, char *buf) @@ -1156,7 +1156,7 @@ static ssize_t bql_show_inflight(struct netdev_queue *queue, return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed); } -static struct netdev_queue_attribute bql_inflight_attribute = +static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init = __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL); #define BQL_ATTR(NAME, FIELD) \ @@ -1172,15 +1172,15 @@ static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \ return bql_set(buf, len, &queue->dql.FIELD); \ } \ \ -static struct netdev_queue_attribute bql_ ## NAME ## _attribute = \ - __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME, \ - bql_set_ ## NAME); +static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \ + = __ATTR(NAME, S_IRUGO | S_IWUSR, \ + bql_show_ ## NAME, bql_set_ ## NAME) -BQL_ATTR(limit, limit) -BQL_ATTR(limit_max, max_limit) -BQL_ATTR(limit_min, min_limit) +BQL_ATTR(limit, limit); +BQL_ATTR(limit_max, max_limit); +BQL_ATTR(limit_min, min_limit); -static struct attribute *dql_attrs[] = { +static struct attribute *dql_attrs[] __ro_after_init = { &bql_limit_attribute.attr, &bql_limit_max_attribute.attr, &bql_limit_min_attribute.attr, -- cgit v1.2.3-55-g7522 From 2b9c758129d7eb2d709291426004e5a8efcba19f Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 18 Aug 2017 13:46:26 -0700 Subject: net: make queue attributes ro_after_init The XPS queue attributes can be ro_after_init. Also use __ATTR_RX macros to simplify initialization. Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- net/core/net-sysfs.c | 40 +++++++++++++++++++--------------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index a4af5e2ff398..808fbb837f25 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -1010,8 +1010,7 @@ static const struct sysfs_ops netdev_queue_sysfs_ops = { .store = netdev_queue_attr_store, }; -static ssize_t show_trans_timeout(struct netdev_queue *queue, - char *buf) +static ssize_t tx_timeout_show(struct netdev_queue *queue, char *buf) { unsigned long trans_timeout; @@ -1033,7 +1032,7 @@ static unsigned int get_netdev_queue_index(struct netdev_queue *queue) return i; } -static ssize_t show_traffic_class(struct netdev_queue *queue, +static ssize_t traffic_class_show(struct netdev_queue *queue, char *buf) { struct net_device *dev = queue->dev; @@ -1047,14 +1046,14 @@ static ssize_t show_traffic_class(struct netdev_queue *queue, } #ifdef CONFIG_XPS -static ssize_t show_tx_maxrate(struct netdev_queue *queue, +static ssize_t tx_maxrate_show(struct netdev_queue *queue, char *buf) { return sprintf(buf, "%lu\n", queue->tx_maxrate); } -static ssize_t set_tx_maxrate(struct netdev_queue *queue, - const char *buf, size_t len) +static ssize_t tx_maxrate_store(struct netdev_queue *queue, + const char *buf, size_t len) { struct net_device *dev = queue->dev; int err, index = get_netdev_queue_index(queue); @@ -1079,16 +1078,15 @@ static ssize_t set_tx_maxrate(struct netdev_queue *queue, return err; } -static struct netdev_queue_attribute queue_tx_maxrate = - __ATTR(tx_maxrate, S_IRUGO | S_IWUSR, - show_tx_maxrate, set_tx_maxrate); +static struct netdev_queue_attribute queue_tx_maxrate __ro_after_init + = __ATTR_RW(tx_maxrate); #endif -static struct netdev_queue_attribute queue_trans_timeout = - __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL); +static struct netdev_queue_attribute queue_trans_timeout __ro_after_init + = __ATTR_RO(tx_timeout); -static struct netdev_queue_attribute queue_traffic_class = - __ATTR(traffic_class, S_IRUGO, show_traffic_class, NULL); +static struct netdev_queue_attribute queue_traffic_class __ro_after_init + = __ATTR_RO(traffic_class); #ifdef CONFIG_BQL /* @@ -1196,8 +1194,8 @@ static const struct attribute_group dql_group = { #endif /* CONFIG_BQL */ #ifdef CONFIG_XPS -static ssize_t show_xps_map(struct netdev_queue *queue, - char *buf) +static ssize_t xps_cpus_show(struct netdev_queue *queue, + char *buf) { struct net_device *dev = queue->dev; int cpu, len, num_tc = 1, tc = 0; @@ -1243,8 +1241,8 @@ static ssize_t show_xps_map(struct netdev_queue *queue, return len < PAGE_SIZE ? len : -EINVAL; } -static ssize_t store_xps_map(struct netdev_queue *queue, - const char *buf, size_t len) +static ssize_t xps_cpus_store(struct netdev_queue *queue, + const char *buf, size_t len) { struct net_device *dev = queue->dev; unsigned long index; @@ -1272,11 +1270,11 @@ static ssize_t store_xps_map(struct netdev_queue *queue, return err ? 
: len; } -static struct netdev_queue_attribute xps_cpus_attribute = - __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map); +static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init + = __ATTR_RW(xps_cpus); #endif /* CONFIG_XPS */ -static struct attribute *netdev_queue_default_attrs[] = { +static struct attribute *netdev_queue_default_attrs[] __ro_after_init = { &queue_trans_timeout.attr, &queue_traffic_class.attr, #ifdef CONFIG_XPS @@ -1306,7 +1304,7 @@ static const void *netdev_queue_namespace(struct kobject *kobj) return ns; } -static struct kobj_type netdev_queue_ktype = { +static struct kobj_type netdev_queue_ktype __ro_after_init = { .sysfs_ops = &netdev_queue_sysfs_ops, .release = netdev_queue_release, .default_attrs = netdev_queue_default_attrs, -- cgit v1.2.3-55-g7522 From 667e427bc356a43e130cfc03ea4273603487cc69 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 18 Aug 2017 13:46:27 -0700 Subject: net: mark receive queue attributes ro_after_init Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/core/net-sysfs.c | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 808fbb837f25..ca82c4a72350 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -655,7 +655,7 @@ static const struct attribute_group wireless_group = { static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { - struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); + const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); struct netdev_rx_queue *queue = to_rx_queue(kobj); if (!attribute->show) @@ -667,7 +667,7 @@ static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr, static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { - struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); + const struct rx_queue_attribute *attribute = to_rx_queue_attr(attr); struct netdev_rx_queue *queue = to_rx_queue(kobj); if (!attribute->store) @@ -842,16 +842,15 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, return len; } -static struct rx_queue_attribute rps_cpus_attribute = - __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map); +static struct rx_queue_attribute rps_cpus_attribute __ro_after_init + = __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map); - -static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute = - __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR, - show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt); +static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute __ro_after_init + = __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR, + show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt); #endif /* CONFIG_RPS */ -static struct attribute *rx_queue_default_attrs[] = { +static struct attribute *rx_queue_default_attrs[] __ro_after_init = { #ifdef CONFIG_RPS &rps_cpus_attribute.attr, &rps_dev_flow_table_cnt_attribute.attr, @@ -896,7 +895,7 @@ static const void *rx_queue_namespace(struct kobject *kobj) return ns; } -static struct kobj_type rx_queue_ktype = { +static struct kobj_type rx_queue_ktype __ro_after_init = { .sysfs_ops = &rx_queue_sysfs_ops, .release = rx_queue_release, .default_attrs = rx_queue_default_attrs, @@ -983,7 +982,8 @@ struct netdev_queue_attribute { static ssize_t netdev_queue_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { - 
struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr); + const struct netdev_queue_attribute *attribute + = to_netdev_queue_attr(attr); struct netdev_queue *queue = to_netdev_queue(kobj); if (!attribute->show) @@ -996,7 +996,8 @@ static ssize_t netdev_queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { - struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr); + const struct netdev_queue_attribute *attribute + = to_netdev_queue_attr(attr); struct netdev_queue *queue = to_netdev_queue(kobj); if (!attribute->store) -- cgit v1.2.3-55-g7522 From 6648c65e7ea72c3b19ea908d046e4a47e90fd907 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Fri, 18 Aug 2017 13:46:28 -0700 Subject: net: style cleanups Make code closer to current style. Mostly whitespace changes. Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- net/core/net-sysfs.c | 68 +++++++++++++++++++++++++++------------------------- 1 file changed, 36 insertions(+), 32 deletions(-) diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index ca82c4a72350..927a6dcbad96 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -97,7 +97,8 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr, return restart_syscall(); if (dev_isalive(netdev)) { - if ((ret = (*set)(netdev, new)) == 0) + ret = (*set)(netdev, new); + if (ret == 0) ret = len; } rtnl_unlock(); @@ -160,6 +161,7 @@ static ssize_t broadcast_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = to_net_dev(dev); + if (dev_isalive(ndev)) return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len); return -EINVAL; @@ -170,7 +172,7 @@ static int change_carrier(struct net_device *dev, unsigned long new_carrier) { if (!netif_running(dev)) return -EINVAL; - return dev_change_carrier(dev, (bool) new_carrier); + return dev_change_carrier(dev, (bool)new_carrier); } static ssize_t carrier_store(struct device *dev, struct device_attribute *attr, @@ -183,9 +185,10 @@ static ssize_t carrier_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); - if (netif_running(netdev)) { + + if (netif_running(netdev)) return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev)); - } + return -EINVAL; } static DEVICE_ATTR_RW(carrier); @@ -290,6 +293,7 @@ static ssize_t carrier_changes_show(struct device *dev, char *buf) { struct net_device *netdev = to_net_dev(dev); + return sprintf(buf, fmt_dec, atomic_read(&netdev->carrier_changes)); } @@ -299,7 +303,7 @@ static DEVICE_ATTR_RO(carrier_changes); static int change_mtu(struct net_device *dev, unsigned long new_mtu) { - return dev_set_mtu(dev, (int) new_mtu); + return dev_set_mtu(dev, (int)new_mtu); } static ssize_t mtu_store(struct device *dev, struct device_attribute *attr, @@ -311,7 +315,7 @@ NETDEVICE_SHOW_RW(mtu, fmt_dec); static int change_flags(struct net_device *dev, unsigned long new_flags) { - return dev_change_flags(dev, (unsigned int) new_flags); + return dev_change_flags(dev, (unsigned int)new_flags); } static ssize_t flags_store(struct device *dev, struct device_attribute *attr, @@ -362,8 +366,8 @@ static int change_gro_flush_timeout(struct net_device *dev, unsigned long val) } static ssize_t gro_flush_timeout_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t len) + struct device_attribute *attr, + const char *buf, size_t len) { if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -412,7 +416,7 @@ 
static DEVICE_ATTR_RW(ifalias); static int change_group(struct net_device *dev, unsigned long new_group) { - dev_set_group(dev, (int) new_group); + dev_set_group(dev, (int)new_group); return 0; } @@ -426,7 +430,7 @@ static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store); static int change_proto_down(struct net_device *dev, unsigned long proto_down) { - return dev_change_proto_down(dev, (bool) proto_down); + return dev_change_proto_down(dev, (bool)proto_down); } static ssize_t proto_down_store(struct device *dev, @@ -549,14 +553,14 @@ static ssize_t netstat_show(const struct device *d, ssize_t ret = -EINVAL; WARN_ON(offset > sizeof(struct rtnl_link_stats64) || - offset % sizeof(u64) != 0); + offset % sizeof(u64) != 0); read_lock(&dev_base_lock); if (dev_isalive(dev)) { struct rtnl_link_stats64 temp; const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp); - ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset)); + ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *)stats) + offset)); } read_unlock(&dev_base_lock); return ret; @@ -565,7 +569,7 @@ static ssize_t netstat_show(const struct device *d, /* generate a read-only statistics attribute */ #define NETSTAT_ENTRY(name) \ static ssize_t name##_show(struct device *d, \ - struct device_attribute *attr, char *buf) \ + struct device_attribute *attr, char *buf) \ { \ return netstat_show(d, attr, buf, \ offsetof(struct rtnl_link_stats64, name)); \ @@ -625,7 +629,6 @@ static struct attribute *netstat_attrs[] __ro_after_init = { NULL }; - static const struct attribute_group netstat_group = { .name = "statistics", .attrs = netstat_attrs, @@ -647,8 +650,8 @@ static const struct attribute_group wireless_group = { #endif /* CONFIG_SYSFS */ #ifdef CONFIG_SYSFS -#define to_rx_queue_attr(_attr) container_of(_attr, \ - struct rx_queue_attribute, attr) +#define to_rx_queue_attr(_attr) \ + container_of(_attr, struct rx_queue_attribute, attr) #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj) @@ -725,8 +728,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, } map = kzalloc(max_t(unsigned int, - RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), - GFP_KERNEL); + RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), + GFP_KERNEL); if (!map) { free_cpumask_var(mask); return -ENOMEM; @@ -736,9 +739,9 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue, for_each_cpu_and(cpu, mask, cpu_online_mask) map->cpus[i++] = cpu; - if (i) + if (i) { map->len = i; - else { + } else { kfree(map); map = NULL; } @@ -827,8 +830,9 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, table->mask = mask; for (count = 0; count <= mask; count++) table->flows[count].cpu = RPS_NO_CPU; - } else + } else { table = NULL; + } spin_lock(&rps_dev_flow_lock); old_table = rcu_dereference_protected(queue->rps_flow_table, @@ -865,7 +869,6 @@ static void rx_queue_release(struct kobject *kobj) struct rps_map *map; struct rps_dev_flow_table *flow_table; - map = rcu_dereference_protected(queue->rps_map, 1); if (map) { RCU_INIT_POINTER(queue->rps_map, NULL); @@ -910,7 +913,7 @@ static int rx_queue_add_kobject(struct net_device *dev, int index) kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL, - "rx-%u", index); + "rx-%u", index); if (error) return error; @@ -974,8 +977,8 @@ struct netdev_queue_attribute { ssize_t (*store)(struct netdev_queue *queue, const char *buf, size_t len); }; -#define to_netdev_queue_attr(_attr) container_of(_attr, \ - struct 
netdev_queue_attribute, attr) +#define to_netdev_queue_attr(_attr) \ + container_of(_attr, struct netdev_queue_attribute, attr) #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj) @@ -1104,9 +1107,9 @@ static ssize_t bql_set(const char *buf, const size_t count, unsigned int value; int err; - if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) + if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) { value = DQL_MAX_LIMIT; - else { + } else { err = kstrtouint(buf, 10, &value); if (err < 0) return err; @@ -1320,7 +1323,7 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index) kobj->kset = dev->queues_kset; error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL, - "tx-%u", index); + "tx-%u", index); if (error) return error; @@ -1377,7 +1380,7 @@ static int register_queue_kobjects(struct net_device *dev) #ifdef CONFIG_SYSFS dev->queues_kset = kset_create_and_add("queues", - NULL, &dev->dev.kobj); + NULL, &dev->dev.kobj); if (!dev->queues_kset) return -ENOMEM; real_rx = dev->real_num_rx_queues; @@ -1467,7 +1470,8 @@ static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) /* pass ifindex to uevent. * ifindex is useful as it won't change (interface name may change) - * and is what RtNetlink uses natively. */ + * and is what RtNetlink uses natively. + */ retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex); exit: @@ -1542,7 +1546,7 @@ EXPORT_SYMBOL(of_find_net_device_by_node); */ void netdev_unregister_kobject(struct net_device *ndev) { - struct device *dev = &(ndev->dev); + struct device *dev = &ndev->dev; if (!atomic_read(&dev_net(ndev)->count)) dev_set_uevent_suppress(dev, 1); @@ -1559,7 +1563,7 @@ void netdev_unregister_kobject(struct net_device *ndev) /* Create sysfs entries for network device. */ int netdev_register_kobject(struct net_device *ndev) { - struct device *dev = &(ndev->dev); + struct device *dev = &ndev->dev; const struct attribute_group **groups = ndev->sysfs_groups; int error = 0; -- cgit v1.2.3-55-g7522 From 06726f30364c38f12519e2c6c046f19b2aa509bc Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:11 -0700 Subject: nfp: link basic ethtool ops to representors Start linking ethtool ops to representors. Begin by adding a separate ops structure and providing link state. Next patches will convert appropriate functions to only use nfp_port, which will make them reusable on representors. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 4 ++++ drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 2 ++ drivers/net/ethernet/netronome/nfp/nfp_port.h | 1 + 3 files changed, 7 insertions(+) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 6e31355c3567..3edc5d62ad5b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -939,6 +939,10 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .set_link_ksettings = nfp_net_set_link_ksettings, }; +const struct ethtool_ops nfp_port_ethtool_ops = { + .get_link = ethtool_op_get_link, +}; + void nfp_net_set_ethtool_ops(struct net_device *netdev) { netdev->ethtool_ops = &nfp_net_ethtool_ops; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 47daad30756c..50f7cc057cc9 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -320,6 +320,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, repr->dst->u.port_info.lower_dev = pf_netdev; netdev->netdev_ops = &nfp_repr_netdev_ops; + netdev->ethtool_ops = &nfp_port_ethtool_ops; + SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); if (nfp_app_has_tc(app)) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index c88e376dcf0f..784d82c2f32c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -106,6 +106,7 @@ struct nfp_port { struct list_head port_list; }; +extern const struct ethtool_ops nfp_port_ethtool_ops; extern const struct switchdev_ops nfp_port_switchdev_ops; int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, -- cgit v1.2.3-55-g7522 From 9e4c2cfc67816276a80da5888a1d9430b60c2183 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:12 -0700 Subject: nfp: provide ethtool_drvinfo on representors Extend representors' ethtool ops to show basic info like firmware version, driver version, and driver name. While at it don't set drvinfo.n_stats and drvinfo.regdump_len, core will invoke appropriate handlers to get those. A helper is added to turn a netdev into nfp_app for convenience. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/nfp_app.c | 20 ++++++++++ drivers/net/ethernet/netronome/nfp/nfp_app.h | 2 + .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 44 ++++++++++++++++------ 3 files changed, 54 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index c704c022574f..505e63f47419 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -38,6 +38,7 @@ #include "nfpcore/nfp_nffw.h" #include "nfp_app.h" #include "nfp_main.h" +#include "nfp_net.h" #include "nfp_net_repr.h" static const struct nfp_app_type *apps[] = { @@ -48,6 +49,25 @@ static const struct nfp_app_type *apps[] = { #endif }; +struct nfp_app *nfp_app_from_netdev(struct net_device *netdev) +{ + if (nfp_netdev_is_nfp_net(netdev)) { + struct nfp_net *nn = netdev_priv(netdev); + + return nn->app; + } + + if (nfp_netdev_is_nfp_repr(netdev)) { + struct nfp_repr *repr = netdev_priv(netdev); + + return repr->app; + } + + WARN(1, "Unknown netdev type for nfp_app\n"); + + return NULL; +} + const char *nfp_app_mip_name(struct nfp_app *app) { if (!app || !app->pf->mip) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index f34e8778fae2..c13b9bbe7e62 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -293,6 +293,8 @@ static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id) return app->type->repr_get(app, id); } +struct nfp_app *nfp_app_from_netdev(struct net_device *netdev); + struct nfp_reprs * nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type, struct nfp_reprs *reprs); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 3edc5d62ad5b..3c34c8b27dcf 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -147,34 +147,53 @@ static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) if (IS_ERR(nsp)) return; - snprintf(version, ETHTOOL_FWVERS_LEN, "sp:%hu.%hu", + snprintf(version, ETHTOOL_FWVERS_LEN, "%hu.%hu", nfp_nsp_get_abi_ver_major(nsp), nfp_nsp_get_abi_ver_minor(nsp)); nfp_nsp_close(nsp); } -static void nfp_net_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *drvinfo) +static void +nfp_get_drvinfo(struct nfp_app *app, struct pci_dev *pdev, + const char *vnic_version, struct ethtool_drvinfo *drvinfo) { char nsp_version[ETHTOOL_FWVERS_LEN] = {}; - struct nfp_net *nn = netdev_priv(netdev); - strlcpy(drvinfo->driver, nn->pdev->driver->name, - sizeof(drvinfo->driver)); + strlcpy(drvinfo->driver, pdev->driver->name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, nfp_driver_version, sizeof(drvinfo->version)); - nfp_net_get_nspinfo(nn->app, nsp_version); + nfp_net_get_nspinfo(app, nsp_version); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), - "%d.%d.%d.%d %s %s %s", + "%s %s %s %s", vnic_version, nsp_version, + nfp_app_mip_name(app), nfp_app_name(app)); +} + +static void +nfp_net_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + char vnic_version[ETHTOOL_FWVERS_LEN] = {}; + struct nfp_net *nn = netdev_priv(netdev); + + snprintf(vnic_version, sizeof(vnic_version), "%d.%d.%d.%d", nn->fw_ver.resv, nn->fw_ver.class, - nn->fw_ver.major, nn->fw_ver.minor, nsp_version, - nfp_app_mip_name(nn->app), nfp_app_name(nn->app)); + 
nn->fw_ver.major, nn->fw_ver.minor); strlcpy(drvinfo->bus_info, pci_name(nn->pdev), sizeof(drvinfo->bus_info)); - drvinfo->n_stats = NN_ET_STATS_LEN; - drvinfo->regdump_len = NFP_NET_CFG_BAR_SZ; + nfp_get_drvinfo(nn->app, nn->pdev, vnic_version, drvinfo); +} + +static void +nfp_app_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct nfp_app *app; + + app = nfp_app_from_netdev(netdev); + if (!app) + return; + + nfp_get_drvinfo(app, app->pdev, "*", drvinfo); } /** @@ -940,6 +959,7 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { }; const struct ethtool_ops nfp_port_ethtool_ops = { + .get_drvinfo = nfp_app_get_drvinfo, .get_link = ethtool_op_get_link, }; -- cgit v1.2.3-55-g7522 From a2f4c3d9bd4a8a85d7cdd8221f7995fdbc98aa4a Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:13 -0700 Subject: nfp: allow retreiving management FW logs on representors Users should be able to dump the management FW logs on any of the driver's netdevs. Make the code only depend on the nfp_app and share it between vNICs and representors. Storing the dump flag is simply dropped for now, since we only support the argument being set to 0. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net.h | 2 -- .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 33 +++++++++++----------- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index b1fa77bd708b..d51d8237b984 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -573,7 +573,6 @@ struct nfp_net_dp { * @tx_bar: Pointer to mapped TX queues * @rx_bar: Pointer to mapped FL/RX queues * @debugfs_dir: Device directory in debugfs - * @ethtool_dump_flag: Ethtool dump flag * @vnic_list: Entry on device vNIC list * @pdev: Backpointer to PCI device * @app: APP handle if available @@ -640,7 +639,6 @@ struct nfp_net { u8 __iomem *rx_bar; struct dentry *debugfs_dir; - u32 ethtool_dump_flag; struct list_head vnic_list; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 3c34c8b27dcf..289fa318a651 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -727,18 +727,18 @@ static int nfp_net_get_coalesce(struct net_device *netdev, /* Other debug dumps */ static int -nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer) +nfp_dump_nsp_diag(struct nfp_app *app, struct ethtool_dump *dump, void *buffer) { struct nfp_resource *res; int ret; - if (!nn->app) + if (!app) return -EOPNOTSUPP; dump->version = 1; dump->flag = NFP_DUMP_NSP_DIAG; - res = nfp_resource_acquire(nn->app->cpp, NFP_RESOURCE_NSP_DIAG); + res = nfp_resource_acquire(app->cpp, NFP_RESOURCE_NSP_DIAG); if (IS_ERR(res)) return PTR_ERR(res); @@ -748,7 +748,7 @@ nfp_dump_nsp_diag(struct nfp_net *nn, struct ethtool_dump *dump, void *buffer) goto exit_release; } - ret = nfp_cpp_read(nn->app->cpp, nfp_resource_cpp_id(res), + ret = nfp_cpp_read(app->cpp, nfp_resource_cpp_id(res), nfp_resource_address(res), buffer, dump->len); if (ret != dump->len) @@ -765,32 +765,30 @@ exit_release: return ret; } -static int nfp_net_set_dump(struct net_device *netdev, struct ethtool_dump *val) +static int nfp_app_set_dump(struct net_device *netdev, struct ethtool_dump *val) { - struct 
nfp_net *nn = netdev_priv(netdev); + struct nfp_app *app = nfp_app_from_netdev(netdev); - if (!nn->app) + if (!app) return -EOPNOTSUPP; if (val->flag != NFP_DUMP_NSP_DIAG) return -EINVAL; - nn->ethtool_dump_flag = val->flag; - return 0; } static int -nfp_net_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) +nfp_app_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump) { - return nfp_dump_nsp_diag(netdev_priv(netdev), dump, NULL); + return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, NULL); } static int -nfp_net_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, +nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump, void *buffer) { - return nfp_dump_nsp_diag(netdev_priv(netdev), dump, buffer); + return nfp_dump_nsp_diag(nfp_app_from_netdev(netdev), dump, buffer); } static int nfp_net_set_coalesce(struct net_device *netdev, @@ -947,9 +945,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { .set_rxfh = nfp_net_set_rxfh, .get_regs_len = nfp_net_get_regs_len, .get_regs = nfp_net_get_regs, - .set_dump = nfp_net_set_dump, - .get_dump_flag = nfp_net_get_dump_flag, - .get_dump_data = nfp_net_get_dump_data, + .set_dump = nfp_app_set_dump, + .get_dump_flag = nfp_app_get_dump_flag, + .get_dump_data = nfp_app_get_dump_data, .get_coalesce = nfp_net_get_coalesce, .set_coalesce = nfp_net_set_coalesce, .get_channels = nfp_net_get_channels, @@ -961,6 +959,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { const struct ethtool_ops nfp_port_ethtool_ops = { .get_drvinfo = nfp_app_get_drvinfo, .get_link = ethtool_op_get_link, + .set_dump = nfp_app_set_dump, + .get_dump_flag = nfp_app_get_dump_flag, + .get_dump_data = nfp_app_get_dump_data, }; void nfp_net_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3-55-g7522 From 1cfcc97bb1575446016e0c061c0629b1e6572523 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:14 -0700 Subject: nfp: don't report standard netdev statistics in ethtool We have been recently called out as a bad example for reporting standard netdev statistics as part of ethtool. Fix that :) Removing standard statistics allows us to simplify the structure holding definitions since we no longer have to mux different types of statistics. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 109 ++++++--------------- 1 file changed, 32 insertions(+), 77 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 289fa318a651..1753bfbc8b47 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -59,74 +59,44 @@ enum nfp_dump_diag { NFP_DUMP_NSP_DIAG = 0, }; -/* Support for stats. 
Returns netdev, driver, and device stats */ -enum { NETDEV_ET_STATS, NFP_NET_DRV_ET_STATS, NFP_NET_DEV_ET_STATS }; -struct _nfp_net_et_stats { +struct nfp_et_stat { char name[ETH_GSTRING_LEN]; - int type; - int sz; int off; }; -#define NN_ET_NETDEV_STAT(m) NETDEV_ET_STATS, \ - FIELD_SIZEOF(struct net_device_stats, m), \ - offsetof(struct net_device_stats, m) -/* For stats in the control BAR (other than Q stats) */ -#define NN_ET_DEV_STAT(m) NFP_NET_DEV_ET_STATS, \ - sizeof(u64), \ - (m) -static const struct _nfp_net_et_stats nfp_net_et_stats[] = { - /* netdev stats */ - {"rx_packets", NN_ET_NETDEV_STAT(rx_packets)}, - {"tx_packets", NN_ET_NETDEV_STAT(tx_packets)}, - {"rx_bytes", NN_ET_NETDEV_STAT(rx_bytes)}, - {"tx_bytes", NN_ET_NETDEV_STAT(tx_bytes)}, - {"rx_errors", NN_ET_NETDEV_STAT(rx_errors)}, - {"tx_errors", NN_ET_NETDEV_STAT(tx_errors)}, - {"rx_dropped", NN_ET_NETDEV_STAT(rx_dropped)}, - {"tx_dropped", NN_ET_NETDEV_STAT(tx_dropped)}, - {"multicast", NN_ET_NETDEV_STAT(multicast)}, - {"collisions", NN_ET_NETDEV_STAT(collisions)}, - {"rx_over_errors", NN_ET_NETDEV_STAT(rx_over_errors)}, - {"rx_crc_errors", NN_ET_NETDEV_STAT(rx_crc_errors)}, - {"rx_frame_errors", NN_ET_NETDEV_STAT(rx_frame_errors)}, - {"rx_fifo_errors", NN_ET_NETDEV_STAT(rx_fifo_errors)}, - {"rx_missed_errors", NN_ET_NETDEV_STAT(rx_missed_errors)}, - {"tx_aborted_errors", NN_ET_NETDEV_STAT(tx_aborted_errors)}, - {"tx_carrier_errors", NN_ET_NETDEV_STAT(tx_carrier_errors)}, - {"tx_fifo_errors", NN_ET_NETDEV_STAT(tx_fifo_errors)}, +static const struct nfp_et_stat nfp_net_et_stats[] = { /* Stats from the device */ - {"dev_rx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_DISCARDS)}, - {"dev_rx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_ERRORS)}, - {"dev_rx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_OCTETS)}, - {"dev_rx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_UC_OCTETS)}, - {"dev_rx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_OCTETS)}, - {"dev_rx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_OCTETS)}, - {"dev_rx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_FRAMES)}, - {"dev_rx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_MC_FRAMES)}, - {"dev_rx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_RX_BC_FRAMES)}, - - {"dev_tx_discards", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_DISCARDS)}, - {"dev_tx_errors", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_ERRORS)}, - {"dev_tx_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_OCTETS)}, - {"dev_tx_uc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_UC_OCTETS)}, - {"dev_tx_mc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_OCTETS)}, - {"dev_tx_bc_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_OCTETS)}, - {"dev_tx_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_FRAMES)}, - {"dev_tx_mc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_MC_FRAMES)}, - {"dev_tx_bc_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_TX_BC_FRAMES)}, - - {"bpf_pass_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_FRAMES)}, - {"bpf_pass_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP0_BYTES)}, + { "dev_rx_discards", NFP_NET_CFG_STATS_RX_DISCARDS }, + { "dev_rx_errors", NFP_NET_CFG_STATS_RX_ERRORS }, + { "dev_rx_bytes", NFP_NET_CFG_STATS_RX_OCTETS }, + { "dev_rx_uc_bytes", NFP_NET_CFG_STATS_RX_UC_OCTETS }, + { "dev_rx_mc_bytes", NFP_NET_CFG_STATS_RX_MC_OCTETS }, + { "dev_rx_bc_bytes", NFP_NET_CFG_STATS_RX_BC_OCTETS }, + { "dev_rx_pkts", NFP_NET_CFG_STATS_RX_FRAMES }, + { "dev_rx_mc_pkts", NFP_NET_CFG_STATS_RX_MC_FRAMES }, + { "dev_rx_bc_pkts", NFP_NET_CFG_STATS_RX_BC_FRAMES }, + + { "dev_tx_discards", NFP_NET_CFG_STATS_TX_DISCARDS }, + { "dev_tx_errors", 
NFP_NET_CFG_STATS_TX_ERRORS }, + { "dev_tx_bytes", NFP_NET_CFG_STATS_TX_OCTETS }, + { "dev_tx_uc_bytes", NFP_NET_CFG_STATS_TX_UC_OCTETS }, + { "dev_tx_mc_bytes", NFP_NET_CFG_STATS_TX_MC_OCTETS }, + { "dev_tx_bc_bytes", NFP_NET_CFG_STATS_TX_BC_OCTETS }, + { "dev_tx_pkts", NFP_NET_CFG_STATS_TX_FRAMES }, + { "dev_tx_mc_pkts", NFP_NET_CFG_STATS_TX_MC_FRAMES }, + { "dev_tx_bc_pkts", NFP_NET_CFG_STATS_TX_BC_FRAMES }, + + { "bpf_pass_pkts", NFP_NET_CFG_STATS_APP0_FRAMES }, + { "bpf_pass_bytes", NFP_NET_CFG_STATS_APP0_BYTES }, /* see comments in outro functions in nfp_bpf_jit.c to find out * how different BPF modes use app-specific counters */ - {"bpf_app1_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_FRAMES)}, - {"bpf_app1_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP1_BYTES)}, - {"bpf_app2_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_FRAMES)}, - {"bpf_app2_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP2_BYTES)}, - {"bpf_app3_pkts", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_FRAMES)}, - {"bpf_app3_bytes", NN_ET_DEV_STAT(NFP_NET_CFG_STATS_APP3_BYTES)}, + { "bpf_app1_pkts", NFP_NET_CFG_STATS_APP1_FRAMES }, + { "bpf_app1_bytes", NFP_NET_CFG_STATS_APP1_BYTES }, + { "bpf_app2_pkts", NFP_NET_CFG_STATS_APP2_FRAMES }, + { "bpf_app2_bytes", NFP_NET_CFG_STATS_APP2_BYTES }, + { "bpf_app3_pkts", NFP_NET_CFG_STATS_APP3_FRAMES }, + { "bpf_app3_bytes", NFP_NET_CFG_STATS_APP3_BYTES }, }; #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) @@ -421,28 +391,13 @@ static void nfp_net_get_stats(struct net_device *netdev, { u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {}; struct nfp_net *nn = netdev_priv(netdev); - struct rtnl_link_stats64 *netdev_stats; - struct rtnl_link_stats64 temp = {}; u64 tmp[NN_ET_RVEC_GATHER_STATS]; u8 __iomem *io_p; int i, j, k; - u8 *p; - - netdev_stats = dev_get_stats(netdev, &temp); for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { - switch (nfp_net_et_stats[i].type) { - case NETDEV_ET_STATS: - p = (char *)netdev_stats + nfp_net_et_stats[i].off; - data[i] = nfp_net_et_stats[i].sz == sizeof(u64) ? - *(u64 *)p : *(u32 *)p; - break; - - case NFP_NET_DEV_ET_STATS: - io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off; - data[i] = readq(io_p); - break; - } + io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off; + data[i] = readq(io_p); } for (j = 0; j < nn->dp.num_r_vecs; j++) { unsigned int start; -- cgit v1.2.3-55-g7522 From 634287ba7544f10e81d2ffcb58e934b2a3514ed2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:15 -0700 Subject: nfp: add helper for printing ethtool strings Add a helper for printing ethtool strings and advancing the pointer correctly. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 65 +++++++++++----------- 1 file changed, 32 insertions(+), 33 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 1753bfbc8b47..ba1c28b8791b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -335,53 +335,52 @@ static int nfp_net_set_ringparam(struct net_device *netdev, return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt); } +static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) 
+{ + va_list args; + + va_start(args, fmt); + vsnprintf(data, ETH_GSTRING_LEN, fmt, args); + va_end(args); + + return data + ETH_GSTRING_LEN; +} + static void nfp_net_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { struct nfp_net *nn = netdev_priv(netdev); - u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { - memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } + for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) + data = nfp_pr_et(data, nfp_net_et_stats[i].name); + for (i = 0; i < nn->dp.num_r_vecs; i++) { - sprintf(p, "rvec_%u_rx_pkts", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rvec_%u_tx_pkts", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rvec_%u_tx_busy", i); - p += ETH_GSTRING_LEN; + data = nfp_pr_et(data, "rvec_%u_rx_pkts", i); + data = nfp_pr_et(data, "rvec_%u_tx_pkts", i); + data = nfp_pr_et(data, "rvec_%u_tx_busy", i); } - strncpy(p, "hw_rx_csum_ok", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "hw_rx_csum_inner_ok", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "hw_rx_csum_err", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "hw_tx_csum", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "hw_tx_inner_csum", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "tx_gather", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - strncpy(p, "tx_lso", ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; + + data = nfp_pr_et(data, "hw_rx_csum_ok"); + data = nfp_pr_et(data, "hw_rx_csum_inner_ok"); + data = nfp_pr_et(data, "hw_rx_csum_err"); + data = nfp_pr_et(data, "hw_tx_csum"); + data = nfp_pr_et(data, "hw_tx_inner_csum"); + data = nfp_pr_et(data, "tx_gather"); + data = nfp_pr_et(data, "tx_lso"); + for (i = 0; i < nn->dp.num_tx_rings; i++) { - sprintf(p, "txq_%u_pkts", i); - p += ETH_GSTRING_LEN; - sprintf(p, "txq_%u_bytes", i); - p += ETH_GSTRING_LEN; + data = nfp_pr_et(data, "txq_%u_pkts", i); + data = nfp_pr_et(data, "txq_%u_bytes", i); } + for (i = 0; i < nn->dp.num_rx_rings; i++) { - sprintf(p, "rxq_%u_pkts", i); - p += ETH_GSTRING_LEN; - sprintf(p, "rxq_%u_bytes", i); - p += ETH_GSTRING_LEN; + data = nfp_pr_et(data, "rxq_%u_pkts", i); + data = nfp_pr_et(data, "rxq_%u_bytes", i); } + break; } } -- cgit v1.2.3-55-g7522 From 325945ede6d49fd94b79c8c243fc3c0f2db2fcdb Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:16 -0700 Subject: nfp: split software and hardware vNIC statistics In preparation for reporting vNIC HW stats on representors split handling of the SW and HW stats in ethtool -S. Representors don't have SW stats (since vNIC is assigned to the VM). Remove the questionable defines which assume nn variable exists in the scope. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 192 +++++++++++++-------- 1 file changed, 120 insertions(+), 72 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index ba1c28b8791b..169f3e3714fd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -100,11 +100,7 @@ static const struct nfp_et_stat nfp_net_et_stats[] = { }; #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) -#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3) #define NN_ET_RVEC_GATHER_STATS 7 -#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2) -#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \ - NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN) static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) { @@ -346,96 +342,146 @@ static __printf(2, 3) u8 *nfp_pr_et(u8 *data, const char *fmt, ...) return data + ETH_GSTRING_LEN; } -static void nfp_net_get_strings(struct net_device *netdev, - u32 stringset, u8 *data) +static unsigned int nfp_vnic_get_sw_stats_count(struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); - int i; - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) - data = nfp_pr_et(data, nfp_net_et_stats[i].name); + return NN_ET_RVEC_GATHER_STATS + nn->dp.num_r_vecs * 3; +} - for (i = 0; i < nn->dp.num_r_vecs; i++) { - data = nfp_pr_et(data, "rvec_%u_rx_pkts", i); - data = nfp_pr_et(data, "rvec_%u_tx_pkts", i); - data = nfp_pr_et(data, "rvec_%u_tx_busy", i); - } +static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data) +{ + struct nfp_net *nn = netdev_priv(netdev); + int i; - data = nfp_pr_et(data, "hw_rx_csum_ok"); - data = nfp_pr_et(data, "hw_rx_csum_inner_ok"); - data = nfp_pr_et(data, "hw_rx_csum_err"); - data = nfp_pr_et(data, "hw_tx_csum"); - data = nfp_pr_et(data, "hw_tx_inner_csum"); - data = nfp_pr_et(data, "tx_gather"); - data = nfp_pr_et(data, "tx_lso"); - - for (i = 0; i < nn->dp.num_tx_rings; i++) { - data = nfp_pr_et(data, "txq_%u_pkts", i); - data = nfp_pr_et(data, "txq_%u_bytes", i); - } + for (i = 0; i < nn->dp.num_r_vecs; i++) { + data = nfp_pr_et(data, "rvec_%u_rx_pkts", i); + data = nfp_pr_et(data, "rvec_%u_tx_pkts", i); + data = nfp_pr_et(data, "rvec_%u_tx_busy", i); + } - for (i = 0; i < nn->dp.num_rx_rings; i++) { - data = nfp_pr_et(data, "rxq_%u_pkts", i); - data = nfp_pr_et(data, "rxq_%u_bytes", i); - } + data = nfp_pr_et(data, "hw_rx_csum_ok"); + data = nfp_pr_et(data, "hw_rx_csum_inner_ok"); + data = nfp_pr_et(data, "hw_rx_csum_err"); + data = nfp_pr_et(data, "hw_tx_csum"); + data = nfp_pr_et(data, "hw_tx_inner_csum"); + data = nfp_pr_et(data, "tx_gather"); + data = nfp_pr_et(data, "tx_lso"); - break; - } + return data; } -static void nfp_net_get_stats(struct net_device *netdev, - struct ethtool_stats *stats, u64 *data) +static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) { u64 gathered_stats[NN_ET_RVEC_GATHER_STATS] = {}; struct nfp_net *nn = netdev_priv(netdev); u64 tmp[NN_ET_RVEC_GATHER_STATS]; - u8 __iomem *io_p; - int i, j, k; + unsigned int i, j; - for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) { - io_p = nn->dp.ctrl_bar + nfp_net_et_stats[i].off; - data[i] = readq(io_p); - } - for (j = 0; j < nn->dp.num_r_vecs; j++) { + for (i = 0; i < nn->dp.num_r_vecs; i++) { unsigned int start; do { - start = u64_stats_fetch_begin(&nn->r_vecs[j].rx_sync); - 
data[i++] = nn->r_vecs[j].rx_pkts; - tmp[0] = nn->r_vecs[j].hw_csum_rx_ok; - tmp[1] = nn->r_vecs[j].hw_csum_rx_inner_ok; - tmp[2] = nn->r_vecs[j].hw_csum_rx_error; - } while (u64_stats_fetch_retry(&nn->r_vecs[j].rx_sync, start)); + start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); + *data++ = nn->r_vecs[i].rx_pkts; + tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; + tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; + tmp[2] = nn->r_vecs[i].hw_csum_rx_error; + } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start)); do { - start = u64_stats_fetch_begin(&nn->r_vecs[j].tx_sync); - data[i++] = nn->r_vecs[j].tx_pkts; - data[i++] = nn->r_vecs[j].tx_busy; - tmp[3] = nn->r_vecs[j].hw_csum_tx; - tmp[4] = nn->r_vecs[j].hw_csum_tx_inner; - tmp[5] = nn->r_vecs[j].tx_gather; - tmp[6] = nn->r_vecs[j].tx_lso; - } while (u64_stats_fetch_retry(&nn->r_vecs[j].tx_sync, start)); - - for (k = 0; k < NN_ET_RVEC_GATHER_STATS; k++) - gathered_stats[k] += tmp[k]; + start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); + *data++ = nn->r_vecs[i].tx_pkts; + *data++ = nn->r_vecs[i].tx_busy; + tmp[3] = nn->r_vecs[i].hw_csum_tx; + tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; + tmp[5] = nn->r_vecs[i].tx_gather; + tmp[6] = nn->r_vecs[i].tx_lso; + } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); + + for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) + gathered_stats[j] += tmp[j]; } + for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) - data[i++] = gathered_stats[j]; - for (j = 0; j < nn->dp.num_tx_rings; j++) { - io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j); - data[i++] = readq(io_p); - io_p = nn->dp.ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8; - data[i++] = readq(io_p); + *data++ = gathered_stats[j]; + + return data; +} + +static unsigned int +nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings) +{ + return NN_ET_GLOBAL_STATS_LEN + (rx_rings + tx_rings) * 2; +} + +static u8 * +nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, + unsigned int tx_rings) +{ + int i; + + for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) + data = nfp_pr_et(data, nfp_net_et_stats[i].name); + + for (i = 0; i < tx_rings; i++) { + data = nfp_pr_et(data, "txq_%u_pkts", i); + data = nfp_pr_et(data, "txq_%u_bytes", i); } - for (j = 0; j < nn->dp.num_rx_rings; j++) { - io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j); - data[i++] = readq(io_p); - io_p = nn->dp.ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8; - data[i++] = readq(io_p); + + for (i = 0; i < rx_rings; i++) { + data = nfp_pr_et(data, "rxq_%u_pkts", i); + data = nfp_pr_et(data, "rxq_%u_bytes", i); } + + return data; +} + +static u64 * +nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, + unsigned int rx_rings, unsigned int tx_rings) +{ + unsigned int i; + + for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) + *data++ = readq(mem + nfp_net_et_stats[i].off); + + for (i = 0; i < tx_rings; i++) { + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i)); + *data++ = readq(mem + NFP_NET_CFG_TXR_STATS(i) + 8); + } + + for (i = 0; i < rx_rings; i++) { + *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i)); + *data++ = readq(mem + NFP_NET_CFG_RXR_STATS(i) + 8); + } + + return data; +} + +static void nfp_net_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + struct nfp_net *nn = netdev_priv(netdev); + + switch (stringset) { + case ETH_SS_STATS: + data = nfp_vnic_get_sw_stats_strings(netdev, data); + data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings, + nn->dp.num_tx_rings); + break; + } +} + +static void +nfp_net_get_stats(struct net_device *netdev, struct 
ethtool_stats *stats, + u64 *data) +{ + struct nfp_net *nn = netdev_priv(netdev); + + data = nfp_vnic_get_sw_stats(netdev, data); + data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, + nn->dp.num_rx_rings, nn->dp.num_tx_rings); } static int nfp_net_get_sset_count(struct net_device *netdev, int sset) @@ -444,7 +490,9 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) switch (sset) { case ETH_SS_STATS: - return NN_ET_STATS_LEN; + return nfp_vnic_get_sw_stats_count(netdev) + + nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings, + nn->dp.num_tx_rings); default: return -EOPNOTSUPP; } -- cgit v1.2.3-55-g7522 From 7344bea1401627cd1b02d19252baa1c40c0fb5e9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:17 -0700 Subject: nfp: store pointer to MAC statistics in nfp_port Store pointer to device memory containing MAC statistics in nfp_port. This simplifies representor code and will be used to dump those statistics in ethtool as well. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 12 ++++-------- drivers/net/ethernet/netronome/nfp/nfp_port.c | 3 +++ drivers/net/ethernet/netronome/nfp/nfp_port.h | 2 ++ 3 files changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 50f7cc057cc9..28e932eab812 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -78,12 +78,10 @@ void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len) } static void -nfp_repr_phy_port_get_stats64(const struct nfp_app *app, u8 phy_port, +nfp_repr_phy_port_get_stats64(struct nfp_port *port, struct rtnl_link_stats64 *stats) { - u8 __iomem *mem; - - mem = app->pf->mac_stats_mem + phy_port * NFP_MAC_STATS_SIZE; + u8 __iomem *mem = port->eth_stats; /* TX and RX stats are flipped as we are returning the stats as seen * at the switch port corresponding to the phys port. 
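The comment above captures the one subtlety in these counters: a representor mirrors the switch port, so the statistics read for the physical port are swapped before being reported, with frames the port received counted as the representor's transmit traffic and frames the port sent counted as its receive traffic. A rough, self-contained illustration of that swap (hypothetical types, not the driver's structures):

	#include <stdio.h>

	/* Hypothetical counter set as seen by the physical port. */
	struct port_counters {
		unsigned long long rx_frames;	/* frames the port received */
		unsigned long long tx_frames;	/* frames the port sent */
	};

	struct repr_stats {
		unsigned long long rx_packets;
		unsigned long long tx_packets;
	};

	/*
	 * The representor faces the switch port, so the port's RX becomes
	 * the representor's TX and the port's TX becomes its RX.
	 */
	static void repr_fill_stats(const struct port_counters *port,
				    struct repr_stats *stats)
	{
		stats->tx_packets = port->rx_frames;
		stats->rx_packets = port->tx_frames;
	}

	int main(void)
	{
		struct port_counters port = { .rx_frames = 10, .tx_frames = 3 };
		struct repr_stats stats;

		repr_fill_stats(&port, &stats);
		printf("repr rx=%llu tx=%llu\n", stats.rx_packets, stats.tx_packets);
		return 0;
	}
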
@@ -141,7 +139,6 @@ static void nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct nfp_repr *repr = netdev_priv(netdev); - struct nfp_eth_table_port *eth_port; struct nfp_app *app = repr->app; if (WARN_ON(!repr->port)) @@ -149,10 +146,9 @@ nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) switch (repr->port->type) { case NFP_PORT_PHYS_PORT: - eth_port = __nfp_port_get_eth_port(repr->port); - if (!eth_port) + if (!__nfp_port_get_eth_port(repr->port)) break; - nfp_repr_phy_port_get_stats64(app, eth_port->index, stats); + nfp_repr_phy_port_get_stats64(repr->port, stats); break; case NFP_PORT_PF_PORT: nfp_repr_pf_get_stats64(app, repr->port->pf_id, stats); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 0cf65e57addb..34a6e035fe9a 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -225,6 +225,9 @@ int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, port->eth_port = &pf->eth_tbl->ports[id]; port->eth_id = pf->eth_tbl->ports[id].index; + if (pf->mac_stats_mem) + port->eth_stats = + pf->mac_stats_mem + port->eth_id * NFP_MAC_STATS_SIZE; return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 784d82c2f32c..cf71ac065e85 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -76,6 +76,7 @@ enum nfp_port_flags { * @dl_port: devlink port structure * @eth_id: for %NFP_PORT_PHYS_PORT port ID in NFP enumeration scheme * @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry + * @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available * @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3) * @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id * @port_list: entry on pf's list of ports @@ -95,6 +96,7 @@ struct nfp_port { struct { unsigned int eth_id; struct nfp_eth_table_port *eth_port; + u8 __iomem *eth_stats; }; /* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */ struct { -- cgit v1.2.3-55-g7522 From 098ce840c9ef7b474437a766a0f9759b5c3e1ae6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:18 -0700 Subject: nfp: report MAC statistics in ethtool Add reporting of MAC statistics in ethtool. MAC statistics are read out from the MAC IP and accumulated by application FW, therefore their presence depends on the application FW. Add missing defines and string names for the statistics and dump them in ethtool -S. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 126 ++++++++++++++++++++- drivers/net/ethernet/netronome/nfp/nfp_port.h | 44 ++++--- 2 files changed, 153 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 169f3e3714fd..f33c341844be 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -99,6 +99,86 @@ static const struct nfp_et_stat nfp_net_et_stats[] = { { "bpf_app3_bytes", NFP_NET_CFG_STATS_APP3_BYTES }, }; +static const struct nfp_et_stat nfp_mac_et_stats[] = { + { "rx_octets", NFP_MAC_STATS_RX_IN_OCTETS, }, + { "rx_frame_too_long_errors", + NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS, }, + { "rx_range_length_errors", NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS, }, + { "rx_vlan_reveive_ok", NFP_MAC_STATS_RX_VLAN_REVEIVE_OK, }, + { "rx_errors", NFP_MAC_STATS_RX_IN_ERRORS, }, + { "rx_broadcast_pkts", NFP_MAC_STATS_RX_IN_BROADCAST_PKTS, }, + { "rx_drop_events", NFP_MAC_STATS_RX_DROP_EVENTS, }, + { "rx_alignment_errors", NFP_MAC_STATS_RX_ALIGNMENT_ERRORS, }, + { "rx_pause_mac_ctrl_frames", + NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES, }, + { "rx_frames_received_ok", NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK, }, + { "rx_frame_check_sequence_errors", + NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS, }, + { "rx_unicast_pkts", NFP_MAC_STATS_RX_UNICAST_PKTS, }, + { "rx_multicast_pkts", NFP_MAC_STATS_RX_MULTICAST_PKTS, }, + { "rx_pkts", NFP_MAC_STATS_RX_PKTS, }, + { "rx_undersize_pkts", NFP_MAC_STATS_RX_UNDERSIZE_PKTS, }, + { "rx_pkts_64_octets", NFP_MAC_STATS_RX_PKTS_64_OCTETS, }, + { "rx_pkts_65_to_127_octets", + NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS, }, + { "rx_pkts_128_to_255_octets", + NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS, }, + { "rx_pkts_256_to_511_octets", + NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS, }, + { "rx_pkts_512_to_1023_octets", + NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS, }, + { "rx_pkts_1024_to_1518_octets", + NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS, }, + { "rx_pkts_1519_to_max_octets", + NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS, }, + { "rx_jabbers", NFP_MAC_STATS_RX_JABBERS, }, + { "rx_fragments", NFP_MAC_STATS_RX_FRAGMENTS, }, + { "rx_oversize_pkts", NFP_MAC_STATS_RX_OVERSIZE_PKTS, }, + { "rx_pause_frames_class0", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0, }, + { "rx_pause_frames_class1", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1, }, + { "rx_pause_frames_class2", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2, }, + { "rx_pause_frames_class3", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3, }, + { "rx_pause_frames_class4", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4, }, + { "rx_pause_frames_class5", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5, }, + { "rx_pause_frames_class6", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6, }, + { "rx_pause_frames_class7", NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7, }, + { "rx_mac_ctrl_frames_received", + NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED, }, + { "rx_mac_head_drop", NFP_MAC_STATS_RX_MAC_HEAD_DROP, }, + { "tx_queue_drop", NFP_MAC_STATS_TX_QUEUE_DROP, }, + { "tx_octets", NFP_MAC_STATS_TX_OUT_OCTETS, }, + { "tx_vlan_transmitted_ok", NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK, }, + { "tx_errors", NFP_MAC_STATS_TX_OUT_ERRORS, }, + { "tx_broadcast_pkts", NFP_MAC_STATS_TX_BROADCAST_PKTS, }, + { "tx_pause_mac_ctrl_frames", + NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES, }, + { "tx_frames_transmitted_ok", + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK, }, + { "tx_unicast_pkts", NFP_MAC_STATS_TX_UNICAST_PKTS, }, + { "tx_multicast_pkts", 
NFP_MAC_STATS_TX_MULTICAST_PKTS, }, + { "tx_pkts_64_octets", NFP_MAC_STATS_TX_PKTS_64_OCTETS, }, + { "tx_pkts_65_to_127_octets", + NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS, }, + { "tx_pkts_128_to_255_octets", + NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS, }, + { "tx_pkts_256_to_511_octets", + NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS, }, + { "tx_pkts_512_to_1023_octets", + NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS, }, + { "tx_pkts_1024_to_1518_octets", + NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS, }, + { "tx_pkts_1519_to_max_octets", + NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS, }, + { "tx_pause_frames_class0", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0, }, + { "tx_pause_frames_class1", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1, }, + { "tx_pause_frames_class2", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2, }, + { "tx_pause_frames_class3", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3, }, + { "tx_pause_frames_class4", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4, }, + { "tx_pause_frames_class5", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5, }, + { "tx_pause_frames_class6", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6, }, + { "tx_pause_frames_class7", NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7, }, +}; + #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) #define NN_ET_RVEC_GATHER_STATS 7 @@ -459,6 +539,47 @@ nfp_vnic_get_hw_stats(u64 *data, u8 __iomem *mem, return data; } +static unsigned int nfp_mac_get_stats_count(struct net_device *netdev) +{ + struct nfp_port *port; + + port = nfp_port_from_netdev(netdev); + if (!__nfp_port_get_eth_port(port) || !port->eth_stats) + return 0; + + return ARRAY_SIZE(nfp_mac_et_stats); +} + +static u8 *nfp_mac_get_stats_strings(struct net_device *netdev, u8 *data) +{ + struct nfp_port *port; + unsigned int i; + + port = nfp_port_from_netdev(netdev); + if (!__nfp_port_get_eth_port(port) || !port->eth_stats) + return data; + + for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++) + data = nfp_pr_et(data, "mac.%s", nfp_mac_et_stats[i].name); + + return data; +} + +static u64 *nfp_mac_get_stats(struct net_device *netdev, u64 *data) +{ + struct nfp_port *port; + unsigned int i; + + port = nfp_port_from_netdev(netdev); + if (!__nfp_port_get_eth_port(port) || !port->eth_stats) + return data; + + for (i = 0; i < ARRAY_SIZE(nfp_mac_et_stats); i++) + *data++ = readq(port->eth_stats + nfp_mac_et_stats[i].off); + + return data; +} + static void nfp_net_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { @@ -469,6 +590,7 @@ static void nfp_net_get_strings(struct net_device *netdev, data = nfp_vnic_get_sw_stats_strings(netdev, data); data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings, nn->dp.num_tx_rings); + data = nfp_mac_get_stats_strings(netdev, data); break; } } @@ -482,6 +604,7 @@ nfp_net_get_stats(struct net_device *netdev, struct ethtool_stats *stats, data = nfp_vnic_get_sw_stats(netdev, data); data = nfp_vnic_get_hw_stats(data, nn->dp.ctrl_bar, nn->dp.num_rx_rings, nn->dp.num_tx_rings); + data = nfp_mac_get_stats(netdev, data); } static int nfp_net_get_sset_count(struct net_device *netdev, int sset) @@ -492,7 +615,8 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_STATS: return nfp_vnic_get_sw_stats_count(netdev) + nfp_vnic_get_hw_stats_count(nn->dp.num_rx_rings, - nn->dp.num_tx_rings); + nn->dp.num_tx_rings) + + nfp_mac_get_stats_count(netdev); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index cf71ac065e85..de8ec609b57e 100644 --- 
a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -147,31 +147,32 @@ void nfp_devlink_port_unregister(struct nfp_port *port); #define NFP_MAC_STATS_SIZE 0x0200 #define NFP_MAC_STATS_RX_IN_OCTETS (NFP_MAC_STATS_BASE + 0x000) + /* unused 0x008 */ #define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS (NFP_MAC_STATS_BASE + 0x010) #define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS (NFP_MAC_STATS_BASE + 0x018) #define NFP_MAC_STATS_RX_VLAN_REVEIVE_OK (NFP_MAC_STATS_BASE + 0x020) #define NFP_MAC_STATS_RX_IN_ERRORS (NFP_MAC_STATS_BASE + 0x028) #define NFP_MAC_STATS_RX_IN_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x030) -#define NFP_MAC_STATS_RX_STATS_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038) +#define NFP_MAC_STATS_RX_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038) #define NFP_MAC_STATS_RX_ALIGNMENT_ERRORS (NFP_MAC_STATS_BASE + 0x040) #define NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES (NFP_MAC_STATS_BASE + 0x048) #define NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK (NFP_MAC_STATS_BASE + 0x050) #define NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS (NFP_MAC_STATS_BASE + 0x058) #define NFP_MAC_STATS_RX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x060) #define NFP_MAC_STATS_RX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x068) -#define NFP_MAC_STATS_RX_STATS_PKTS (NFP_MAC_STATS_BASE + 0x070) -#define NFP_MAC_STATS_RX_STATS_UNDERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x078) -#define NFP_MAC_STATS_RX_STATS_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x080) -#define NFP_MAC_STATS_RX_STATS_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x088) -#define NFP_MAC_STATS_RX_STATS_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x090) -#define NFP_MAC_STATS_RX_STATS_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x098) -#define NFP_MAC_STATS_RX_STATS_JABBERS (NFP_MAC_STATS_BASE + 0x0a0) -#define NFP_MAC_STATS_RX_STATS_FRAGMENTS (NFP_MAC_STATS_BASE + 0x0a8) +#define NFP_MAC_STATS_RX_PKTS (NFP_MAC_STATS_BASE + 0x070) +#define NFP_MAC_STATS_RX_UNDERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x078) +#define NFP_MAC_STATS_RX_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x080) +#define NFP_MAC_STATS_RX_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x088) +#define NFP_MAC_STATS_RX_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x090) +#define NFP_MAC_STATS_RX_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x098) +#define NFP_MAC_STATS_RX_JABBERS (NFP_MAC_STATS_BASE + 0x0a0) +#define NFP_MAC_STATS_RX_FRAGMENTS (NFP_MAC_STATS_BASE + 0x0a8) #define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2 (NFP_MAC_STATS_BASE + 0x0b0) #define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3 (NFP_MAC_STATS_BASE + 0x0b8) -#define NFP_MAC_STATS_RX_STATS_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x0c0) -#define NFP_MAC_STATS_RX_STATS_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x0c8) -#define NFP_MAC_STATS_RX_STATS_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x0d0) +#define NFP_MAC_STATS_RX_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x0c0) +#define NFP_MAC_STATS_RX_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x0c8) +#define NFP_MAC_STATS_RX_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x0d0) #define NFP_MAC_STATS_RX_OVERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x0d8) #define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0 (NFP_MAC_STATS_BASE + 0x0e0) #define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1 (NFP_MAC_STATS_BASE + 0x0e8) @@ -181,9 +182,12 @@ void nfp_devlink_port_unregister(struct nfp_port *port); #define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7 (NFP_MAC_STATS_BASE + 0x108) #define NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED (NFP_MAC_STATS_BASE + 0x110) #define NFP_MAC_STATS_RX_MAC_HEAD_DROP (NFP_MAC_STATS_BASE + 
0x118) - + /* unused 0x120 */ + /* unused 0x128 */ + /* unused 0x130 */ #define NFP_MAC_STATS_TX_QUEUE_DROP (NFP_MAC_STATS_BASE + 0x138) #define NFP_MAC_STATS_TX_OUT_OCTETS (NFP_MAC_STATS_BASE + 0x140) + /* unused 0x148 */ #define NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK (NFP_MAC_STATS_BASE + 0x150) #define NFP_MAC_STATS_TX_OUT_ERRORS (NFP_MAC_STATS_BASE + 0x158) #define NFP_MAC_STATS_TX_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x160) @@ -195,8 +199,16 @@ void nfp_devlink_port_unregister(struct nfp_port *port); #define NFP_MAC_STATS_TX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x190) #define NFP_MAC_STATS_TX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x198) #define NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x1a0) -#define NFP_MAC_STATS_TX_PKTS_127_TO_512_OCTETS (NFP_MAC_STATS_BASE + 0x1a8) -#define NFP_MAC_STATS_TX_PKTS_128_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x1b0) -#define NFP_MAC_STATS_TX_PKTS_1518_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x1b8) +#define NFP_MAC_STATS_TX_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x1a8) +#define NFP_MAC_STATS_TX_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x1b0) +#define NFP_MAC_STATS_TX_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x1b8) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS0 (NFP_MAC_STATS_BASE + 0x1c0) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS1 (NFP_MAC_STATS_BASE + 0x1c8) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS4 (NFP_MAC_STATS_BASE + 0x1d0) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS5 (NFP_MAC_STATS_BASE + 0x1d8) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS2 (NFP_MAC_STATS_BASE + 0x1e0) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS3 (NFP_MAC_STATS_BASE + 0x1e8) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS6 (NFP_MAC_STATS_BASE + 0x1f0) +#define NFP_MAC_STATS_TX_PAUSE_FRAMES_CLASS7 (NFP_MAC_STATS_BASE + 0x1f8) #endif -- cgit v1.2.3-55-g7522 From ef0ec676a7181d418bb9051ccfe1cd13a1f47ab2 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:19 -0700 Subject: nfp: add pointer to vNIC config memory to nfp_port structure Simplify the statistics handling code by keeping pointer to vNIC's config memory in nfp_port. Note that this is referring to the representor side of vNICs, vNIC side has the pointer in nfp_net. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/netronome/nfp/flower/main.c | 8 +++- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 45 +++++------------------ drivers/net/ethernet/netronome/nfp/nfp_port.h | 2 + 3 files changed, 18 insertions(+), 37 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 3088e959f2a3..126a6b5233bf 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -159,12 +159,18 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, goto err_reprs_clean; } + /* For now we only support 1 PF */ + WARN_ON(repr_type == NFP_REPR_TYPE_PF && i); + port = nfp_port_alloc(app, port_type, reprs->reprs[i]); if (repr_type == NFP_REPR_TYPE_PF) { port->pf_id = i; + port->vnic = priv->nn->dp.ctrl_bar; } else { - port->pf_id = 0; /* For now we only support 1 PF */ + port->pf_id = 0; port->vf_id = i; + port->vnic = + app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; } eth_hw_addr_random(reprs->reprs[i]); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index 28e932eab812..0f9878d1bf40 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -96,50 +96,25 @@ nfp_repr_phy_port_get_stats64(struct nfp_port *port, } static void -nfp_repr_vf_get_stats64(const struct nfp_app *app, u8 vf, - struct rtnl_link_stats64 *stats) +nfp_repr_vnic_get_stats64(struct nfp_port *port, + struct rtnl_link_stats64 *stats) { - u8 __iomem *mem; - - mem = app->pf->vf_cfg_mem + vf * NFP_NET_CFG_BAR_SZ; - /* TX and RX stats are flipped as we are returning the stats as seen * at the switch port corresponding to the VF. */ - stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES); - stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS); - stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS); + stats->tx_packets = readq(port->vnic + NFP_NET_CFG_STATS_RX_FRAMES); + stats->tx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_RX_OCTETS); + stats->tx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_RX_DISCARDS); - stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES); - stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS); - stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS); -} - -static void -nfp_repr_pf_get_stats64(const struct nfp_app *app, u8 pf, - struct rtnl_link_stats64 *stats) -{ - u8 __iomem *mem; - - if (pf) - return; - - mem = nfp_cpp_area_iomem(app->pf->data_vnic_bar); - - stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES); - stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS); - stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS); - - stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES); - stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS); - stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS); + stats->rx_packets = readq(port->vnic + NFP_NET_CFG_STATS_TX_FRAMES); + stats->rx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_TX_OCTETS); + stats->rx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_TX_DISCARDS); } static void nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct nfp_repr *repr = netdev_priv(netdev); - struct nfp_app *app = repr->app; if (WARN_ON(!repr->port)) return; @@ -151,10 +126,8 @@ nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) nfp_repr_phy_port_get_stats64(repr->port, stats); break; 
case NFP_PORT_PF_PORT: - nfp_repr_pf_get_stats64(app, repr->port->pf_id, stats); - break; case NFP_PORT_VF_PORT: - nfp_repr_vf_get_stats64(app, repr->port->vf_id, stats); + nfp_repr_vnic_get_stats64(repr->port, stats); default: break; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index de8ec609b57e..81c034018133 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -79,6 +79,7 @@ enum nfp_port_flags { * @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available * @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3) * @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id + * @vnic: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT vNIC ctrl memory * @port_list: entry on pf's list of ports */ struct nfp_port { @@ -102,6 +103,7 @@ struct nfp_port { struct { unsigned int pf_id; unsigned int vf_id; + u8 __iomem *vnic; }; }; -- cgit v1.2.3-55-g7522 From 899a37ade8c6dd1619d510c1e3b4b99d508272a9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:20 -0700 Subject: nfp: add ethtool statistics for representors Representors may be associated with both VFs or more importantly with physical ports. Allow vNIC and MAC statistics to be read with ethtool -S on representors. In case of vNICs we reuse the vNIC statistic helper, we just need to swap RX and TX to give statistics the "switch perspective." Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- .../net/ethernet/netronome/nfp/nfp_net_ethtool.c | 70 ++++++++++++++++++++-- drivers/net/ethernet/netronome/nfp/nfp_port.h | 5 ++ 2 files changed, 71 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index f33c341844be..07969f06df10 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -180,6 +180,7 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = { }; #define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats) +#define NN_ET_SWITCH_STATS_LEN 9 #define NN_ET_RVEC_GATHER_STATS 7 static void nfp_net_get_nspinfo(struct nfp_app *app, char *version) @@ -497,11 +498,24 @@ nfp_vnic_get_hw_stats_count(unsigned int rx_rings, unsigned int tx_rings) static u8 * nfp_vnic_get_hw_stats_strings(u8 *data, unsigned int rx_rings, - unsigned int tx_rings) + unsigned int tx_rings, bool repr) { - int i; + int swap_off, i; - for (i = 0; i < NN_ET_GLOBAL_STATS_LEN; i++) + BUILD_BUG_ON(NN_ET_GLOBAL_STATS_LEN < NN_ET_SWITCH_STATS_LEN * 2); + /* If repr is true first add SWITCH_STATS_LEN and then subtract it + * effectively swapping the RX and TX statistics (giving us the RX + * and TX from perspective of the switch). 
+ */ + swap_off = repr * NN_ET_SWITCH_STATS_LEN; + + for (i = 0; i < NN_ET_SWITCH_STATS_LEN; i++) + data = nfp_pr_et(data, nfp_net_et_stats[i + swap_off].name); + + for (i = NN_ET_SWITCH_STATS_LEN; i < NN_ET_SWITCH_STATS_LEN * 2; i++) + data = nfp_pr_et(data, nfp_net_et_stats[i - swap_off].name); + + for (i = NN_ET_SWITCH_STATS_LEN * 2; i < NN_ET_GLOBAL_STATS_LEN; i++) data = nfp_pr_et(data, nfp_net_et_stats[i].name); for (i = 0; i < tx_rings; i++) { @@ -589,7 +603,8 @@ static void nfp_net_get_strings(struct net_device *netdev, case ETH_SS_STATS: data = nfp_vnic_get_sw_stats_strings(netdev, data); data = nfp_vnic_get_hw_stats_strings(data, nn->dp.num_rx_rings, - nn->dp.num_tx_rings); + nn->dp.num_tx_rings, + false); data = nfp_mac_get_stats_strings(netdev, data); break; } @@ -622,6 +637,50 @@ static int nfp_net_get_sset_count(struct net_device *netdev, int sset) } } +static void nfp_port_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + + switch (stringset) { + case ETH_SS_STATS: + if (nfp_port_is_vnic(port)) + data = nfp_vnic_get_hw_stats_strings(data, 0, 0, true); + else + data = nfp_mac_get_stats_strings(netdev, data); + break; + } +} + +static void +nfp_port_get_stats(struct net_device *netdev, struct ethtool_stats *stats, + u64 *data) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + + if (nfp_port_is_vnic(port)) + data = nfp_vnic_get_hw_stats(data, port->vnic, 0, 0); + else + data = nfp_mac_get_stats(netdev, data); +} + +static int nfp_port_get_sset_count(struct net_device *netdev, int sset) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + unsigned int count; + + switch (sset) { + case ETH_SS_STATS: + if (nfp_port_is_vnic(port)) + count = nfp_vnic_get_hw_stats_count(0, 0); + else + count = nfp_mac_get_stats_count(netdev); + return count; + default: + return -EOPNOTSUPP; + } +} + /* RX network flow classification (RSS, filters, etc) */ static u32 ethtool_flow_to_nfp_flag(u32 flow_type) @@ -1085,6 +1144,9 @@ static const struct ethtool_ops nfp_net_ethtool_ops = { const struct ethtool_ops nfp_port_ethtool_ops = { .get_drvinfo = nfp_app_get_drvinfo, .get_link = ethtool_op_get_link, + .get_strings = nfp_port_get_strings, + .get_ethtool_stats = nfp_port_get_stats, + .get_sset_count = nfp_port_get_sset_count, .set_dump = nfp_app_set_dump, .get_dump_flag = nfp_app_get_dump_flag, .get_dump_data = nfp_app_get_dump_data, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h index 81c034018133..51dcb9c603ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h @@ -116,6 +116,11 @@ extern const struct switchdev_ops nfp_port_switchdev_ops; int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); +static inline bool nfp_port_is_vnic(const struct nfp_port *port) +{ + return port->type == NFP_PORT_PF_PORT || port->type == NFP_PORT_VF_PORT; +} + struct nfp_port *nfp_port_from_netdev(struct net_device *netdev); struct nfp_port * nfp_port_from_id(struct nfp_pf *pf, enum nfp_port_type type, unsigned int id); -- cgit v1.2.3-55-g7522 From 825b18ab24a1762a07e8202999cd8eb8eb8d505d Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:21 -0700 Subject: nfp: fix copy paste in names and messages regarding vNICs Data and control vNICs currently use the same area name and error message. This could lead to confusion. 
Make sure the error message says "ctrl" in case of control and the data area is called "nfp.bar0". Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index d5e2361f0e86..acdad6f20251 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -388,7 +388,7 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar); if (IS_ERR(ctrl_bar)) { - nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n"); + nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n"); err = PTR_ERR(ctrl_bar); goto err_app_clean; } @@ -504,7 +504,7 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) int err; min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE; - mem = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0", + mem = nfp_net_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0", min_size, &pf->data_vnic_bar); if (IS_ERR(mem)) { nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n"); -- cgit v1.2.3-55-g7522 From 85d8e2ba7060ced6b23a2b1a2a8dd5cdaf951fb9 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2017 15:48:22 -0700 Subject: nfp: don't reuse pointers in ring dumping We were reusing skb pointer when reading page frag, since ring entries contain a union of a skb and frag pointer. This can be confusing to people reading the code. Refactor the code to read frag pointer directly. Signed-off-by: Jakub Kicinski Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 40217ece5fcb..cf81cf95d1d8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -125,7 +125,6 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) struct nfp_net_tx_ring *tx_ring; struct nfp_net_tx_desc *txd; int d_rd_p, d_wr_p, txd_cnt; - struct sk_buff *skb; struct nfp_net *nn; int i; @@ -158,13 +157,15 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) txd->vals[0], txd->vals[1], txd->vals[2], txd->vals[3]); - skb = READ_ONCE(tx_ring->txbufs[i].skb); - if (skb) { - if (tx_ring == r_vec->tx_ring) + if (tx_ring == r_vec->tx_ring) { + struct sk_buff *skb = READ_ONCE(tx_ring->txbufs[i].skb); + + if (skb) seq_printf(file, " skb->head=%p skb->data=%p", skb->head, skb->data); - else - seq_printf(file, " frag=%p", skb); + } else { + seq_printf(file, " frag=%p", + READ_ONCE(tx_ring->txbufs[i].frag)); } if (tx_ring->txbufs[i].dma_addr) -- cgit v1.2.3-55-g7522 From 9374bf18dbb533912898b07b3751fbce40fae5c4 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Sat, 19 Aug 2017 14:54:47 +0530 Subject: Bluetooth: make device_type const Make these const as they are only stored in the type field of a device structure, which is const. Done using Coccinelle. 
Signed-off-by: Bhumika Goyal Signed-off-by: Marcel Holtmann --- net/bluetooth/hci_sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index ca7a35ebaefb..aa300f3a0d51 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -13,7 +13,7 @@ static void bt_link_release(struct device *dev) kfree(conn); } -static struct device_type bt_link = { +static const struct device_type bt_link = { .name = "link", .release = bt_link_release, }; @@ -86,7 +86,7 @@ static void bt_host_release(struct device *dev) module_put(THIS_MODULE); } -static struct device_type bt_host = { +static const struct device_type bt_host = { .name = "host", .release = bt_host_release, }; -- cgit v1.2.3-55-g7522 From a2acc543408e1cbdcd7915a268cdbc451f09832a Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Wed, 2 Aug 2017 18:14:43 +0900 Subject: netfilter: connlimit: merge root4 and root6. The root4 variable is used only when connlimit extension module has been stored by the iptables command. and the roo6 variable is used only when connlimit extension module has been stored by the ip6tables command. So the root4 and roo6 variable does not be used at the same time. Signed-off-by: Taehee Yoo Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_connlimit.c | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c index 97589b8a2a40..ffa8eec980e9 100644 --- a/net/netfilter/xt_connlimit.c +++ b/net/netfilter/xt_connlimit.c @@ -58,8 +58,7 @@ struct xt_connlimit_rb { static spinlock_t xt_connlimit_locks[CONNLIMIT_LOCK_SLOTS] __cacheline_aligned_in_smp; struct xt_connlimit_data { - struct rb_root climit_root4[CONNLIMIT_SLOTS]; - struct rb_root climit_root6[CONNLIMIT_SLOTS]; + struct rb_root climit_root[CONNLIMIT_SLOTS]; }; static u_int32_t connlimit_rnd __read_mostly; @@ -294,13 +293,11 @@ static int count_them(struct net *net, int count; u32 hash; - if (family == NFPROTO_IPV6) { + if (family == NFPROTO_IPV6) hash = connlimit_iphash6(addr, mask); - root = &data->climit_root6[hash]; - } else { + else hash = connlimit_iphash(addr->ip & mask->ip); - root = &data->climit_root4[hash]; - } + root = &data->climit_root[hash]; spin_lock_bh(&xt_connlimit_locks[hash % CONNLIMIT_LOCK_SLOTS]); @@ -379,10 +376,8 @@ static int connlimit_mt_check(const struct xt_mtchk_param *par) return -ENOMEM; } - for (i = 0; i < ARRAY_SIZE(info->data->climit_root4); ++i) - info->data->climit_root4[i] = RB_ROOT; - for (i = 0; i < ARRAY_SIZE(info->data->climit_root6); ++i) - info->data->climit_root6[i] = RB_ROOT; + for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i) + info->data->climit_root[i] = RB_ROOT; return 0; } @@ -413,10 +408,8 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par) nf_ct_netns_put(par->net, par->family); - for (i = 0; i < ARRAY_SIZE(info->data->climit_root4); ++i) - destroy_tree(&info->data->climit_root4[i]); - for (i = 0; i < ARRAY_SIZE(info->data->climit_root6); ++i) - destroy_tree(&info->data->climit_root6[i]); + for (i = 0; i < ARRAY_SIZE(info->data->climit_root); ++i) + destroy_tree(&info->data->climit_root[i]); kfree(info->data); } -- cgit v1.2.3-55-g7522 From 166327d79d8d44a1668eff0fda8286e9a193a251 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Thu, 3 Aug 2017 10:26:20 +0900 Subject: netfilter: remove prototype of netfilter_queue_init The netfilter_queue_init() has been removed. so we can remove the prototype of that. 
Signed-off-by: Taehee Yoo Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_internals.h | 1 - 1 file changed, 1 deletion(-) diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h index bfa742da83af..19f00a47a710 100644 --- a/net/netfilter/nf_internals.h +++ b/net/netfilter/nf_internals.h @@ -15,7 +15,6 @@ int nf_queue(struct sk_buff *skb, struct nf_hook_state *state, struct nf_hook_entry **entryp, unsigned int verdict); unsigned int nf_queue_nf_hook_drop(struct net *net); -int __init netfilter_queue_init(void); /* nf_log.c */ int __init netfilter_log_init(void); -- cgit v1.2.3-55-g7522 From 46b20c38f37c48bbcb832f933e1bee7d951da99b Mon Sep 17 00:00:00 2001 From: Geliang Tang Date: Mon, 7 Aug 2017 21:44:25 +0800 Subject: netfilter: use audit_log() Use audit_log() instead of open-coding it. Signed-off-by: Geliang Tang Signed-off-by: Pablo Neira Ayuso --- net/bridge/netfilter/ebtables.c | 13 ++++--------- net/netfilter/x_tables.c | 14 ++++---------- 2 files changed, 8 insertions(+), 19 deletions(-) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 9c6e619f452b..54c7ef4e970e 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1069,15 +1069,10 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl, #ifdef CONFIG_AUDIT if (audit_enabled) { - struct audit_buffer *ab; - - ab = audit_log_start(current->audit_context, GFP_KERNEL, - AUDIT_NETFILTER_CFG); - if (ab) { - audit_log_format(ab, "table=%s family=%u entries=%u", - repl->name, AF_BRIDGE, repl->nentries); - audit_log_end(ab); - } + audit_log(current->audit_context, GFP_KERNEL, + AUDIT_NETFILTER_CFG, + "table=%s family=%u entries=%u", + repl->name, AF_BRIDGE, repl->nentries); } #endif return ret; diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index e1648238a9c9..c83a3b5e1c6c 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c @@ -1192,16 +1192,10 @@ xt_replace_table(struct xt_table *table, #ifdef CONFIG_AUDIT if (audit_enabled) { - struct audit_buffer *ab; - - ab = audit_log_start(current->audit_context, GFP_KERNEL, - AUDIT_NETFILTER_CFG); - if (ab) { - audit_log_format(ab, "table=%s family=%u entries=%u", - table->name, table->af, - private->number); - audit_log_end(ab); - } + audit_log(current->audit_context, GFP_KERNEL, + AUDIT_NETFILTER_CFG, + "table=%s family=%u entries=%u", + table->name, table->af, private->number); } #endif -- cgit v1.2.3-55-g7522 From a18177008b2613f009ef210b7da695056a932321 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 8 Aug 2017 15:15:27 +0200 Subject: netfilter: exthdr: factor out tcp option access Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_exthdr.c | 33 +++++++++++++++++++++------------ 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index 1ec49fe5845f..921c95f2c583 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -61,6 +61,26 @@ err: regs->verdict.code = NFT_BREAK; } +static void * +nft_tcp_header_pointer(const struct nft_pktinfo *pkt, + unsigned int len, void *buffer, unsigned int *tcphdr_len) +{ + struct tcphdr *tcph; + + if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP) + return NULL; + + tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buffer); + if (!tcph) + return NULL; + + *tcphdr_len = __tcp_hdrlen(tcph); + if (*tcphdr_len < sizeof(*tcph) || *tcphdr_len > len) + return NULL; + + return 
skb_header_pointer(pkt->skb, pkt->xt.thoff, *tcphdr_len, buffer); +} + static void nft_exthdr_tcp_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -72,18 +92,7 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr, struct tcphdr *tcph; u8 *opt; - if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP) - goto err; - - tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buff); - if (!tcph) - goto err; - - tcphdr_len = __tcp_hdrlen(tcph); - if (tcphdr_len < sizeof(*tcph)) - goto err; - - tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, tcphdr_len, buff); + tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); if (!tcph) goto err; -- cgit v1.2.3-55-g7522 From 5e7d695a482c6e581addf42717469bd363dd734e Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 8 Aug 2017 15:15:28 +0200 Subject: netfilter: exthdr: split netlink dump function so eval and upcoming eval_set versions can reuse a common helper. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_exthdr.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index 921c95f2c583..e3a6eebe7e0c 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -180,12 +180,8 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, NFT_DATA_VALUE, priv->len); } -static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr) +static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv) { - const struct nft_exthdr *priv = nft_expr_priv(expr); - - if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg)) - goto nla_put_failure; if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset))) @@ -202,6 +198,16 @@ nla_put_failure: return -1; } +static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr) +{ + const struct nft_exthdr *priv = nft_expr_priv(expr); + + if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg)) + return -1; + + return nft_exthdr_dump_common(skb, priv); +} + static struct nft_expr_type nft_exthdr_type; static const struct nft_expr_ops nft_exthdr_ipv6_ops = { .type = &nft_exthdr_type, -- cgit v1.2.3-55-g7522 From 99d1712bc41c7c9a5a473c104a4ad15427757b22 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 8 Aug 2017 15:15:29 +0200 Subject: netfilter: exthdr: tcp option set support This allows setting 2 and 4 byte quantities in the tcp option space. Main purpose is to allow native replacement for xt_TCPMSS to work around pmtu blackholes. Writes to kind and len are not allowed at the moment, it does not seem useful to do this as it causes corruption of the tcp option space. We can always lift this restriction later if a use-case appears. 
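[Editor's note] Both the existing TCP option match and the new set support rely on the same kind/length walk over the TCP option area (the optlen() loop in the eval functions of the diff below). A minimal standalone sketch of that walk, written as plain userspace C purely for illustration and not taken from the kernel sources:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define TCPOPT_EOL	0	/* end of option list */
#define TCPOPT_NOP	1	/* single-byte padding */
#define TCPOPT_MSS	2	/* maximum segment size */

/* Return the offset of option 'wanted' inside opt[0..optlen), or -1. */
static int find_tcp_option(const uint8_t *opt, size_t optlen, uint8_t wanted)
{
	size_t i = 0;

	while (i < optlen) {
		uint8_t kind = opt[i];
		size_t len;

		if (kind == TCPOPT_EOL)
			break;
		if (kind == TCPOPT_NOP) {		/* no length byte */
			i++;
			continue;
		}
		if (i + 1 >= optlen)			/* truncated option header */
			break;
		len = opt[i + 1];
		if (len < 2 || i + len > optlen)	/* malformed length */
			break;
		if (kind == wanted)
			return (int)i;
		i += len;
	}
	return -1;
}

int main(void)
{
	/* NOP, NOP, MSS option: kind 2, len 4, value 1460 (0x05b4) */
	const uint8_t opts[] = { 1, 1, 2, 4, 0x05, 0xb4 };
	int off = find_tcp_option(opts, sizeof(opts), TCPOPT_MSS);

	if (off >= 0)
		printf("MSS option at offset %d, value %u\n", off,
		       (unsigned int)((opts[off + 2] << 8) | opts[off + 3]));
	return 0;
}

The kernel eval paths apply the same kind of bounds checking before reading or writing an option, which is what keeps the rewrite confined to the option area.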
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 4 +- net/netfilter/nft_exthdr.c | 164 ++++++++++++++++++++++++++++++- 2 files changed, 165 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index be25cf69295b..40fd199f7531 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -732,7 +732,8 @@ enum nft_exthdr_op { * @NFTA_EXTHDR_OFFSET: extension header offset (NLA_U32) * @NFTA_EXTHDR_LEN: extension header length (NLA_U32) * @NFTA_EXTHDR_FLAGS: extension header flags (NLA_U32) - * @NFTA_EXTHDR_OP: option match type (NLA_U8) + * @NFTA_EXTHDR_OP: option match type (NLA_U32) + * @NFTA_EXTHDR_SREG: option match type (NLA_U32) */ enum nft_exthdr_attributes { NFTA_EXTHDR_UNSPEC, @@ -742,6 +743,7 @@ enum nft_exthdr_attributes { NFTA_EXTHDR_LEN, NFTA_EXTHDR_FLAGS, NFTA_EXTHDR_OP, + NFTA_EXTHDR_SREG, __NFTA_EXTHDR_MAX }; #define NFTA_EXTHDR_MAX (__NFTA_EXTHDR_MAX - 1) diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index e3a6eebe7e0c..f5a0bf5e3bdd 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -8,6 +8,7 @@ * Development of this code funded by Astaro AG (http://www.astaro.com/) */ +#include #include #include #include @@ -23,6 +24,7 @@ struct nft_exthdr { u8 len; u8 op; enum nft_registers dreg:8; + enum nft_registers sreg:8; u8 flags; }; @@ -124,6 +126,88 @@ err: regs->verdict.code = NFT_BREAK; } +static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE]; + struct nft_exthdr *priv = nft_expr_priv(expr); + unsigned int i, optl, tcphdr_len, offset; + struct tcphdr *tcph; + u8 *opt; + u32 src; + + tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len); + if (!tcph) + return; + + opt = (u8 *)tcph; + for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) { + union { + u8 octet; + __be16 v16; + __be32 v32; + } old, new; + + optl = optlen(opt, i); + + if (priv->type != opt[i]) + continue; + + if (i + optl > tcphdr_len || priv->len + priv->offset > optl) + return; + + if (!skb_make_writable(pkt->skb, pkt->xt.thoff + i + priv->len)) + return; + + tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, + &tcphdr_len); + if (!tcph) + return; + + src = regs->data[priv->sreg]; + offset = i + priv->offset; + + switch (priv->len) { + case 2: + old.v16 = get_unaligned((u16 *)(opt + offset)); + new.v16 = src; + + switch (priv->type) { + case TCPOPT_MSS: + /* increase can cause connection to stall */ + if (ntohs(old.v16) <= ntohs(new.v16)) + return; + break; + } + + if (old.v16 == new.v16) + return; + + put_unaligned(new.v16, (u16*)(opt + offset)); + inet_proto_csum_replace2(&tcph->check, pkt->skb, + old.v16, new.v16, false); + break; + case 4: + new.v32 = src; + old.v32 = get_unaligned((u32 *)(opt + offset)); + + if (old.v32 == new.v32) + return; + + put_unaligned(new.v32, (u32*)(opt + offset)); + inet_proto_csum_replace4(&tcph->check, pkt->skb, + old.v32, new.v32, false); + break; + default: + WARN_ON_ONCE(1); + break; + } + + return; + } +} + static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { [NFTA_EXTHDR_DREG] = { .type = NLA_U32 }, [NFTA_EXTHDR_TYPE] = { .type = NLA_U8 }, @@ -180,6 +264,55 @@ static int nft_exthdr_init(const struct nft_ctx *ctx, NFT_DATA_VALUE, priv->len); } +static int nft_exthdr_tcp_set_init(const struct 
nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_exthdr *priv = nft_expr_priv(expr); + u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6; + int err; + + if (!tb[NFTA_EXTHDR_SREG] || + !tb[NFTA_EXTHDR_TYPE] || + !tb[NFTA_EXTHDR_OFFSET] || + !tb[NFTA_EXTHDR_LEN]) + return -EINVAL; + + if (tb[NFTA_EXTHDR_DREG] || tb[NFTA_EXTHDR_FLAGS]) + return -EINVAL; + + err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset); + if (err < 0) + return err; + + err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len); + if (err < 0) + return err; + + if (offset < 2) + return -EOPNOTSUPP; + + switch (len) { + case 2: break; + case 4: break; + default: + return -EOPNOTSUPP; + } + + err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op); + if (err < 0) + return err; + + priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]); + priv->offset = offset; + priv->len = len; + priv->sreg = nft_parse_register(tb[NFTA_EXTHDR_SREG]); + priv->flags = flags; + priv->op = op; + + return nft_validate_register_load(priv->sreg, priv->len); +} + static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv) { if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type)) @@ -208,6 +341,16 @@ static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr) return nft_exthdr_dump_common(skb, priv); } +static int nft_exthdr_dump_set(struct sk_buff *skb, const struct nft_expr *expr) +{ + const struct nft_exthdr *priv = nft_expr_priv(expr); + + if (nft_dump_register(skb, NFTA_EXTHDR_SREG, priv->sreg)) + return -1; + + return nft_exthdr_dump_common(skb, priv); +} + static struct nft_expr_type nft_exthdr_type; static const struct nft_expr_ops nft_exthdr_ipv6_ops = { .type = &nft_exthdr_type, @@ -225,6 +368,14 @@ static const struct nft_expr_ops nft_exthdr_tcp_ops = { .dump = nft_exthdr_dump, }; +static const struct nft_expr_ops nft_exthdr_tcp_set_ops = { + .type = &nft_exthdr_type, + .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)), + .eval = nft_exthdr_tcp_set_eval, + .init = nft_exthdr_tcp_set_init, + .dump = nft_exthdr_dump_set, +}; + static const struct nft_expr_ops * nft_exthdr_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) @@ -234,12 +385,21 @@ nft_exthdr_select_ops(const struct nft_ctx *ctx, if (!tb[NFTA_EXTHDR_OP]) return &nft_exthdr_ipv6_ops; + if (tb[NFTA_EXTHDR_SREG] && tb[NFTA_EXTHDR_DREG]) + return ERR_PTR(-EOPNOTSUPP); + op = ntohl(nla_get_u32(tb[NFTA_EXTHDR_OP])); switch (op) { case NFT_EXTHDR_OP_TCPOPT: - return &nft_exthdr_tcp_ops; + if (tb[NFTA_EXTHDR_SREG]) + return &nft_exthdr_tcp_set_ops; + if (tb[NFTA_EXTHDR_DREG]) + return &nft_exthdr_tcp_ops; + break; case NFT_EXTHDR_OP_IPV6: - return &nft_exthdr_ipv6_ops; + if (tb[NFTA_EXTHDR_DREG]) + return &nft_exthdr_ipv6_ops; + break; } return ERR_PTR(-EOPNOTSUPP); -- cgit v1.2.3-55-g7522 From 6b5dc98e8fac041a3decfc3186e08c1c570ea691 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 8 Aug 2017 15:48:04 +0200 Subject: netfilter: rt: add support to fetch path mss to be used in combination with tcp option set support to mimic iptables TCPMSS --clamp-mss-to-pmtu. v2: Eric Dumazet points out dst must be initialized. 
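[Editor's note] The register value exposed as NFT_RT_TCPMSS is derived from the path MTU by subtracting the minimal network header and falling back to the protocol default when the MTU is unusable. A small standalone sketch of that arithmetic, mirroring the bounds used by get_tcpmss() in the diff below; the hard-coded header sizes and the 536-byte default are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define TCP_MSS_DEFAULT	536	/* conservative fallback value */

static uint16_t path_mss(uint32_t mtu, int ipv6)
{
	uint32_t minlen = ipv6 ? 40 : 20;	/* fixed IPv6 / minimal IPv4 header */

	/* A too-small or absurdly large MTU falls back to the default MSS. */
	if (mtu <= minlen || mtu > 0xffff)
		return TCP_MSS_DEFAULT;

	return mtu - minlen;
}

int main(void)
{
	printf("ipv4, mtu 1500 -> %u\n", path_mss(1500, 0));	/* 1480 */
	printf("ipv6, mtu 1280 -> %u\n", path_mss(1280, 1));	/* 1240 */
	return 0;
}

Combined with the tcp option set support above, this is enough to rewrite an over-large MSS option on forwarded SYNs, which is the --clamp-mss-to-pmtu behaviour the commit message refers to.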
Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 2 + net/netfilter/nft_rt.c | 66 ++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 40fd199f7531..b49da72efa68 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -811,11 +811,13 @@ enum nft_meta_keys { * @NFT_RT_CLASSID: realm value of packet's route (skb->dst->tclassid) * @NFT_RT_NEXTHOP4: routing nexthop for IPv4 * @NFT_RT_NEXTHOP6: routing nexthop for IPv6 + * @NFT_RT_TCPMSS: fetch current path tcp mss */ enum nft_rt_keys { NFT_RT_CLASSID, NFT_RT_NEXTHOP4, NFT_RT_NEXTHOP6, + NFT_RT_TCPMSS, }; /** diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c index c7383d8f88d0..e142e65d3176 100644 --- a/net/netfilter/nft_rt.c +++ b/net/netfilter/nft_rt.c @@ -23,6 +23,42 @@ struct nft_rt { enum nft_registers dreg:8; }; +static u16 get_tcpmss(const struct nft_pktinfo *pkt, const struct dst_entry *skbdst) +{ + u32 minlen = sizeof(struct ipv6hdr), mtu = dst_mtu(skbdst); + const struct sk_buff *skb = pkt->skb; + const struct nf_afinfo *ai; + struct flowi fl; + + memset(&fl, 0, sizeof(fl)); + + switch (nft_pf(pkt)) { + case NFPROTO_IPV4: + fl.u.ip4.daddr = ip_hdr(skb)->saddr; + minlen = sizeof(struct iphdr); + break; + case NFPROTO_IPV6: + fl.u.ip6.daddr = ipv6_hdr(skb)->saddr; + break; + } + + ai = nf_get_afinfo(nft_pf(pkt)); + if (ai) { + struct dst_entry *dst = NULL; + + ai->route(nft_net(pkt), &dst, &fl, false); + if (dst) { + mtu = min(mtu, dst_mtu(dst)); + dst_release(dst); + } + } + + if (mtu <= minlen || mtu > 0xffff) + return TCP_MSS_DEFAULT; + + return mtu - minlen; +} + static void nft_rt_get_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -57,6 +93,9 @@ static void nft_rt_get_eval(const struct nft_expr *expr, &ipv6_hdr(skb)->daddr), sizeof(struct in6_addr)); break; + case NFT_RT_TCPMSS: + nft_reg_store16(dest, get_tcpmss(pkt, dst)); + break; default: WARN_ON(1); goto err; @@ -94,6 +133,9 @@ static int nft_rt_get_init(const struct nft_ctx *ctx, case NFT_RT_NEXTHOP6: len = sizeof(struct in6_addr); break; + case NFT_RT_TCPMSS: + len = sizeof(u16); + break; default: return -EOPNOTSUPP; } @@ -118,6 +160,29 @@ nla_put_failure: return -1; } +static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, + const struct nft_data **data) +{ + const struct nft_rt *priv = nft_expr_priv(expr); + unsigned int hooks; + + switch (priv->key) { + case NFT_RT_NEXTHOP4: + case NFT_RT_NEXTHOP6: + case NFT_RT_CLASSID: + return 0; + case NFT_RT_TCPMSS: + hooks = (1 << NF_INET_FORWARD) | + (1 << NF_INET_LOCAL_OUT) | + (1 << NF_INET_POST_ROUTING); + break; + default: + return -EINVAL; + } + + return nft_chain_validate_hooks(ctx->chain, hooks); +} + static struct nft_expr_type nft_rt_type; static const struct nft_expr_ops nft_rt_get_ops = { .type = &nft_rt_type, @@ -125,6 +190,7 @@ static const struct nft_expr_ops nft_rt_get_ops = { .eval = nft_rt_get_eval, .init = nft_rt_get_init, .dump = nft_rt_get_dump, + .validate = nft_rt_validate, }; static struct nft_expr_type nft_rt_type __read_mostly = { -- cgit v1.2.3-55-g7522 From 4ac5bc34fca3d75c470139d2b53ccca251a6031f Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 19 Aug 2017 12:21:43 +0530 Subject: net: 3c509: constify eisa_device_id eisa_device_id are not supposed to change at runtime. 
All functions working with eisa_device_id provided by work with const eisa_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/ethernet/3com/3c509.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 077d01d9f141..b223769d6a5e 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -474,7 +474,7 @@ static int pnp_registered; #endif /* CONFIG_PNP */ #ifdef CONFIG_EISA -static struct eisa_device_id el3_eisa_ids[] = { +static const struct eisa_device_id el3_eisa_ids[] = { { "TCM5090" }, { "TCM5091" }, { "TCM5092" }, -- cgit v1.2.3-55-g7522 From 670af5ed4c55e0d7c9aa295e74867cbebbb29ce5 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 19 Aug 2017 12:22:13 +0530 Subject: net: 3c59x: constify eisa_device_id eisa_device_id are not supposed to change at runtime. All functions working with eisa_device_id provided by work with const eisa_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/ethernet/3com/3c59x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 3b516ebeeddb..402d9090ad29 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -900,7 +900,7 @@ static const struct dev_pm_ops vortex_pm_ops = { #endif /* !CONFIG_PM */ #ifdef CONFIG_EISA -static struct eisa_device_id vortex_eisa_ids[] = { +static const struct eisa_device_id vortex_eisa_ids[] = { { "TCM5920", CH_3C592 }, { "TCM5970", CH_3C597 }, { "" } -- cgit v1.2.3-55-g7522 From f98dfa4a5f83fdc2eb26d31c25924aeb84739e06 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 19 Aug 2017 12:22:44 +0530 Subject: net: de4x5: constify eisa_device_id eisa_device_id are not supposed to change at runtime. All functions working with eisa_device_id provided by work with const eisa_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/de4x5.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index 47be5018d35d..0affee9c8aa2 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -2094,7 +2094,7 @@ static int de4x5_eisa_remove(struct device *device) return 0; } -static struct eisa_device_id de4x5_eisa_ids[] = { +static const struct eisa_device_id de4x5_eisa_ids[] = { { "DEC4250", 0 }, /* 0 is the board name index... */ { "" } }; -- cgit v1.2.3-55-g7522 From 33f14384dde8a848d68c9e0224477e29babb233a Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 19 Aug 2017 12:23:14 +0530 Subject: net: hp100: constify eisa_device_id eisa_device_id are not supposed to change at runtime. All functions working with eisa_device_id provided by work with const eisa_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hp/hp100.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index c6164a98f257..c8c7ad2eff77 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -194,7 +194,7 @@ static const char *hp100_isa_tbl[] = { }; #endif -static struct eisa_device_id hp100_eisa_tbl[] = { +static const struct eisa_device_id hp100_eisa_tbl[] = { { "HWPF180" }, /* HP J2577 rev A */ { "HWP1920" }, /* HP 27248B */ { "HWP1940" }, /* HP J2577 */ -- cgit v1.2.3-55-g7522 From 46d9ceaad0b092ea17c90a7ce2608eba57e9adc3 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 19 Aug 2017 12:23:34 +0530 Subject: net: defxx: constify eisa_device_id eisa_device_id are not supposed to change at runtime. All functions working with eisa_device_id provided by work with const eisa_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Reviewed-by: Maciej W. Rozycki Signed-off-by: David S. Miller --- drivers/net/fddi/defxx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index f4a816cf012a..61fceee73c1b 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -3767,7 +3767,7 @@ static void dfx_pci_unregister(struct pci_dev *pdev) #endif /* CONFIG_PCI */ #ifdef CONFIG_EISA -static struct eisa_device_id dfx_eisa_table[] = { +static const struct eisa_device_id dfx_eisa_table[] = { { "DEC3001", DEFEA_PROD_ID_1 }, { "DEC3002", DEFEA_PROD_ID_2 }, { "DEC3003", DEFEA_PROD_ID_3 }, -- cgit v1.2.3-55-g7522 From d7629e748ee4332af91c1c2e2b4d7a92e7d5cde4 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 18 Aug 2017 16:30:00 +0100 Subject: net: hns3: fix a handful of spelling mistakes Trival fix to spelling mistakes: firware -> firmware invald -> invalid mutilcast -> multicast Signed-off-by: Colin Ian King Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 2 +- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index bc869842728f..8b511e6e0ce9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -334,7 +334,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) } hdev->fw_version = version; - dev_info(&hdev->pdev->dev, "The firware version is %08x\n", version); + dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); return 0; err_csq: diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 6fb7648bb2f2..bb45365fb817 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -1952,7 +1952,7 @@ int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex) HCLGE_CFG_SPEED_S, 5); break; default: - dev_err(&hdev->pdev->dev, "invald speed (%d)\n", speed); + dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); return -EINVAL; } @@ -3476,7 +3476,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport, } else { /* This mac addr do not exist, can't delete it */ dev_err(&hdev->pdev->dev, - "Rm mutilcast mac addr failed, ret = %d.\n", + "Rm multicast mac addr failed, ret = %d.\n", status); return -EIO; } -- cgit v1.2.3-55-g7522 From bd76b87962833f6e55264030a227be0f090b1286 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 18 Aug 2017 16:40:00 +0100 Subject: bnxt_en: fix spelling mistake: "swtichdev" -> "switchdev" Trivial fix to spelling mistake in a netdev_info message Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index b05c5d0ee3f9..86cce6f53f78 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -428,7 +428,7 @@ static int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode) case DEVLINK_ESWITCH_MODE_SWITCHDEV: if (pci_num_vf(bp->pdev) == 0) { netdev_info(bp->dev, - "Enable VFs before setting swtichdev mode"); + "Enable VFs before setting switchdev mode"); rc = -EPERM; goto done; } -- cgit v1.2.3-55-g7522 From 96eabe7a40aa17e613cf3db2c742ee8b1fc764d0 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Fri, 18 Aug 2017 11:28:00 -0700 Subject: bpf: Allow selecting numa node during map creation The current map creation API does not allow to provide the numa-node preference. The memory usually comes from where the map-creation-process is running. The performance is not ideal if the bpf_prog is known to always run in a numa node different from the map-creation-process. One of the use case is sharding on CPU to different LRU maps (i.e. an array of LRU maps). Here is the test result of map_perf_test on the INNER_LRU_HASH_PREALLOC test if we force the lru map used by CPU0 to be allocated from a remote numa node: [ The machine has 20 cores. CPU0-9 at node 0. 
CPU10-19 at node 1 ] ># taskset -c 10 ./map_perf_test 512 8 1260000 8000000 5:inner_lru_hash_map_perf pre-alloc 1628380 events per sec 4:inner_lru_hash_map_perf pre-alloc 1626396 events per sec 3:inner_lru_hash_map_perf pre-alloc 1626144 events per sec 6:inner_lru_hash_map_perf pre-alloc 1621657 events per sec 2:inner_lru_hash_map_perf pre-alloc 1621534 events per sec 1:inner_lru_hash_map_perf pre-alloc 1620292 events per sec 7:inner_lru_hash_map_perf pre-alloc 1613305 events per sec 0:inner_lru_hash_map_perf pre-alloc 1239150 events per sec #<<< After specifying numa node: ># taskset -c 10 ./map_perf_test 512 8 1260000 8000000 5:inner_lru_hash_map_perf pre-alloc 1629627 events per sec 3:inner_lru_hash_map_perf pre-alloc 1628057 events per sec 1:inner_lru_hash_map_perf pre-alloc 1623054 events per sec 6:inner_lru_hash_map_perf pre-alloc 1616033 events per sec 2:inner_lru_hash_map_perf pre-alloc 1614630 events per sec 4:inner_lru_hash_map_perf pre-alloc 1612651 events per sec 7:inner_lru_hash_map_perf pre-alloc 1609337 events per sec 0:inner_lru_hash_map_perf pre-alloc 1619340 events per sec #<<< This patch adds one field, numa_node, to the bpf_attr. Since numa node 0 is a valid node, a new flag BPF_F_NUMA_NODE is also added. The numa_node field is honored if and only if the BPF_F_NUMA_NODE flag is set. Numa node selection is not supported for percpu map. This patch does not change all the kmalloc. F.e. 'htab = kzalloc()' is not changed since the object is small enough to stay in the cache. Signed-off-by: Martin KaFai Lau Acked-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- include/linux/bpf.h | 10 +++++++++- include/uapi/linux/bpf.h | 10 +++++++++- kernel/bpf/arraymap.c | 7 +++++-- kernel/bpf/devmap.c | 9 ++++++--- kernel/bpf/hashtab.c | 19 +++++++++++++++---- kernel/bpf/lpm_trie.c | 9 +++++++-- kernel/bpf/sockmap.c | 10 +++++++--- kernel/bpf/stackmap.c | 8 +++++--- kernel/bpf/syscall.c | 14 ++++++++++---- 9 files changed, 73 insertions(+), 23 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 1cc6c5ff61ec..55b88e329804 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -51,6 +51,7 @@ struct bpf_map { u32 map_flags; u32 pages; u32 id; + int numa_node; struct user_struct *user; const struct bpf_map_ops *ops; struct work_struct work; @@ -264,7 +265,7 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); void bpf_map_put_with_uref(struct bpf_map *map); void bpf_map_put(struct bpf_map *map); int bpf_map_precharge_memlock(u32 pages); -void *bpf_map_area_alloc(size_t size); +void *bpf_map_area_alloc(size_t size, int numa_node); void bpf_map_area_free(void *base); extern int sysctl_unprivileged_bpf_disabled; @@ -316,6 +317,13 @@ struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); void __dev_map_insert_ctx(struct bpf_map *map, u32 index); void __dev_map_flush(struct bpf_map *map); +/* Return map's numa specified by userspace */ +static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) +{ + return (attr->map_flags & BPF_F_NUMA_NODE) ? 
+ attr->numa_node : NUMA_NO_NODE; +} + #else static inline struct bpf_prog *bpf_prog_get(u32 ufd) { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 5ecbe812a2cc..843818dff96d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -165,6 +165,7 @@ enum bpf_attach_type { #define BPF_NOEXIST 1 /* create new element if it didn't exist */ #define BPF_EXIST 2 /* update existing element */ +/* flags for BPF_MAP_CREATE command */ #define BPF_F_NO_PREALLOC (1U << 0) /* Instead of having one common LRU list in the * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list @@ -173,6 +174,8 @@ enum bpf_attach_type { * across different LRU lists. */ #define BPF_F_NO_COMMON_LRU (1U << 1) +/* Specify numa node during map creation */ +#define BPF_F_NUMA_NODE (1U << 2) union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ @@ -180,8 +183,13 @@ union bpf_attr { __u32 key_size; /* size of key in bytes */ __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ - __u32 map_flags; /* prealloc or not */ + __u32 map_flags; /* BPF_MAP_CREATE related + * flags defined above. + */ __u32 inner_map_fd; /* fd pointing to the inner map */ + __u32 numa_node; /* numa node (effective only if + * BPF_F_NUMA_NODE is set). + */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index d771a3872500..96e9c5c1dfc9 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -49,13 +49,15 @@ static int bpf_array_alloc_percpu(struct bpf_array *array) static struct bpf_map *array_map_alloc(union bpf_attr *attr) { bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; + int numa_node = bpf_map_attr_numa_node(attr); struct bpf_array *array; u64 array_size; u32 elem_size; /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size == 0 || attr->map_flags) + attr->value_size == 0 || attr->map_flags & ~BPF_F_NUMA_NODE || + (percpu && numa_node != NUMA_NO_NODE)) return ERR_PTR(-EINVAL); if (attr->value_size > KMALLOC_MAX_SIZE) @@ -77,7 +79,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) return ERR_PTR(-ENOMEM); /* allocate all map elements and zero-initialize them */ - array = bpf_map_area_alloc(array_size); + array = bpf_map_area_alloc(array_size, numa_node); if (!array) return ERR_PTR(-ENOMEM); @@ -87,6 +89,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) array->map.value_size = attr->value_size; array->map.max_entries = attr->max_entries; array->map.map_flags = attr->map_flags; + array->map.numa_node = numa_node; array->elem_size = elem_size; if (!percpu) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 18a72a8add43..67f4f00ce33a 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -80,7 +80,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size != 4 || attr->map_flags) + attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) return ERR_PTR(-EINVAL); dtab = kzalloc(sizeof(*dtab), GFP_USER); @@ -93,6 +93,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) dtab->map.value_size = attr->value_size; dtab->map.max_entries = attr->max_entries; dtab->map.map_flags = attr->map_flags; + dtab->map.numa_node = bpf_map_attr_numa_node(attr); err = -ENOMEM; @@ -119,7 +120,8 @@ static struct bpf_map *dev_map_alloc(union bpf_attr 
*attr) goto free_dtab; dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * - sizeof(struct bpf_dtab_netdev *)); + sizeof(struct bpf_dtab_netdev *), + dtab->map.numa_node); if (!dtab->netdev_map) goto free_dtab; @@ -344,7 +346,8 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, if (!ifindex) { dev = NULL; } else { - dev = kmalloc(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN); + dev = kmalloc_node(sizeof(*dev), GFP_ATOMIC | __GFP_NOWARN, + map->numa_node); if (!dev) return -ENOMEM; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 4fb463172aa8..47ae748c3a49 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -18,6 +18,9 @@ #include "bpf_lru_list.h" #include "map_in_map.h" +#define HTAB_CREATE_FLAG_MASK \ + (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE) + struct bucket { struct hlist_nulls_head head; raw_spinlock_t lock; @@ -138,7 +141,8 @@ static int prealloc_init(struct bpf_htab *htab) if (!htab_is_percpu(htab) && !htab_is_lru(htab)) num_entries += num_possible_cpus(); - htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries); + htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries, + htab->map.numa_node); if (!htab->elems) return -ENOMEM; @@ -233,6 +237,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) */ bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); + int numa_node = bpf_map_attr_numa_node(attr); struct bpf_htab *htab; int err, i; u64 cost; @@ -248,7 +253,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) */ return ERR_PTR(-EPERM); - if (attr->map_flags & ~(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU)) + if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK) /* reserved bits should not be used */ return ERR_PTR(-EINVAL); @@ -258,6 +263,9 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) if (lru && !prealloc) return ERR_PTR(-ENOTSUPP); + if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru)) + return ERR_PTR(-EINVAL); + htab = kzalloc(sizeof(*htab), GFP_USER); if (!htab) return ERR_PTR(-ENOMEM); @@ -268,6 +276,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) htab->map.value_size = attr->value_size; htab->map.max_entries = attr->max_entries; htab->map.map_flags = attr->map_flags; + htab->map.numa_node = numa_node; /* check sanity of attributes. 
* value_size == 0 may be allowed in the future to use map as a set @@ -346,7 +355,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) err = -ENOMEM; htab->buckets = bpf_map_area_alloc(htab->n_buckets * - sizeof(struct bucket)); + sizeof(struct bucket), + htab->map.numa_node); if (!htab->buckets) goto free_htab; @@ -689,7 +699,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, atomic_dec(&htab->count); return ERR_PTR(-E2BIG); } - l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN); + l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, + htab->map.numa_node); if (!l_new) return ERR_PTR(-ENOMEM); } diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index b09185f0f17d..1b767844a76f 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -244,7 +244,8 @@ static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie, if (value) size += trie->map.value_size; - node = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN); + node = kmalloc_node(size, GFP_ATOMIC | __GFP_NOWARN, + trie->map.numa_node); if (!node) return NULL; @@ -405,6 +406,8 @@ static int trie_delete_elem(struct bpf_map *map, void *key) #define LPM_KEY_SIZE_MAX LPM_KEY_SIZE(LPM_DATA_SIZE_MAX) #define LPM_KEY_SIZE_MIN LPM_KEY_SIZE(LPM_DATA_SIZE_MIN) +#define LPM_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE) + static struct bpf_map *trie_alloc(union bpf_attr *attr) { struct lpm_trie *trie; @@ -416,7 +419,8 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) /* check sanity of attributes */ if (attr->max_entries == 0 || - attr->map_flags != BPF_F_NO_PREALLOC || + !(attr->map_flags & BPF_F_NO_PREALLOC) || + attr->map_flags & ~LPM_CREATE_FLAG_MASK || attr->key_size < LPM_KEY_SIZE_MIN || attr->key_size > LPM_KEY_SIZE_MAX || attr->value_size < LPM_VAL_SIZE_MIN || @@ -433,6 +437,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) trie->map.value_size = attr->value_size; trie->map.max_entries = attr->max_entries; trie->map.map_flags = attr->map_flags; + trie->map.numa_node = bpf_map_attr_numa_node(attr); trie->data_size = attr->key_size - offsetof(struct bpf_lpm_trie_key, data); trie->max_prefixlen = trie->data_size * 8; diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 39de541fbcdc..78b2bb9370ac 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -443,7 +443,9 @@ static struct smap_psock *smap_init_psock(struct sock *sock, { struct smap_psock *psock; - psock = kzalloc(sizeof(struct smap_psock), GFP_ATOMIC | __GFP_NOWARN); + psock = kzalloc_node(sizeof(struct smap_psock), + GFP_ATOMIC | __GFP_NOWARN, + stab->map.numa_node); if (!psock) return ERR_PTR(-ENOMEM); @@ -465,7 +467,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size != 4 || attr->map_flags) + attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) return ERR_PTR(-EINVAL); if (attr->value_size > KMALLOC_MAX_SIZE) @@ -481,6 +483,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) stab->map.value_size = attr->value_size; stab->map.max_entries = attr->max_entries; stab->map.map_flags = attr->map_flags; + stab->map.numa_node = bpf_map_attr_numa_node(attr); /* make sure page count doesn't overflow */ cost = (u64) stab->map.max_entries * sizeof(struct sock *); @@ -495,7 +498,8 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) goto free_stab; stab->sock_map = bpf_map_area_alloc(stab->map.max_entries * - 
sizeof(struct sock *)); + sizeof(struct sock *), + stab->map.numa_node); if (!stab->sock_map) goto free_stab; diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 31147d730abf..135be433e9a0 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -31,7 +31,8 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; int err; - smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries); + smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries, + smap->map.numa_node); if (!smap->elems) return -ENOMEM; @@ -59,7 +60,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) if (!capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); - if (attr->map_flags) + if (attr->map_flags & ~BPF_F_NUMA_NODE) return ERR_PTR(-EINVAL); /* check sanity of attributes */ @@ -75,7 +76,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) if (cost >= U32_MAX - PAGE_SIZE) return ERR_PTR(-E2BIG); - smap = bpf_map_area_alloc(cost); + smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr)); if (!smap) return ERR_PTR(-ENOMEM); @@ -91,6 +92,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) smap->map.map_flags = attr->map_flags; smap->n_buckets = n_buckets; smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; + smap->map.numa_node = bpf_map_attr_numa_node(attr); err = bpf_map_precharge_memlock(smap->map.pages); if (err) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b8cb1b3c9bfb..9378f3ba2cbf 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -105,7 +105,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) return map; } -void *bpf_map_area_alloc(size_t size) +void *bpf_map_area_alloc(size_t size, int numa_node) { /* We definitely need __GFP_NORETRY, so OOM killer doesn't * trigger under memory pressure as we really just want to @@ -115,12 +115,13 @@ void *bpf_map_area_alloc(size_t size) void *area; if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { - area = kmalloc(size, GFP_USER | flags); + area = kmalloc_node(size, GFP_USER | flags, numa_node); if (area != NULL) return area; } - return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL); + return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags, + __builtin_return_address(0)); } void bpf_map_area_free(void *area) @@ -309,10 +310,11 @@ int bpf_map_new_fd(struct bpf_map *map) offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ sizeof(attr->CMD##_LAST_FIELD)) != NULL -#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd +#define BPF_MAP_CREATE_LAST_FIELD numa_node /* called via syscall */ static int map_create(union bpf_attr *attr) { + int numa_node = bpf_map_attr_numa_node(attr); struct bpf_map *map; int err; @@ -320,6 +322,10 @@ static int map_create(union bpf_attr *attr) if (err) return -EINVAL; + if (numa_node != NUMA_NO_NODE && + (numa_node >= nr_node_ids || !node_online(numa_node))) + return -EINVAL; + /* find map type and init map: hashtable vs rbtree vs bloom vs ... */ map = find_and_alloc_map(attr); if (IS_ERR(map)) -- cgit v1.2.3-55-g7522 From ad17d0e6c708805bf9e6686eb747cc528b702e67 Mon Sep 17 00:00:00 2001 From: Martin KaFai Lau Date: Fri, 18 Aug 2017 11:28:01 -0700 Subject: bpf: Allow numa selection in INNER_LRU_HASH_PREALLOC test of map_perf_test This patch makes the needed changes to allow each process of the INNER_LRU_HASH_PREALLOC test to provide its numa node id when creating the lru map. 
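[Editorial note] Taken together, the kernel-side BPF_F_NUMA_NODE support from the previous commit and this sample change let a loader ask for node-local map memory. Below is a minimal userspace sketch, not taken from either patch, that requests an LRU hash map on a specific NUMA node through the raw bpf(2) syscall. It assumes a uapi/linux/bpf.h that already carries the new flag and numa_node field; the map geometry and node number are arbitrary placeholders.

#include <linux/bpf.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

/* Create an LRU hash map whose elements are allocated on 'node'. */
static int create_lru_map_on_node(int node)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_LRU_HASH;
	attr.key_size    = sizeof(__u32);
	attr.value_size  = sizeof(long);
	attr.max_entries = 4096;			/* placeholder size */
	attr.map_flags   = BPF_F_NUMA_NODE;	/* numa_node is honored only with this flag */
	attr.numa_node   = node;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

int main(void)
{
	int fd = create_lru_map_on_node(1);	/* node 1 is an arbitrary example */

	if (fd < 0)
		perror("BPF_MAP_CREATE");
	return fd < 0;
}
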
Signed-off-by: Martin KaFai Lau Acked-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- samples/bpf/bpf_load.c | 21 ++++++++++++-------- samples/bpf/bpf_load.h | 1 + samples/bpf/map_perf_test_kern.c | 2 ++ samples/bpf/map_perf_test_user.c | 12 +++++++++--- tools/include/uapi/linux/bpf.h | 10 +++++++++- tools/lib/bpf/bpf.c | 32 +++++++++++++++++++++++++++---- tools/lib/bpf/bpf.h | 6 ++++++ tools/testing/selftests/bpf/bpf_helpers.h | 1 + 8 files changed, 69 insertions(+), 16 deletions(-) diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index a8552b8a2ab6..6aa50098dfb8 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -201,7 +201,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) static int load_maps(struct bpf_map_data *maps, int nr_maps, fixup_map_cb fixup_map) { - int i; + int i, numa_node; for (i = 0; i < nr_maps; i++) { if (fixup_map) { @@ -213,21 +213,26 @@ static int load_maps(struct bpf_map_data *maps, int nr_maps, } } + numa_node = maps[i].def.map_flags & BPF_F_NUMA_NODE ? + maps[i].def.numa_node : -1; + if (maps[i].def.type == BPF_MAP_TYPE_ARRAY_OF_MAPS || maps[i].def.type == BPF_MAP_TYPE_HASH_OF_MAPS) { int inner_map_fd = map_fd[maps[i].def.inner_map_idx]; - map_fd[i] = bpf_create_map_in_map(maps[i].def.type, + map_fd[i] = bpf_create_map_in_map_node(maps[i].def.type, maps[i].def.key_size, inner_map_fd, maps[i].def.max_entries, - maps[i].def.map_flags); + maps[i].def.map_flags, + numa_node); } else { - map_fd[i] = bpf_create_map(maps[i].def.type, - maps[i].def.key_size, - maps[i].def.value_size, - maps[i].def.max_entries, - maps[i].def.map_flags); + map_fd[i] = bpf_create_map_node(maps[i].def.type, + maps[i].def.key_size, + maps[i].def.value_size, + maps[i].def.max_entries, + maps[i].def.map_flags, + numa_node); } if (map_fd[i] < 0) { printf("failed to create a map: %d %s\n", diff --git a/samples/bpf/bpf_load.h b/samples/bpf/bpf_load.h index ca0563d04744..453e3226b4ce 100644 --- a/samples/bpf/bpf_load.h +++ b/samples/bpf/bpf_load.h @@ -13,6 +13,7 @@ struct bpf_map_def { unsigned int max_entries; unsigned int map_flags; unsigned int inner_map_idx; + unsigned int numa_node; }; struct bpf_map_data { diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c index 245165817fbe..ca3b22ed577a 100644 --- a/samples/bpf/map_perf_test_kern.c +++ b/samples/bpf/map_perf_test_kern.c @@ -40,6 +40,8 @@ struct bpf_map_def SEC("maps") inner_lru_hash_map = { .key_size = sizeof(u32), .value_size = sizeof(long), .max_entries = MAX_ENTRIES, + .map_flags = BPF_F_NUMA_NODE, + .numa_node = 0, }; struct bpf_map_def SEC("maps") array_of_lru_hashs = { diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c index 1a8894b5ac51..bccbf8478e43 100644 --- a/samples/bpf/map_perf_test_user.c +++ b/samples/bpf/map_perf_test_user.c @@ -97,14 +97,20 @@ static void do_test_lru(enum test_type test, int cpu) if (test == INNER_LRU_HASH_PREALLOC) { int outer_fd = map_fd[array_of_lru_hashs_idx]; + unsigned int mycpu, mynode; assert(cpu < MAX_NR_CPUS); if (cpu) { + ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL); + assert(!ret); + inner_lru_map_fds[cpu] = - bpf_create_map(BPF_MAP_TYPE_LRU_HASH, - sizeof(uint32_t), sizeof(long), - inner_lru_hash_size, 0); + bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH, + sizeof(uint32_t), + sizeof(long), + inner_lru_hash_size, 0, + mynode); if (inner_lru_map_fds[cpu] == -1) { printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n", strerror(errno), errno); 
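[Editorial note] The tools/include and tools/lib/bpf hunks that follow add bpf_create_map_node() and bpf_create_map_in_map_node(). As a sketch only, mirroring what map_perf_test does above, a standalone caller could use the new wrapper to place a map on its current NUMA node and fall back to -1 (no BPF_F_NUMA_NODE set) when getcpu() is unavailable. The include path and map sizes here are assumptions.

#include <unistd.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <linux/bpf.h>
#include "bpf/bpf.h"	/* tools/lib/bpf; adjust the include path to your build */

/* Create the caller's LRU shard on the NUMA node it currently runs on. */
static int create_local_lru_map(unsigned int entries)
{
	unsigned int cpu, node;
	int node_id = -1;	/* -1: do not set BPF_F_NUMA_NODE */

	if (!syscall(__NR_getcpu, &cpu, &node, NULL))
		node_id = node;

	return bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH,
				   sizeof(uint32_t), sizeof(long),
				   entries, 0, node_id);
}

int main(void)
{
	return create_local_lru_map(4096) < 0;
}
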
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 2d97dd27c8f6..f8f6377fd541 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -168,6 +168,7 @@ enum bpf_sockmap_flags { #define BPF_NOEXIST 1 /* create new element if it didn't exist */ #define BPF_EXIST 2 /* update existing element */ +/* flags for BPF_MAP_CREATE command */ #define BPF_F_NO_PREALLOC (1U << 0) /* Instead of having one common LRU list in the * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list @@ -176,6 +177,8 @@ enum bpf_sockmap_flags { * across different LRU lists. */ #define BPF_F_NO_COMMON_LRU (1U << 1) +/* Specify numa node during map creation */ +#define BPF_F_NUMA_NODE (1U << 2) union bpf_attr { struct { /* anonymous struct used by BPF_MAP_CREATE command */ @@ -183,8 +186,13 @@ union bpf_attr { __u32 key_size; /* size of key in bytes */ __u32 value_size; /* size of value in bytes */ __u32 max_entries; /* max number of entries in a map */ - __u32 map_flags; /* prealloc or not */ + __u32 map_flags; /* BPF_MAP_CREATE related + * flags defined above. + */ __u32 inner_map_fd; /* fd pointing to the inner map */ + __u32 numa_node; /* numa node (effective only if + * BPF_F_NUMA_NODE is set). + */ }; struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 77660157a684..a0717610b116 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -57,8 +57,9 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr, return syscall(__NR_bpf, cmd, attr, size); } -int bpf_create_map(enum bpf_map_type map_type, int key_size, - int value_size, int max_entries, __u32 map_flags) +int bpf_create_map_node(enum bpf_map_type map_type, int key_size, + int value_size, int max_entries, __u32 map_flags, + int node) { union bpf_attr attr; @@ -69,12 +70,24 @@ int bpf_create_map(enum bpf_map_type map_type, int key_size, attr.value_size = value_size; attr.max_entries = max_entries; attr.map_flags = map_flags; + if (node >= 0) { + attr.map_flags |= BPF_F_NUMA_NODE; + attr.numa_node = node; + } return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } -int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, - int inner_map_fd, int max_entries, __u32 map_flags) +int bpf_create_map(enum bpf_map_type map_type, int key_size, + int value_size, int max_entries, __u32 map_flags) +{ + return bpf_create_map_node(map_type, key_size, value_size, + max_entries, map_flags, -1); +} + +int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size, + int inner_map_fd, int max_entries, + __u32 map_flags, int node) { union bpf_attr attr; @@ -86,10 +99,21 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, attr.inner_map_fd = inner_map_fd; attr.max_entries = max_entries; attr.map_flags = map_flags; + if (node >= 0) { + attr.map_flags |= BPF_F_NUMA_NODE; + attr.numa_node = node; + } return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); } +int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, + int inner_map_fd, int max_entries, __u32 map_flags) +{ + return bpf_create_map_in_map_node(map_type, key_size, inner_map_fd, + max_entries, map_flags, -1); +} + int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns, size_t insns_cnt, const char *license, __u32 kern_version, char *log_buf, size_t log_buf_sz) diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index eaee585c1cea..90e9d4e85d08 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -24,8 
+24,14 @@ #include #include +int bpf_create_map_node(enum bpf_map_type map_type, int key_size, + int value_size, int max_entries, __u32 map_flags, + int node); int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size, int max_entries, __u32 map_flags); +int bpf_create_map_in_map_node(enum bpf_map_type map_type, int key_size, + int inner_map_fd, int max_entries, + __u32 map_flags, int node); int bpf_create_map_in_map(enum bpf_map_type map_type, int key_size, int inner_map_fd, int max_entries, __u32 map_flags); diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 73092d4a898e..98f3be26d390 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h @@ -94,6 +94,7 @@ struct bpf_map_def { unsigned int max_entries; unsigned int map_flags; unsigned int inner_map_idx; + unsigned int numa_node; }; static int (*bpf_skb_load_bytes)(void *ctx, int off, void *to, int len) = -- cgit v1.2.3-55-g7522 From 89c63074c2bc25874e4e72406ff15a9a8e3df750 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 19 Aug 2017 03:12:45 +0200 Subject: bpf: make htab inlining more robust wrt assumptions Commit 9015d2f59535 ("bpf: inline htab_map_lookup_elem()") was making the assumption that a direct call emission to the function __htab_map_lookup_elem() will always work out for JITs. This is currently true since all JITs we have are for 64 bit archs, but in case of 32 bit JITs like upcoming arm32, we get a NULL pointer dereference when executing the call to __htab_map_lookup_elem() since passed arguments are of a different size (due to pointer args) than what we do out of BPF. Guard and thus limit this for now for the current 64 bit JITs only. Reported-by: Shubham Bansal Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/verifier.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 4f6e7eb42ba0..e42c096ba20d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -4160,7 +4160,11 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) continue; } - if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) { + /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup + * handlers are currently limited to 64 bit only. + */ + if (ebpf_jit_enabled() && BITS_PER_LONG == 64 && + insn->imm == BPF_FUNC_map_lookup_elem) { map_ptr = env->insn_aux_data[i + delta].map_ptr; if (map_ptr == BPF_MAP_PTR_POISON || !map_ptr->ops->map_gen_lookup) -- cgit v1.2.3-55-g7522 From 7b0c2a0508b90fce79d3782b2e55d0e8bf6a283e Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 19 Aug 2017 03:12:46 +0200 Subject: bpf: inline map in map lookup functions for array and htab Avoid two successive functions calls for the map in map lookup, first is the bpf_map_lookup_elem() helper call, and second the callback via map->ops->map_lookup_elem() to get to the map in map implementation. Implementation inlines array and htab flavor for map in map lookups. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- kernel/bpf/arraymap.c | 26 ++++++++++++++++++++++++++ kernel/bpf/hashtab.c | 17 +++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 96e9c5c1dfc9..98c0f00c3f5e 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -606,6 +606,31 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) return READ_ONCE(*inner_map); } +static u32 array_of_map_gen_lookup(struct bpf_map *map, + struct bpf_insn *insn_buf) +{ + u32 elem_size = round_up(map->value_size, 8); + struct bpf_insn *insn = insn_buf; + const int ret = BPF_REG_0; + const int map_ptr = BPF_REG_1; + const int index = BPF_REG_2; + + *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); + *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); + *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); + if (is_power_of_2(elem_size)) + *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); + else + *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); + *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); + *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0); + *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); + *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); + *insn++ = BPF_MOV64_IMM(ret, 0); + + return insn - insn_buf; +} + const struct bpf_map_ops array_of_maps_map_ops = { .map_alloc = array_of_map_alloc, .map_free = array_of_map_free, @@ -615,4 +640,5 @@ const struct bpf_map_ops array_of_maps_map_ops = { .map_fd_get_ptr = bpf_map_fd_get_ptr, .map_fd_put_ptr = bpf_map_fd_put_ptr, .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, + .map_gen_lookup = array_of_map_gen_lookup, }; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 47ae748c3a49..ae822de4a90a 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1322,6 +1322,22 @@ static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key) return READ_ONCE(*inner_map); } +static u32 htab_of_map_gen_lookup(struct bpf_map *map, + struct bpf_insn *insn_buf) +{ + struct bpf_insn *insn = insn_buf; + const int ret = BPF_REG_0; + + *insn++ = BPF_EMIT_CALL((u64 (*)(u64, u64, u64, u64, u64))__htab_map_lookup_elem); + *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2); + *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, + offsetof(struct htab_elem, key) + + round_up(map->key_size, 8)); + *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0); + + return insn - insn_buf; +} + static void htab_of_map_free(struct bpf_map *map) { bpf_map_meta_free(map->inner_map_meta); @@ -1337,4 +1353,5 @@ const struct bpf_map_ops htab_of_maps_map_ops = { .map_fd_get_ptr = bpf_map_fd_get_ptr, .map_fd_put_ptr = bpf_map_fd_put_ptr, .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, + .map_gen_lookup = htab_of_map_gen_lookup, }; -- cgit v1.2.3-55-g7522 From d6e1e46f69fbe956e877cdd00dbfb002baddf577 Mon Sep 17 00:00:00 2001 From: David S. Miller Date: Sat, 19 Aug 2017 23:34:03 -0700 Subject: bpf: linux/bpf.h needs linux/numa.h Reported-by: kbuild test robot Signed-off-by: David S. 
Miller --- include/linux/bpf.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 55b88e329804..830f472d8df5 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -14,6 +14,7 @@ #include #include #include +#include struct perf_event; struct bpf_prog; -- cgit v1.2.3-55-g7522 From 63bfd399de55e09369e5ab344d1fb7f86ed73bab Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Sun, 5 Feb 2017 17:57:40 +0200 Subject: net/mlx5e: Send PAOS command on interface up/down Upon interface up/down, driver will send PAOS (Ports Administrative and Operational Status Register) in order to inform the Firmware on the desired status of the port by the driver. Since now we might change physical link status on mlx5e_open/close, logical VF representor should not use mlx5e_open/close ndos as is, and should call the logical version mlx5e_open/closed_locked. Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7 +++++++ drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 20 +++++++++++++------- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 2fc3832bc2f3..7c512a4c6d5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -2682,6 +2682,8 @@ int mlx5e_open(struct net_device *netdev) mutex_lock(&priv->state_lock); err = mlx5e_open_locked(netdev); + if (!err) + mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP); mutex_unlock(&priv->state_lock); return err; @@ -2716,6 +2718,7 @@ int mlx5e_close(struct net_device *netdev) return -ENODEV; mutex_lock(&priv->state_lock); + mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN); err = mlx5e_close_locked(netdev); mutex_unlock(&priv->state_lock); @@ -4187,6 +4190,10 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5e_init_l2_addr(priv); + /* Marking the link as currently not needed by the Driver */ + if (!netif_running(netdev)) + mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + /* MTU range: 68 - hw-specific max */ netdev->min_mtu = ETH_MIN_MTU; mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 7a9f53f74976..45c088c10ee1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -613,15 +613,18 @@ static int mlx5e_rep_open(struct net_device *dev) struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; int err; - err = mlx5e_open(dev); + mutex_lock(&priv->state_lock); + err = mlx5e_open_locked(dev); if (err) - return err; + goto unlock; - err = mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP); - if (!err) + if (!mlx5_eswitch_set_vport_state(esw, rep->vport, + MLX5_ESW_VPORT_ADMIN_STATE_UP)) netif_carrier_on(dev); - return 0; +unlock: + mutex_unlock(&priv->state_lock); + return err; } static int mlx5e_rep_close(struct net_device *dev) @@ -630,10 +633,13 @@ static int mlx5e_rep_close(struct net_device *dev) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + int ret; + mutex_lock(&priv->state_lock); (void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN); - - return mlx5e_close(dev); + ret = mlx5e_close_locked(dev); + 
mutex_unlock(&priv->state_lock); + return ret; } static int mlx5e_rep_get_phys_port_name(struct net_device *dev, -- cgit v1.2.3-55-g7522 From 1d1c3436119a6fb92adb8ae72d9a1bcb7f5d0d6a Mon Sep 17 00:00:00 2001 From: Feras Daoud Date: Mon, 10 Jul 2017 18:35:06 +0300 Subject: net/mlx5e: IPoIB, Fix driver name retrieved by ethtool Printing an enhanced IPoIB device information using "ethtool -i DEVNAME", prints the low level driver name: mlx5_core. This commit changes the name to mlx5_core [ib_ipoib], to include the ipoib device driver infromation. Fixes: 076b0936e5fb ("net/mlx5e: IPoIB, Add ethtool support") Signed-off-by: Feras Daoud Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index eb04e97d8765..b080fabfe8de 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -39,6 +39,8 @@ static void mlx5i_get_drvinfo(struct net_device *dev, struct mlx5e_priv *priv = mlx5i_epriv(dev); mlx5e_ethtool_get_drvinfo(priv, drvinfo); + strlcpy(drvinfo->driver, DRIVER_NAME "[ib_ipoib]", + sizeof(drvinfo->driver)); } static void mlx5i_get_strings(struct net_device *dev, -- cgit v1.2.3-55-g7522 From eb234ee9d5413368c8bef5f4061cd76da0b5dd55 Mon Sep 17 00:00:00 2001 From: Shalom Lagziel Date: Sun, 28 May 2017 16:40:24 +0300 Subject: net/mlx5e: IPoIB, Add support for get_link_ksettings in ethtool Add support for "ethtool DEVNAME" over ipoib ports, Display standard port information for IPoIB netdevices using ethtool For example: $ ethtool ib2 > Settings for ib2: Supported ports: [ ] Supported link modes: Not reported Supported pause frame use: No Supports auto-negotiation: No Advertised link modes: Not reported Advertised pause frame use: No Advertised auto-negotiation: No Speed: 100000Mb/s Duplex: Full Port: Other PHYAD: 0 Transceiver: internal Auto-negotiation: off Link detected: yes Signed-off-by: Shalom Lagziel Signed-off-by: Saeed Mahameed --- .../ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 130 +++++++++++++++++++-- 1 file changed, 118 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index b080fabfe8de..dd49a59854e5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -131,17 +131,123 @@ static int mlx5i_flash_device(struct net_device *netdev, return mlx5e_ethtool_flash_device(priv, flash); } +enum mlx5_ptys_width { + MLX5_PTYS_WIDTH_1X = 1 << 0, + MLX5_PTYS_WIDTH_2X = 1 << 1, + MLX5_PTYS_WIDTH_4X = 1 << 2, + MLX5_PTYS_WIDTH_8X = 1 << 3, + MLX5_PTYS_WIDTH_12X = 1 << 4, +}; + +static inline int mlx5_ptys_width_enum_to_int(enum mlx5_ptys_width width) +{ + switch (width) { + case MLX5_PTYS_WIDTH_1X: return 1; + case MLX5_PTYS_WIDTH_2X: return 2; + case MLX5_PTYS_WIDTH_4X: return 4; + case MLX5_PTYS_WIDTH_8X: return 8; + case MLX5_PTYS_WIDTH_12X: return 12; + default: return -1; + } +} + +enum mlx5_ptys_rate { + MLX5_PTYS_RATE_SDR = 1 << 0, + MLX5_PTYS_RATE_DDR = 1 << 1, + MLX5_PTYS_RATE_QDR = 1 << 2, + MLX5_PTYS_RATE_FDR10 = 1 << 3, + MLX5_PTYS_RATE_FDR = 1 << 4, + MLX5_PTYS_RATE_EDR = 1 << 5, + MLX5_PTYS_RATE_HDR = 1 << 6, +}; + +static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate) +{ + switch (rate) { + case 
MLX5_PTYS_RATE_SDR: return 2500; + case MLX5_PTYS_RATE_DDR: return 5000; + case MLX5_PTYS_RATE_QDR: + case MLX5_PTYS_RATE_FDR10: return 10000; + case MLX5_PTYS_RATE_FDR: return 14000; + case MLX5_PTYS_RATE_EDR: return 25000; + case MLX5_PTYS_RATE_HDR: return 50000; + default: return -1; + } +} + +static int mlx5i_get_port_settings(struct net_device *netdev, + u16 *ib_link_width_oper, u16 *ib_proto_oper) +{ + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; + int ret; + + ret = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_IB, 1); + if (ret) + return ret; + + *ib_link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper); + *ib_proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); + + return 0; +} + +static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper) +{ + int rate, width; + + rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper); + if (rate < 0) + return -EINVAL; + width = mlx5_ptys_width_enum_to_int(ib_link_width_oper); + if (width < 0) + return -EINVAL; + + return rate * width; +} + +static int mlx5i_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_ksettings) +{ + u16 ib_link_width_oper; + u16 ib_proto_oper; + int speed, ret; + + ret = mlx5i_get_port_settings(netdev, &ib_link_width_oper, &ib_proto_oper); + if (ret) + return ret; + + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + + speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper); + if (speed < 0) + return -EINVAL; + + link_ksettings->base.duplex = DUPLEX_FULL; + link_ksettings->base.port = PORT_OTHER; + + link_ksettings->base.autoneg = AUTONEG_DISABLE; + + link_ksettings->base.speed = speed; + + return 0; +} + const struct ethtool_ops mlx5i_ethtool_ops = { - .get_drvinfo = mlx5i_get_drvinfo, - .get_strings = mlx5i_get_strings, - .get_sset_count = mlx5i_get_sset_count, - .get_ethtool_stats = mlx5i_get_ethtool_stats, - .get_ringparam = mlx5i_get_ringparam, - .set_ringparam = mlx5i_set_ringparam, - .flash_device = mlx5i_flash_device, - .get_channels = mlx5i_get_channels, - .set_channels = mlx5i_set_channels, - .get_coalesce = mlx5i_get_coalesce, - .set_coalesce = mlx5i_set_coalesce, - .get_ts_info = mlx5i_get_ts_info, + .get_drvinfo = mlx5i_get_drvinfo, + .get_strings = mlx5i_get_strings, + .get_sset_count = mlx5i_get_sset_count, + .get_ethtool_stats = mlx5i_get_ethtool_stats, + .get_ringparam = mlx5i_get_ringparam, + .set_ringparam = mlx5i_set_ringparam, + .flash_device = mlx5i_flash_device, + .get_channels = mlx5i_get_channels, + .set_channels = mlx5i_set_channels, + .get_coalesce = mlx5i_get_coalesce, + .set_coalesce = mlx5i_set_coalesce, + .get_ts_info = mlx5i_get_ts_info, + .get_link_ksettings = mlx5i_get_link_ksettings, + .get_link = ethtool_op_get_link, }; -- cgit v1.2.3-55-g7522 From 5405fa26c25e18ab735daddc46c8a7a78f138f70 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 15 Jun 2017 18:29:23 +0300 Subject: net/mlx5: Add PCIe outbound stalls counters infrastructure Add capability bit in MCAM register and counters to MPCNT register. 
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index c99daffc3c3c..ba533b39c885 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1854,7 +1854,17 @@ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits { u8 crc_error_tlp[0x20]; - u8 reserved_at_140[0x680]; + u8 reserved_at_140[0x40]; + + u8 outbound_stalled_reads[0x20]; + + u8 outbound_stalled_writes[0x20]; + + u8 outbound_stalled_reads_events[0x20]; + + u8 outbound_stalled_writes_events[0x20]; + + u8 reserved_at_200[0x5c0]; }; struct mlx5_ifc_cmd_inter_comp_event_bits { @@ -7744,8 +7754,9 @@ struct mlx5_ifc_pcam_reg_bits { }; struct mlx5_ifc_mcam_enhanced_features_bits { - u8 reserved_at_0[0x7d]; - + u8 reserved_at_0[0x7b]; + u8 pcie_outbound_stalled[0x1]; + u8 reserved_at_7c[0x1]; u8 mtpps_enh_out_per_adj[0x1]; u8 mtpps_fs[0x1]; u8 pcie_performance_group[0x1]; -- cgit v1.2.3-55-g7522 From 73e90646a275aeffea263305c8662f8dd0cb41ef Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Thu, 15 Jun 2017 18:29:32 +0300 Subject: net/mlx5e: Add PCIe outbound stalls counters outbound_pci_stalled_rd - The percentage of time within the last second that the NIC had outbound non-posted read requests but could not perform the operation due to insufficient non-posted credits. outbound_pci_stalled_wr - The percentage of time within the last second that the NIC had outbound posted writes requests but could not perform the operation due to insufficient posted credits. outbound_pci_stalled_rd_events - The number of events where outbound_pci_stalled_rd was above the threshold. outbound_pci_stalled_wr_events - The number of events where outbound_pci_stalled_wr was above the threshold. 
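[Editorial note] These four counters appear as additional "ethtool -S" statistics once the en_ethtool.c/en_stats.h hunks below wire them up. As an illustration only, and not part of this series, a monitoring tool could read them over the same SIOCETHTOOL/ETHTOOL_GSTATS interface that ethtool uses; the interface name is a placeholder and error handling is minimal.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	const char *dev = "eth0";			/* placeholder interface name */
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	struct ifreq ifr;
	unsigned int i, n;
	int fd;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	ifr.ifr_data = (void *)&drvinfo;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr))
		return 1;
	n = drvinfo.n_stats;

	strings = calloc(1, sizeof(*strings) + n * ETH_GSTRING_LEN);
	stats = calloc(1, sizeof(*stats) + n * sizeof(__u64));
	if (!strings || !stats)
		return 1;

	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = n;
	ifr.ifr_data = (void *)strings;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return 1;

	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	ifr.ifr_data = (void *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return 1;

	for (i = 0; i < n; i++) {
		const char *name = (const char *)&strings->data[i * ETH_GSTRING_LEN];

		/* Print only the stall counters added by this patch set. */
		if (!strncmp(name, "outbound_pci_stalled", 20))
			printf("%-34s %llu\n", name,
			       (unsigned long long)stats->data[i]);
	}

	close(fd);
	return 0;
}
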
Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8 ++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 13 ++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 917fade5f5d5..07202f7322fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -246,6 +246,10 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pcie_perf_stats_desc[i].format); + for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stall_stats_desc[i].format); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) sprintf(data + (idx++) * ETH_GSTRING_LEN, @@ -377,6 +381,10 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, pcie_perf_stats_desc, i); + for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stall_stats_desc, i); + for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index e65517eafc58..bdddddc46170 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -305,6 +305,13 @@ static const struct counter_desc pcie_perf_stats_desc[] = { { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, }; +static const struct counter_desc pcie_perf_stall_stats_desc[] = { + { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) }, + { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) }, + { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) }, + { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) }, +}; + struct mlx5e_rq_stats { u64 packets; u64 bytes; @@ -397,6 +404,9 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PCIE_PERF_COUNTERS(priv) \ (ARRAY_SIZE(pcie_perf_stats_desc) * \ MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) +#define NUM_PCIE_PERF_STALL_COUNTERS(priv) \ + (ARRAY_SIZE(pcie_perf_stall_stats_desc) * \ + MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled)) #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ @@ -407,7 +417,8 @@ static const struct counter_desc sq_stats_desc[] = { NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ NUM_PPORT_PRIO) -#define NUM_PCIE_COUNTERS(priv) NUM_PCIE_PERF_COUNTERS(priv) +#define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \ + NUM_PCIE_PERF_STALL_COUNTERS(priv)) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) -- cgit v1.2.3-55-g7522 From 2dba07975acbc30c77a22d38030ee5a86ab8a748 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 18 Jun 2017 14:56:45 +0300 Subject: net/mlx5: Add RX buffer fullness counters infrastructure Add capability bit in PCAM 
register and counters to PPCNT register. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- include/linux/mlx5/mlx5_ifc.h | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index ba533b39c885..cf7ff52c594e 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1538,7 +1538,17 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits { u8 port_transmit_wait_low[0x20]; - u8 reserved_at_40[0x780]; + u8 reserved_at_40[0x100]; + + u8 rx_buffer_almost_full_high[0x20]; + + u8 rx_buffer_almost_full_low[0x20]; + + u8 rx_buffer_full_high[0x20]; + + u8 rx_buffer_full_low[0x20]; + + u8 reserved_at_1c0[0x600]; }; struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { @@ -7723,8 +7733,9 @@ struct mlx5_ifc_peir_reg_bits { }; struct mlx5_ifc_pcam_enhanced_features_bits { - u8 reserved_at_0[0x7c]; + u8 reserved_at_0[0x7b]; + u8 rx_buffer_fullness_counters[0x1]; u8 ptys_connector_type[0x1]; u8 reserved_at_7d[0x1]; u8 ppcnt_discard_group[0x1]; -- cgit v1.2.3-55-g7522 From 068aef33bebcb1962720e44525c8d4aff6c2ee3d Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 18 Jun 2017 14:56:57 +0300 Subject: net/mlx5e: Add RX buffer fullness counters rx_buffer_passed_thres_phy - The number of events where the port RX buffer has passed a fullness threshold. rx_buffer_full_phy - The number of events where the port RX buffer has reached 100% fullness. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8 ++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 6 ++++++ drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 17 ++++++++++++++++- 3 files changed, 30 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 07202f7322fc..8c013a521319 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -242,6 +242,10 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_phy_statistical_stats_desc[i].format); + for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pport_eth_ext_stats_desc[i].format); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, pcie_perf_stats_desc[i].format); @@ -377,6 +381,10 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters, pport_phy_statistical_stats_desc, i); + for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters, + pport_eth_ext_stats_desc, i); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++) data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, pcie_perf_stats_desc, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 7c512a4c6d5c..fdc2b92f020b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -288,6 +288,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full) mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); } + if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) { + out = 
pstats->eth_ext_counters; + MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP); + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0); + } + MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { out = pstats->per_prio_counters[prio]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index bdddddc46170..be49df4bedd9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -216,6 +216,12 @@ static const struct counter_desc vport_stats_desc[] = { MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \ counter_set.eth_per_prio_grp_data_layout.c##_high) #define NUM_PPORT_PRIO 8 +#define PPORT_ETH_EXT_OFF(c) \ + MLX5_BYTE_OFF(ppcnt_reg, \ + counter_set.eth_extended_cntrs_grp_data_layout.c##_high) +#define PPORT_ETH_EXT_GET(pstats, c) \ + MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \ + counter_set.eth_extended_cntrs_grp_data_layout.c##_high) struct mlx5e_pport_stats { __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; @@ -224,6 +230,7 @@ struct mlx5e_pport_stats { __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; + __be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)]; }; static const struct counter_desc pport_802_3_stats_desc[] = { @@ -290,6 +297,10 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, }; +static const struct counter_desc pport_eth_ext_stats_desc[] = { + { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) }, +}; + #define PCIE_PERF_OFF(c) \ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) #define PCIE_PERF_GET(pcie_stats, c) \ @@ -411,12 +422,16 @@ static const struct counter_desc sq_stats_desc[] = { ARRAY_SIZE(pport_per_prio_traffic_stats_desc) #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ ARRAY_SIZE(pport_per_prio_pfc_stats_desc) +#define NUM_PPORT_ETH_EXT_COUNTERS(priv) \ + (ARRAY_SIZE(pport_eth_ext_stats_desc) * \ + MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters)) #define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \ NUM_PPORT_2863_COUNTERS + \ NUM_PPORT_2819_COUNTERS + \ NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ - NUM_PPORT_PRIO) + NUM_PPORT_PRIO + \ + NUM_PPORT_ETH_EXT_COUNTERS(priv)) #define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \ NUM_PCIE_PERF_STALL_COUNTERS(priv)) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) -- cgit v1.2.3-55-g7522 From efae7f78c45ba37bdc23a95d219b59ac85bdd0a7 Mon Sep 17 00:00:00 2001 From: Eran Ben Elisha Date: Fri, 12 May 2017 02:47:02 +0300 Subject: net/mlx5e: Add outbound PCI buffer overflow counter Add outbound_pci_buffer_overflow to ethtool output for monitoring the number of packets that were dropped due to lack of PCIe buffers on receive path from NIC port toward the host(s). This counter is valid only in case that tx_overflow_buffer_pkt is supported in MCAM enhanced features. 
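[Editorial note] Because the string is only registered when the MCAM tx_overflow_buffer_pkt bit is present, the simplest runtime check for support is whether "outbound_pci_buffer_overflow" shows up in the ethtool stats at all. The counter is cumulative, so monitoring normally looks at deltas; a minimal sketch of that arithmetic follows, with made-up sample values.

#include <stdio.h>
#include <stdint.h>

/* Packets dropped per second due to PCIe buffer exhaustion, from two
 * cumulative samples of outbound_pci_buffer_overflow taken dt seconds apart.
 */
static double pcie_overflow_pps(uint64_t prev, uint64_t cur, double dt)
{
	return (double)(cur - prev) / dt;
}

int main(void)
{
	/* e.g. two samples taken two seconds apart */
	printf("%.1f dropped pkt/s\n", pcie_overflow_pps(1000, 1600, 2.0));
	return 0;
}
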
Signed-off-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 12 ++++++++++-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 14 ++++++++++++++ include/linux/mlx5/mlx5_ifc.h | 6 ++++-- 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 8c013a521319..d453a11f41fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -250,9 +250,13 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) strcpy(data + (idx++) * ETH_GSTRING_LEN, pcie_perf_stats_desc[i].format); - for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++) + for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++) strcpy(data + (idx++) * ETH_GSTRING_LEN, - pcie_perf_stall_stats_desc[i].format); + pcie_perf_stats_desc64[i].format); + + for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++) + strcpy(data + (idx++) * ETH_GSTRING_LEN, + pcie_perf_stall_stats_desc[i].format); for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) @@ -389,6 +393,10 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, pcie_perf_stats_desc, i); + for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++) + data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters, + pcie_perf_stats_desc64, i); + for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++) data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, pcie_perf_stall_stats_desc, i); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index be49df4bedd9..40b5c73e5e26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -307,6 +307,12 @@ static const struct counter_desc pport_eth_ext_stats_desc[] = { MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ counter_set.pcie_perf_cntrs_grp_data_layout.c) +#define PCIE_PERF_OFF64(c) \ + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high) +#define PCIE_PERF_GET64(pcie_stats, c) \ + MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \ + counter_set.pcie_perf_cntrs_grp_data_layout.c##_high) + struct mlx5e_pcie_stats { __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; }; @@ -316,6 +322,10 @@ static const struct counter_desc pcie_perf_stats_desc[] = { { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, }; +static const struct counter_desc pcie_perf_stats_desc64[] = { + { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) }, +}; + static const struct counter_desc pcie_perf_stall_stats_desc[] = { { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) }, { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) }, @@ -415,6 +425,9 @@ static const struct counter_desc sq_stats_desc[] = { #define NUM_PCIE_PERF_COUNTERS(priv) \ (ARRAY_SIZE(pcie_perf_stats_desc) * \ MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group)) +#define NUM_PCIE_PERF_COUNTERS64(priv) \ + (ARRAY_SIZE(pcie_perf_stats_desc64) * \ + MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt)) #define NUM_PCIE_PERF_STALL_COUNTERS(priv) \ (ARRAY_SIZE(pcie_perf_stall_stats_desc) * \ MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled)) @@ 
-433,6 +446,7 @@ static const struct counter_desc sq_stats_desc[] = { NUM_PPORT_PRIO + \ NUM_PPORT_ETH_EXT_COUNTERS(priv)) #define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \ + NUM_PCIE_PERF_COUNTERS64(priv) +\ NUM_PCIE_PERF_STALL_COUNTERS(priv)) #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index cf7ff52c594e..ae7d09b9c52f 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1864,7 +1864,9 @@ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits { u8 crc_error_tlp[0x20]; - u8 reserved_at_140[0x40]; + u8 tx_overflow_buffer_pkt_high[0x20]; + + u8 tx_overflow_buffer_pkt_low[0x20]; u8 outbound_stalled_reads[0x20]; @@ -7767,7 +7769,7 @@ struct mlx5_ifc_pcam_reg_bits { struct mlx5_ifc_mcam_enhanced_features_bits { u8 reserved_at_0[0x7b]; u8 pcie_outbound_stalled[0x1]; - u8 reserved_at_7c[0x1]; + u8 tx_overflow_buffer_pkt[0x1]; u8 mtpps_enh_out_per_adj[0x1]; u8 mtpps_fs[0x1]; u8 pcie_performance_group[0x1]; -- cgit v1.2.3-55-g7522 From 733d6c5149b0fb8628bb0db618edd9bf49b496e6 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 11 Jul 2017 15:44:04 +0300 Subject: net/mlx5: Avoid blank lines after/before open/close brace To fix these checkpatch complaints: CHECK: Blank lines aren't necessary after an open brace '{' CHECK: Blank lines aren't necessary before a close brace '}' Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index d453a11f41fe..a75ac4d11c5b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -176,7 +176,6 @@ static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv) int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) { - switch (sset) { case ETH_SS_STATS: return NUM_SW_COUNTERS + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 5e7ffc9fad78..55b07c5ecd12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -71,7 +71,6 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) sriov->vfs_ctx[vf].enabled = 1; sriov->enabled_vfs++; mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf); - } return 0; -- cgit v1.2.3-55-g7522 From ad5b39a95c8339d069be46f13562a0b6a20d5185 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 11 Jul 2017 15:53:29 +0300 Subject: net/mlx5: Add a blank line after declarations To fix these checkpatch complaints: WARNING: Missing a blank line after declarations Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/eq.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c index 3c95f7f53802..47239bf7bf43 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c @@ -258,6 +258,7 @@ EXPORT_SYMBOL_GPL(mlx5_db_alloc); void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db) { u32 db_per_page = PAGE_SIZE / 
cache_line_size(); + mutex_lock(&dev->priv.pgdir_mutex); __set_bit(db->index, db->u.pgdir->bitmap); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index de704ff5619a..a08027b8f3ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -188,6 +188,7 @@ static enum mlx5_dev_event port_subtype_event(u8 subtype) static void eq_update_ci(struct mlx5_eq *eq, int arm) { __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2); + u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); __raw_writel((__force u32)cpu_to_be32(val), addr); /* We still want ordering, just not swabbing, so add a barrier */ -- cgit v1.2.3-55-g7522 From 61bf2125656259dcd0a34b14839f2b157a16bde9 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 11 Jul 2017 15:36:11 +0300 Subject: net/mlx5e: Properly indent within conditional statements To fix these checkpatch complaints: WARNING: suspect code indent for conditional statements (8, 24) + if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR) [...] + return PORT_FIBRE; Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 31 ++++++++++++---------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a75ac4d11c5b..1f3d87e28618 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -987,24 +987,27 @@ static u8 get_connector_port(u32 eth_proto, u8 connector_type) if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER) return ptys2connector_type[connector_type]; - if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR) - | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) - | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) - | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { - return PORT_FIBRE; + if (eth_proto & + (MLX5E_PROT_MASK(MLX5E_10GBASE_SR) | + MLX5E_PROT_MASK(MLX5E_40GBASE_SR4) | + MLX5E_PROT_MASK(MLX5E_100GBASE_SR4) | + MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) { + return PORT_FIBRE; } - if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) - | MLX5E_PROT_MASK(MLX5E_10GBASE_CR) - | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) { - return PORT_DA; + if (eth_proto & + (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) | + MLX5E_PROT_MASK(MLX5E_10GBASE_CR) | + MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) { + return PORT_DA; } - if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) - | MLX5E_PROT_MASK(MLX5E_10GBASE_KR) - | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) - | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) { - return PORT_NONE; + if (eth_proto & + (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4) | + MLX5E_PROT_MASK(MLX5E_10GBASE_KR) | + MLX5E_PROT_MASK(MLX5E_40GBASE_KR4) | + MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) { + return PORT_NONE; } return PORT_OTHER; -- cgit v1.2.3-55-g7522 From 12148f5ab024f1c899e545b8da0d47d3dc9b80fb Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 11 Jul 2017 15:40:30 +0300 Subject: net/mlx5e: Avoid using multiple blank lines To fix these checkpatch complaints: CHECK: Please don't use multiple blank lines Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 1 - drivers/net/ethernet/mellanox/mlx5/core/main.c | 1 - 2 files changed, 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 31cbe5e86a01..0ef68a7c051e 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -802,7 +802,6 @@ static void cmd_work_handler(struct work_struct *work) bool poll_cmd = ent->polling; int alloc_ret; - sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; down(sem); if (!ent->page_queue) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 7e6e24398926..514c22d21729 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -836,7 +836,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) return -EOPNOTSUPP; } - static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv) { struct pci_dev *pdev = dev->pdev; -- cgit v1.2.3-55-g7522 From 1afdb7718fde9bcf59af85e861fa666b9476d927 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Tue, 11 Jul 2017 16:10:50 +0300 Subject: net/mlx5e: Place constants on the right side of comparisons To fix these checkpatch complaints: WARNING: Comparisons should place the constant on the right side of the test Signed-off-by: Or Gerlitz Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 8e224bcbc6a6..55a6786d3c4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -509,8 +509,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, u16 tot_len; u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe); - int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) || - (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type)); + int tcp_ack = ((l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) || + (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA)); skb->mac_len = ETH_HLEN; proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth); -- cgit v1.2.3-55-g7522 From c045deef64f389f40f135c71b2ded3405c989fba Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 28 Jun 2017 14:03:36 +0300 Subject: net/mlx5e: Use kernel types instead of uint*_t in ethtool callbacks Fix checkpatch errors: CHECK:PREFER_KERNEL_TYPES: Prefer kernel type 'u32' over 'uint32_t' Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8 +++----- drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 3 +-- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 1f3d87e28618..c30cf6b4736f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -206,7 +206,7 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset) return mlx5e_ethtool_get_sset_count(priv, sset); } -static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) +static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data) { int i, j, tc, prio, idx = 0; unsigned long pfc_combined; @@ -308,8 +308,7 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data) priv->channel_tc2txq[i][tc]); } -void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, - uint32_t stringset, uint8_t *data) +void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data) { int i; @@ -331,8 +330,7 @@ void 
mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, } } -static void mlx5e_get_strings(struct net_device *dev, - uint32_t stringset, uint8_t *data) +static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct mlx5e_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index dd49a59854e5..43c126c63955 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -43,8 +43,7 @@ static void mlx5i_get_drvinfo(struct net_device *dev, sizeof(drvinfo->driver)); } -static void mlx5i_get_strings(struct net_device *dev, - uint32_t stringset, uint8_t *data) +static void mlx5i_get_strings(struct net_device *dev, u32 stringset, u8 *data) { struct mlx5e_priv *priv = mlx5i_epriv(dev); -- cgit v1.2.3-55-g7522 From 9da5106c5656fdd7626af8abc09677364055f2c9 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 2 Jul 2017 18:26:21 +0300 Subject: net/mlx5e: Use size_t to store byte offset in statistics descriptors The byte offset of counter descriptors should be stored in size_t variable instead of an integer. Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 40b5c73e5e26..6761796e803c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -47,7 +47,7 @@ struct counter_desc { char format[ETH_GSTRING_LEN]; - int offset; /* Byte offset */ + size_t offset; /* Byte offset */ }; struct mlx5e_sw_stats { -- cgit v1.2.3-55-g7522 From f00c2987bfdde69c6eb27b5547d222beb5bc7552 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 20 Aug 2017 18:25:02 +0200 Subject: ieee802154: ca8210: Fix a potential NULL pointer dereference 'spi' is known to be NULL, so we dereference a NULL pointer here. Use 'pr_crit()' instead of 'dev_crit()' to report the message. Signed-off-by: Christophe JAILLET Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/ca8210.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 326243fae7e2..24a1eabbbc9d 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -917,10 +917,7 @@ static int ca8210_spi_transfer( struct cas_control *cas_ctl; if (!spi) { - dev_crit( - &spi->dev, - "NULL spi device passed to ca8210_spi_transfer\n" - ); + pr_crit("NULL spi device passed to %s\n", __func__); return -ENODEV; } -- cgit v1.2.3-55-g7522 From 274043c6c95636e62f5b2514e78fdba82eb47601 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 21 Aug 2017 01:48:12 +0200 Subject: bpf: fix double free from dev_map_notification() In the current code, dev_map_free() can still race with dev_map_notification(). In dev_map_free(), we remove dtab from the list of dtabs after we purged all entries from it. However, we don't do xchg() with NULL or the like, so the entry at that point is still pointing to the device. If a unregister notification comes in at the same time, we therefore risk a double-free, since the pointer is still present in the map, and then pushed again to __dev_map_entry_free(). All this is completely unnecessary. 
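The fix boils down to the standard RCU teardown ordering; a minimal kernel-style sketch of that ordering, with made-up ex_ names rather than the map's real symbols:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* illustrative stand-in for the devmap table */
struct ex_table {
	struct list_head list;		/* linked on a notifier-visible list */
};

static DEFINE_SPINLOCK(ex_tables_lock);

static void ex_table_free(struct ex_table *tab)
{
	/* 1) Unlink first, so the notifier can no longer find the table. */
	spin_lock(&ex_tables_lock);
	list_del_rcu(&tab->list);
	spin_unlock(&ex_tables_lock);

	/* 2) Wait for any notifier callback already walking the list. */
	synchronize_rcu();

	/* 3) Only now release the memory; nothing else can reach it, so
	 *    the double-free window is closed.
	 */
	kfree(tab);
}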
Just remove the dtab from the list right before the synchronize_rcu(), so all outstanding readers from the notifier list have finished by then, thus we don't need to deal with this corner case anymore and also wouldn't need to nullify dev entires. This is fine because we iterate over the map releasing all entries and therefore dev references anyway. Fixes: 4cc7b9544b9a ("bpf: devmap fix mutex in rcu critical section") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/devmap.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 67f4f00ce33a..fa08181d1c3d 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -148,6 +148,11 @@ static void dev_map_free(struct bpf_map *map) * no further reads against netdev_map. It does __not__ ensure pending * flush operations (if any) are complete. */ + + spin_lock(&dev_map_lock); + list_del_rcu(&dtab->list); + spin_unlock(&dev_map_lock); + synchronize_rcu(); /* To ensure all pending flush operations have completed wait for flush @@ -162,10 +167,6 @@ static void dev_map_free(struct bpf_map *map) cpu_relax(); } - /* Although we should no longer have datapath or bpf syscall operations - * at this point we we can still race with netdev notifier, hence the - * lock. - */ for (i = 0; i < dtab->map.max_entries; i++) { struct bpf_dtab_netdev *dev; @@ -180,9 +181,6 @@ static void dev_map_free(struct bpf_map *map) /* At this point bpf program is detached and all pending operations * _must_ be complete */ - spin_lock(&dev_map_lock); - list_del_rcu(&dtab->list); - spin_unlock(&dev_map_lock); free_percpu(dtab->flush_needed); bpf_map_area_free(dtab->netdev_map); kfree(dtab); -- cgit v1.2.3-55-g7522 From c3168cabe1af2683475d0e3048220c04b7fa4f51 Mon Sep 17 00:00:00 2001 From: Ganesh Goudar Date: Sun, 20 Aug 2017 14:15:51 +0530 Subject: cxgb4/cxgbvf: Handle 32-bit fw port capabilities Implement new 32-bit Firmware Port Capabilities in order to handle new speeds which couldn't be represented in the old 16-bit Firmware Port Capabilities values. Based on the original work of Casey Leedom Signed-off-by: Ganesh Goudar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 43 +- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 98 ++-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 88 ++-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 580 ++++++++++++++++----- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 175 ++++++- .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 50 +- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 86 +-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 456 +++++++++++++--- 8 files changed, 1220 insertions(+), 356 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index b9bff1d9801f..ea72d2d2e1b4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -104,13 +104,13 @@ enum dev_state { DEV_STATE_ERR }; -enum { +enum cc_pause { PAUSE_RX = 1 << 0, PAUSE_TX = 1 << 1, PAUSE_AUTONEG = 1 << 2 }; -enum { +enum cc_fec { FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */ FEC_RS = 1 << 1, /* Reed-Solomon */ FEC_BASER_RS = 1 << 2 /* BaseR/Reed-Solomon */ @@ -366,6 +366,7 @@ struct adapter_params { unsigned int max_ordird_qp; /* Max read depth per RDMA QP */ unsigned int max_ird_adapter; /* Max read depth per adapter */ bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */ + u8 fw_caps_support; /* 32-bit Port Capabilities */ /* MPS Buffer Group Map[per Port]. Bit i is set if buffer group i is * used by the Port @@ -439,18 +440,34 @@ struct trace_params { unsigned char port; }; +/* Firmware Port Capabilities types. */ + +typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */ +typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */ + +enum fw_caps { + FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */ + FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */ + FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */ +}; + struct link_config { - unsigned short supported; /* link capabilities */ - unsigned short advertising; /* advertised capabilities */ - unsigned short lp_advertising; /* peer advertised capabilities */ - unsigned int requested_speed; /* speed user has requested */ - unsigned int speed; /* actual link speed */ - unsigned char requested_fc; /* flow control user has requested */ - unsigned char fc; /* actual link flow control */ - unsigned char auto_fec; /* Forward Error Correction: */ - unsigned char requested_fec; /* "automatic" (IEEE 802.3), */ - unsigned char fec; /* requested, and actual in use */ + fw_port_cap32_t pcaps; /* link capabilities */ + fw_port_cap32_t def_acaps; /* default advertised capabilities */ + fw_port_cap32_t acaps; /* advertised capabilities */ + fw_port_cap32_t lpacaps; /* peer advertised capabilities */ + + fw_port_cap32_t speed_caps; /* speed(s) user has requested */ + unsigned int speed; /* actual link speed (Mb/s) */ + + enum cc_pause requested_fc; /* flow control user has requested */ + enum cc_pause fc; /* actual link flow control */ + + enum cc_fec requested_fec; /* Forward Error Correction: */ + enum cc_fec fec; /* requested and actual in use */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? 
*/ unsigned char link_down_rc; /* link down reason */ }; @@ -1580,6 +1597,8 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox); void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl); int t4_update_port_info(struct port_info *pi); +int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, + unsigned int *speedp, unsigned int *mtup); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); void t4_db_full(struct adapter *adapter); void t4_db_dropped(struct adapter *adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 03f593e84c24..a71af1e587e2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -533,17 +533,23 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, static unsigned int speed_to_fw_caps(int speed) { if (speed == 100) - return FW_PORT_CAP_SPEED_100M; + return FW_PORT_CAP32_SPEED_100M; if (speed == 1000) - return FW_PORT_CAP_SPEED_1G; + return FW_PORT_CAP32_SPEED_1G; if (speed == 10000) - return FW_PORT_CAP_SPEED_10G; + return FW_PORT_CAP32_SPEED_10G; if (speed == 25000) - return FW_PORT_CAP_SPEED_25G; + return FW_PORT_CAP32_SPEED_25G; if (speed == 40000) - return FW_PORT_CAP_SPEED_40G; + return FW_PORT_CAP32_SPEED_40G; + if (speed == 50000) + return FW_PORT_CAP32_SPEED_50G; if (speed == 100000) - return FW_PORT_CAP_SPEED_100G; + return FW_PORT_CAP32_SPEED_100G; + if (speed == 200000) + return FW_PORT_CAP32_SPEED_200G; + if (speed == 400000) + return FW_PORT_CAP32_SPEED_400G; return 0; } @@ -560,12 +566,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, unsigned int fw_caps, unsigned long *link_mode_mask) { - #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name \ - ## _BIT, link_mode_mask) + #define SET_LMM(__lmm_name) \ + __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ + link_mode_mask) #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ do { \ - if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ + if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \ SET_LMM(__lmm_name); \ } while (0) @@ -645,7 +652,10 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_CR4_QSFP: SET_LMM(FIBRE); - SET_LMM(100000baseCR4_Full); + FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full); + FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full); + FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full); + FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full); break; default: @@ -663,8 +673,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, /** * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware * capabilities - * - * @link_mode_mask: ethtool Link Mode Mask + * @et_lmm: ethtool Link Mode Mask * * Translate ethtool Link Mode Mask into a Firmware Port capabilities * value. 
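A small stand-alone sketch of the bit-for-bit translation idea behind the FW_CAPS_TO_LMM()/LMM_TO_FW_CAPS() macro pairs in this file; the capability and link-mode bit values below are hypothetical stand-ins, not the real FW_PORT_CAP32_* or ETHTOOL_LINK_MODE_* definitions:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for firmware capability and ethtool link-mode bits */
#define EX_CAP32_SPEED_1G	0x00000002u
#define EX_CAP32_SPEED_10G	0x00000004u
#define EX_LMM_1000BASET_FULL	(1u << 0)
#define EX_LMM_10000BASET_FULL	(1u << 1)

#define EX_CAP_TO_LMM(caps, cap_bit, lmm, lmm_bit)	\
	do {						\
		if ((caps) & (cap_bit))			\
			(lmm) |= (lmm_bit);		\
	} while (0)

int main(void)
{
	uint32_t fw_caps = EX_CAP32_SPEED_1G | EX_CAP32_SPEED_10G;
	uint32_t lmm = 0;

	/* firmware capabilities -> ethtool link-mode mask */
	EX_CAP_TO_LMM(fw_caps, EX_CAP32_SPEED_1G, lmm, EX_LMM_1000BASET_FULL);
	EX_CAP_TO_LMM(fw_caps, EX_CAP32_SPEED_10G, lmm, EX_LMM_10000BASET_FULL);
	printf("link modes: %#x\n", lmm);	/* 0x3: both modes advertised */

	/* the reverse direction (LMM_TO_FW_CAPS) tests the link-mode bit
	 * and sets the capability bit instead
	 */
	return 0;
}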
@@ -677,7 +686,7 @@ static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask) do { \ if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ link_mode_mask)) \ - fw_caps |= FW_PORT_CAP_ ## __fw_name; \ + fw_caps |= FW_PORT_CAP32_ ## __fw_name; \ } while (0) LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M); @@ -685,6 +694,7 @@ static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask) LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G); LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G); LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G); + LMM_TO_FW_CAPS(50000baseCR2_Full, SPEED_50G); LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G); #undef LMM_TO_FW_CAPS @@ -698,10 +708,6 @@ static int get_link_ksettings(struct net_device *dev, struct port_info *pi = netdev_priv(dev); struct ethtool_link_settings *base = &link_ksettings->base; - ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); - ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); - /* For the nonce, the Firmware doesn't send up Port State changes * when the Virtual Interface attached to the Port is down. So * if it's down, let's grab any changes. @@ -709,6 +715,10 @@ static int get_link_ksettings(struct net_device *dev, if (!netif_running(dev)) (void)t4_update_port_info(pi); + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); + base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type); if (pi->mdio_addr >= 0) { @@ -721,11 +731,11 @@ static int get_link_ksettings(struct net_device *dev, base->mdio_support = 0; } - fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps, link_ksettings->link_modes.supported); - fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps, link_ksettings->link_modes.advertising); - fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps, link_ksettings->link_modes.lp_advertising); if (netif_carrier_ok(dev)) { @@ -736,8 +746,24 @@ static int get_link_ksettings(struct net_device *dev, base->duplex = DUPLEX_UNKNOWN; } + if (pi->link_cfg.fc & PAUSE_RX) { + if (pi->link_cfg.fc & PAUSE_TX) { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Pause); + } else { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Asym_Pause); + } + } else if (pi->link_cfg.fc & PAUSE_TX) { + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, + Asym_Pause); + } + base->autoneg = pi->link_cfg.autoneg; - if (pi->link_cfg.supported & FW_PORT_CAP_ANEG) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG) ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Autoneg); if (pi->link_cfg.autoneg) @@ -748,8 +774,7 @@ static int get_link_ksettings(struct net_device *dev, } static int set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings - *link_ksettings) + const struct ethtool_link_ksettings *link_ksettings) { struct port_info *pi = netdev_priv(dev); struct link_config *lc = &pi->link_cfg; @@ -762,12 +787,12 @@ static int set_link_ksettings(struct net_device *dev, if (base->duplex != DUPLEX_FULL) return -EINVAL; - if (!(lc->supported & FW_PORT_CAP_ANEG)) { + if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { /* PHY offers a single speed. 
See if that's what's * being requested. */ if (base->autoneg == AUTONEG_DISABLE && - (lc->supported & speed_to_fw_caps(base->speed))) + (lc->pcaps & speed_to_fw_caps(base->speed))) return 0; return -EINVAL; } @@ -776,18 +801,17 @@ static int set_link_ksettings(struct net_device *dev, if (base->autoneg == AUTONEG_DISABLE) { fw_caps = speed_to_fw_caps(base->speed); - if (!(lc->supported & fw_caps)) + if (!(lc->pcaps & fw_caps)) return -EINVAL; - lc->requested_speed = fw_caps; - lc->advertising = 0; + lc->speed_caps = fw_caps; + lc->acaps = 0; } else { fw_caps = - lmm_to_fw_caps(link_ksettings->link_modes.advertising); - - if (!(lc->supported & fw_caps)) + lmm_to_fw_caps(link_ksettings->link_modes.advertising); + if (!(lc->pcaps & fw_caps)) return -EINVAL; - lc->requested_speed = 0; - lc->advertising = fw_caps | FW_PORT_CAP_ANEG; + lc->speed_caps = 0; + lc->acaps = fw_caps | FW_PORT_CAP32_ANEG; } lc->autoneg = base->autoneg; @@ -806,9 +830,9 @@ static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec) { unsigned int eth_fec = 0; - if (fw_fec & FW_PORT_CAP_FEC_RS) + if (fw_fec & FW_PORT_CAP32_FEC_RS) eth_fec |= ETHTOOL_FEC_RS; - if (fw_fec & FW_PORT_CAP_FEC_BASER_RS) + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) eth_fec |= ETHTOOL_FEC_BASER; /* if nothing is set, then FEC is off */ @@ -864,7 +888,7 @@ static int get_fecparam(struct net_device *dev, struct ethtool_fecparam *fec) * always support IEEE 802.3 "automatic" selection of Link FEC type if * any FEC is supported. */ - fec->fec = fwcap_to_eth_fec(lc->supported); + fec->fec = fwcap_to_eth_fec(lc->pcaps); if (fec->fec != ETHTOOL_FEC_OFF) fec->fec |= ETHTOOL_FEC_AUTO; @@ -917,7 +941,7 @@ static int set_pauseparam(struct net_device *dev, if (epause->autoneg == AUTONEG_DISABLE) lc->requested_fc = 0; - else if (lc->supported & FW_PORT_CAP_ANEG) + else if (lc->pcaps & FW_PORT_CAP32_ANEG) lc->requested_fc = PAUSE_AUTONEG; else return -EINVAL; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e55a9299547a..92d9d795d874 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -530,15 +530,22 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16)); if (cmd == FW_PORT_CMD && - action == FW_PORT_ACTION_GET_PORT_INFO) { + (action == FW_PORT_ACTION_GET_PORT_INFO || + action == FW_PORT_ACTION_GET_PORT_INFO32)) { int port = FW_PORT_CMD_PORTID_G( be32_to_cpu(pcmd->op_to_portid)); - struct net_device *dev = - q->adap->port[q->adap->chan_map[port]]; - int state_input = ((pcmd->u.info.dcbxdis_pkd & - FW_PORT_CMD_DCBXDIS_F) - ? CXGB4_DCB_INPUT_FW_DISABLED - : CXGB4_DCB_INPUT_FW_ENABLED); + struct net_device *dev; + int dcbxdis, state_input; + + dev = q->adap->port[q->adap->chan_map[port]]; + dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO + ? !!(pcmd->u.info.dcbxdis_pkd & + FW_PORT_CMD_DCBXDIS_F) + : !!(pcmd->u.info32.lstatus32_to_cbllen32 & + FW_PORT_CMD_DCBXDIS32_F)); + state_input = (dcbxdis + ? 
CXGB4_DCB_INPUT_FW_DISABLED + : CXGB4_DCB_INPUT_FW_ENABLED); cxgb4_dcb_state_fsm(dev, state_input); } @@ -2672,11 +2679,10 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, { struct port_info *pi = netdev_priv(dev); struct adapter *adap = pi->adapter; - struct fw_port_cmd port_cmd, port_rpl; - u32 link_status, speed = 0; + unsigned int link_ok, speed, mtu; u32 fw_pfvf, fw_class; int class_id = vf; - int link_ok, ret; + int ret; u16 pktsize; if (vf >= adap->num_vfs) @@ -2688,41 +2694,18 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, min_tx_rate, vf); return -EINVAL; } - /* Retrieve link details for VF port */ - memset(&port_cmd, 0, sizeof(port_cmd)); - port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | - FW_CMD_REQUEST_F | - FW_CMD_READ_F | - FW_PORT_CMD_PORTID_V(pi->port_id)); - port_cmd.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(port_cmd)); - ret = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd), - &port_rpl); + + ret = t4_get_link_params(pi, &link_ok, &speed, &mtu); if (ret != FW_SUCCESS) { dev_err(adap->pdev_dev, - "Failed to get link status for VF %d\n", vf); + "Failed to get link information for VF %d\n", vf); return -EINVAL; } - link_status = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); - link_ok = (link_status & FW_PORT_CMD_LSTATUS_F) != 0; + if (!link_ok) { dev_err(adap->pdev_dev, "Link down for VF %d\n", vf); return -EINVAL; } - /* Determine link speed */ - if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) - speed = 100; - else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) - speed = 1000; - else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) - speed = 10000; - else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) - speed = 25000; - else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) - speed = 40000; - else if (link_status & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) - speed = 100000; if (max_tx_rate > speed) { dev_err(adap->pdev_dev, @@ -2730,7 +2713,8 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, max_tx_rate, vf, speed); return -EINVAL; } - pktsize = be16_to_cpu(port_rpl.u.info.mtu); + + pktsize = mtu; /* subtract ethhdr size and 4 bytes crc since, f/w appends it */ pktsize = pktsize - sizeof(struct ethhdr) - 4; /* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */ @@ -2741,7 +2725,7 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, SCHED_CLASS_MODE_CLASS, SCHED_CLASS_RATEUNIT_BITS, SCHED_CLASS_RATEMODE_ABS, - pi->port_id, class_id, 0, + pi->tx_chan, class_id, 0, max_tx_rate * 1000, 0, pktsize); if (ret) { dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", @@ -4208,8 +4192,9 @@ static inline bool is_x_10g_port(const struct link_config *lc) { unsigned int speeds, high_speeds; - speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported)); - high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); + speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps)); + high_speeds = speeds & + ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G); return high_speeds != 0; } @@ -4590,18 +4575,24 @@ static void print_port_info(const struct net_device *dev) else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) spd = " 8 GT/s"; - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M) bufp += sprintf(bufp, 
"100M/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G) bufp += sprintf(bufp, "1G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G) bufp += sprintf(bufp, "10G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G) bufp += sprintf(bufp, "25G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G) bufp += sprintf(bufp, "40G/"); - if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G) + bufp += sprintf(bufp, "50G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G) bufp += sprintf(bufp, "100G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G) + bufp += sprintf(bufp, "200G/"); + if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G) + bufp += sprintf(bufp, "400G/"); if (bufp != buf) --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); @@ -4707,10 +4698,11 @@ static int config_mgmt_dev(struct pci_dev *pdev) pi = netdev_priv(netdev); pi->adapter = adap; - pi->port_id = adap->pf % adap->params.nports; + pi->tx_chan = adap->pf % adap->params.nports; SET_NETDEV_DEV(netdev, &pdev->dev); adap->port[0] = netdev; + pi->port_id = 0; err = register_netdev(adap->port[0]); if (err) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index fff8fba86f97..a4a33ebd0b98 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -3835,59 +3835,133 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) } } -#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ - FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ - FW_PORT_CAP_ANEG) +#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \ + FW_PORT_CAP32_ANEG) + +/** + * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits + * @caps16: a 16-bit Port Capabilities value + * + * Returns the equivalent 32-bit Port Capabilities value. + */ +static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) +{ + fw_port_cap32_t caps32 = 0; + + #define CAP16_TO_CAP32(__cap) \ + do { \ + if (caps16 & FW_PORT_CAP_##__cap) \ + caps32 |= FW_PORT_CAP32_##__cap; \ + } while (0) + + CAP16_TO_CAP32(SPEED_100M); + CAP16_TO_CAP32(SPEED_1G); + CAP16_TO_CAP32(SPEED_25G); + CAP16_TO_CAP32(SPEED_10G); + CAP16_TO_CAP32(SPEED_40G); + CAP16_TO_CAP32(SPEED_100G); + CAP16_TO_CAP32(FC_RX); + CAP16_TO_CAP32(FC_TX); + CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(MDIX); + CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(FEC_RS); + CAP16_TO_CAP32(FEC_BASER_RS); + CAP16_TO_CAP32(802_3_PAUSE); + CAP16_TO_CAP32(802_3_ASM_DIR); + + #undef CAP16_TO_CAP32 + + return caps32; +} + +/** + * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits + * @caps32: a 32-bit Port Capabilities value + * + * Returns the equivalent 16-bit Port Capabilities value. Note that + * not all 32-bit Port Capabilities can be represented in the 16-bit + * Port Capabilities and some fields/values may not make it. 
+ */ +static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32) +{ + fw_port_cap16_t caps16 = 0; + + #define CAP32_TO_CAP16(__cap) \ + do { \ + if (caps32 & FW_PORT_CAP32_##__cap) \ + caps16 |= FW_PORT_CAP_##__cap; \ + } while (0) + + CAP32_TO_CAP16(SPEED_100M); + CAP32_TO_CAP16(SPEED_1G); + CAP32_TO_CAP16(SPEED_10G); + CAP32_TO_CAP16(SPEED_25G); + CAP32_TO_CAP16(SPEED_40G); + CAP32_TO_CAP16(SPEED_100G); + CAP32_TO_CAP16(FC_RX); + CAP32_TO_CAP16(FC_TX); + CAP32_TO_CAP16(802_3_PAUSE); + CAP32_TO_CAP16(802_3_ASM_DIR); + CAP32_TO_CAP16(ANEG); + CAP32_TO_CAP16(MDIX); + CAP32_TO_CAP16(MDIAUTO); + CAP32_TO_CAP16(FEC_RS); + CAP32_TO_CAP16(FEC_BASER_RS); + + #undef CAP32_TO_CAP16 + + return caps16; +} /* Translate Firmware Port Capabilities Pause specification to Common Code */ -static inline unsigned int fwcap_to_cc_pause(unsigned int fw_pause) +static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause) { - unsigned int cc_pause = 0; + enum cc_pause cc_pause = 0; - if (fw_pause & FW_PORT_CAP_FC_RX) + if (fw_pause & FW_PORT_CAP32_FC_RX) cc_pause |= PAUSE_RX; - if (fw_pause & FW_PORT_CAP_FC_TX) + if (fw_pause & FW_PORT_CAP32_FC_TX) cc_pause |= PAUSE_TX; return cc_pause; } /* Translate Common Code Pause specification into Firmware Port Capabilities */ -static inline unsigned int cc_to_fwcap_pause(unsigned int cc_pause) +static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause) { - unsigned int fw_pause = 0; + fw_port_cap32_t fw_pause = 0; if (cc_pause & PAUSE_RX) - fw_pause |= FW_PORT_CAP_FC_RX; + fw_pause |= FW_PORT_CAP32_FC_RX; if (cc_pause & PAUSE_TX) - fw_pause |= FW_PORT_CAP_FC_TX; + fw_pause |= FW_PORT_CAP32_FC_TX; return fw_pause; } /* Translate Firmware Forward Error Correction specification to Common Code */ -static inline unsigned int fwcap_to_cc_fec(unsigned int fw_fec) +static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) { - unsigned int cc_fec = 0; + enum cc_fec cc_fec = 0; - if (fw_fec & FW_PORT_CAP_FEC_RS) + if (fw_fec & FW_PORT_CAP32_FEC_RS) cc_fec |= FEC_RS; - if (fw_fec & FW_PORT_CAP_FEC_BASER_RS) + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) cc_fec |= FEC_BASER_RS; return cc_fec; } /* Translate Common Code Forward Error Correction specification to Firmware */ -static inline unsigned int cc_to_fwcap_fec(unsigned int cc_fec) +static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec) { - unsigned int fw_fec = 0; + fw_port_cap32_t fw_fec = 0; if (cc_fec & FEC_RS) - fw_fec |= FW_PORT_CAP_FEC_RS; + fw_fec |= FW_PORT_CAP32_FEC_RS; if (cc_fec & FEC_BASER_RS) - fw_fec |= FW_PORT_CAP_FEC_BASER_RS; + fw_fec |= FW_PORT_CAP32_FEC_BASER_RS; return fw_fec; } @@ -3906,13 +3980,13 @@ static inline unsigned int cc_to_fwcap_fec(unsigned int cc_fec) * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC, * otherwise do it later based on the outcome of auto-negotiation. 
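/*
 * Sketch only, not part of this patch: one way to express the same
 * mapping as the fwcaps16_to_caps32()/fwcaps32_to_caps16() helpers
 * above with a single shared table, so the two directions cannot drift
 * apart. The bit values are stand-ins for the FW_PORT_CAP_* and
 * FW_PORT_CAP32_* definitions, whose positions differ between the two
 * formats.
 */
#include <stddef.h>
#include <stdint.h>

struct ex_cap_map {
	uint16_t cap16;
	uint32_t cap32;
};

static const struct ex_cap_map ex_cap_maps[] = {
	{ 0x0001, 0x00000001u },	/* e.g. SPEED_100M */
	{ 0x0002, 0x00000002u },	/* e.g. SPEED_1G */
	{ 0x0100, 0x00100000u },	/* e.g. ANEG sits at a different bit in each format */
};

uint32_t ex_caps16_to_caps32(uint16_t caps16)
{
	uint32_t caps32 = 0;
	size_t i;

	for (i = 0; i < sizeof(ex_cap_maps) / sizeof(ex_cap_maps[0]); i++)
		if (caps16 & ex_cap_maps[i].cap16)
			caps32 |= ex_cap_maps[i].cap32;
	return caps32;
}

uint16_t ex_caps32_to_caps16(uint32_t caps32)
{
	uint16_t caps16 = 0;
	size_t i;

	for (i = 0; i < sizeof(ex_cap_maps) / sizeof(ex_cap_maps[0]); i++)
		if (caps32 & ex_cap_maps[i].cap32)
			caps16 |= ex_cap_maps[i].cap16;
	return caps16;
}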
*/ -int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, - struct link_config *lc) +int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox, + unsigned int port, struct link_config *lc) { - struct fw_port_cmd c; - unsigned int fw_mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO); - unsigned int fw_fc, cc_fec, fw_fec; - unsigned int rcap; + unsigned int fw_caps = adapter->params.fw_caps_support; + struct fw_port_cmd cmd; + unsigned int fw_mdi = FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO); + fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap; lc->link_ok = 0; @@ -3929,36 +4003,41 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, * use whatever is in the current Requested FEC settings. */ if (lc->requested_fec & FEC_AUTO) - cc_fec = lc->auto_fec; + cc_fec = fwcap_to_cc_fec(lc->def_acaps); else cc_fec = lc->requested_fec; fw_fec = cc_to_fwcap_fec(cc_fec); /* Figure out what our Requested Port Capabilities are going to be. */ - if (!(lc->supported & FW_PORT_CAP_ANEG)) { - rcap = (lc->supported & ADVERT_MASK) | fw_fc | fw_fec; - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) { + rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec; + lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; lc->fec = cc_fec; } else if (lc->autoneg == AUTONEG_DISABLE) { - rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi; - lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX); + rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi; + lc->fc = lc->requested_fc & ~PAUSE_AUTONEG; lc->fec = cc_fec; } else { - rcap = lc->advertising | fw_fc | fw_fec | fw_mdi; + rcap = lc->acaps | fw_fc | fw_fec | fw_mdi; } /* And send that on to the Firmware ... */ - memset(&c, 0, sizeof(c)); - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | - FW_CMD_REQUEST_F | FW_CMD_EXEC_F | - FW_PORT_CMD_PORTID_V(port)); - c.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | - FW_LEN16(c)); - c.u.l1cfg.rcap = cpu_to_be32(rcap); - return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_EXEC_F | + FW_PORT_CMD_PORTID_V(port)); + cmd.action_to_len16 = + cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_L1_CFG + : FW_PORT_ACTION_L1_CFG32) | + FW_LEN16(cmd)); + if (fw_caps == FW_CAPS16) + cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap)); + else + cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap); + return t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL); } /** @@ -3980,7 +4059,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port) c.action_to_len16 = cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) | FW_LEN16(c)); - c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG); + c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG); return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); } @@ -7696,6 +7775,98 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc) return reason[link_down_rc]; } +/** + * Return the highest speed set in the port capabilities, in Mb/s. 
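/*
 * Stand-alone sketch (not from the patch) of the first-match-wins
 * decode used by the fwcap_to_speed() helper that follows: test the
 * speed bits from highest to lowest and return the first one present.
 * The bit values mirror the FW_PORT_CAP32_SPEED_* defines added by
 * this patch, but are local copies for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_SPEED_1G	0x00000002u
#define EX_SPEED_25G	0x00000008u
#define EX_SPEED_100G	0x00000040u

static unsigned int ex_caps_to_speed(uint32_t caps)
{
	/* order matters: the highest advertised speed wins */
	if (caps & EX_SPEED_100G)
		return 100000;
	if (caps & EX_SPEED_25G)
		return 25000;
	if (caps & EX_SPEED_1G)
		return 1000;
	return 0;
}

int main(void)
{
	uint32_t caps = EX_SPEED_1G | EX_SPEED_25G;

	printf("%u Mb/s\n", ex_caps_to_speed(caps));	/* prints 25000 */
	return 0;
}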
+ */ +static unsigned int fwcap_to_speed(fw_port_cap32_t caps) +{ + #define TEST_SPEED_RETURN(__caps_speed, __speed) \ + do { \ + if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return __speed; \ + } while (0) + + TEST_SPEED_RETURN(400G, 400000); + TEST_SPEED_RETURN(200G, 200000); + TEST_SPEED_RETURN(100G, 100000); + TEST_SPEED_RETURN(50G, 50000); + TEST_SPEED_RETURN(40G, 40000); + TEST_SPEED_RETURN(25G, 25000); + TEST_SPEED_RETURN(10G, 10000); + TEST_SPEED_RETURN(1G, 1000); + TEST_SPEED_RETURN(100M, 100); + + #undef TEST_SPEED_RETURN + + return 0; +} + +/** + * fwcap_to_fwspeed - return highest speed in Port Capabilities + * @acaps: advertised Port Capabilities + * + * Get the highest speed for the port from the advertised Port + * Capabilities. It will be either the highest speed from the list of + * speeds or whatever user has set using ethtool. + */ +static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps) +{ + #define TEST_SPEED_RETURN(__caps_speed) \ + do { \ + if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return FW_PORT_CAP32_SPEED_##__caps_speed; \ + } while (0) + + TEST_SPEED_RETURN(400G); + TEST_SPEED_RETURN(200G); + TEST_SPEED_RETURN(100G); + TEST_SPEED_RETURN(50G); + TEST_SPEED_RETURN(40G); + TEST_SPEED_RETURN(25G); + TEST_SPEED_RETURN(10G); + TEST_SPEED_RETURN(1G); + TEST_SPEED_RETURN(100M); + + #undef TEST_SPEED_RETURN + + return 0; +} + +/** + * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities + * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value + * + * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new + * 32-bit Port Capabilities value. + */ +static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus) +{ + fw_port_cap32_t linkattr = 0; + + /* Unfortunately the format of the Link Status in the old + * 16-bit Port Information message isn't the same as the + * 16-bit Port Capabilities bitfield used everywhere else ... + */ + if (lstatus & FW_PORT_CMD_RXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_RX; + if (lstatus & FW_PORT_CMD_TXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_TX; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + linkattr |= FW_PORT_CAP32_SPEED_100M; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + linkattr |= FW_PORT_CAP32_SPEED_1G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + linkattr |= FW_PORT_CAP32_SPEED_10G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + linkattr |= FW_PORT_CAP32_SPEED_25G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + linkattr |= FW_PORT_CAP32_SPEED_40G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + linkattr |= FW_PORT_CAP32_SPEED_100G; + + return linkattr; +} + /** * t4_handle_get_port_info - process a FW reply message * @pi: the port info @@ -7705,76 +7876,123 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc) */ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) { - const struct fw_port_cmd *p = (const void *)rpl; - unsigned int acaps = be16_to_cpu(p->u.info.acap); - struct adapter *adap = pi->adapter; - - /* link/module state change message */ - int speed = 0, fc, fec; - struct link_config *lc; - u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype); - int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; - u32 mod = FW_PORT_CMD_MODTYPE_G(stat); - - /* Unfortunately the format of the Link Status returned by the - * Firmware isn't the same as the Firmware Port Capabilities bitfield - * used everywhere else ... 
+ const struct fw_port_cmd *cmd = (const void *)rpl; + int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); + struct adapter *adapter = pi->adapter; + struct link_config *lc = &pi->link_cfg; + int link_ok, linkdnrc; + enum fw_port_type port_type; + enum fw_port_module_type mod_type; + unsigned int speed, fc, fec; + fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; + + /* Extract the various fields from the Port Information message. */ - fc = 0; - if (stat & FW_PORT_CMD_RXPAUSE_F) - fc |= PAUSE_RX; - if (stat & FW_PORT_CMD_TXPAUSE_F) - fc |= PAUSE_TX; + switch (action) { + case FW_PORT_ACTION_GET_PORT_INFO: { + u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); + + link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0; + linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus); + port_type = FW_PORT_CMD_PTYPE_G(lstatus); + mod_type = FW_PORT_CMD_MODTYPE_G(lstatus); + pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap)); + lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap)); + linkattr = lstatus_to_fwcap(lstatus); + break; + } + + case FW_PORT_ACTION_GET_PORT_INFO32: { + u32 lstatus32; + + lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32); + link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0; + linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32); + port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32); + mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32); + pcaps = be32_to_cpu(cmd->u.info32.pcaps32); + acaps = be32_to_cpu(cmd->u.info32.acaps32); + lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32); + linkattr = be32_to_cpu(cmd->u.info32.linkattr32); + break; + } + + default: + dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n", + be32_to_cpu(cmd->action_to_len16)); + return; + } fec = fwcap_to_cc_fec(acaps); + fc = fwcap_to_cc_pause(linkattr); + speed = fwcap_to_speed(linkattr); + + if (mod_type != pi->mod_type) { + /* With the newer SFP28 and QSFP28 Transceiver Module Types, + * various fundamental Port Capabilities which used to be + * immutable can now change radically. We can now have + * Speeds, Auto-Negotiation, Forward Error Correction, etc. + * all change based on what Transceiver Module is inserted. + * So we need to record the Physical "Port" Capabilities on + * every Transceiver Module change. + */ + lc->pcaps = pcaps; - if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) - speed = 100; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) - speed = 1000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) - speed = 10000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) - speed = 25000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) - speed = 40000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) - speed = 100000; - - lc = &pi->link_cfg; - - if (mod != pi->mod_type) { /* When a new Transceiver Module is inserted, the Firmware - * will examine any Forward Error Correction parameters - * present in the Transceiver Module i2c EPROM and determine - * the supported and recommended FEC settings from those - * based on IEEE 802.3 standards. We always record the - * IEEE 802.3 recommended "automatic" settings. + * will examine its i2c EPROM to determine its type and + * general operating parameters including things like Forward + * Error Control, etc. Various IEEE 802.3 standards dictate + * how to interpret these i2c values to determine default + * "sutomatic" settings. 
We record these for future use when + * the user explicitly requests these standards-based values. */ - lc->auto_fec = fec; + lc->def_acaps = acaps; + + /* Some versions of the early T6 Firmware "cheated" when + * handling different Transceiver Modules by changing the + * underlaying Port Type reported to the Host Drivers. As + * such we need to capture whatever Port Type the Firmware + * sends us and record it in case it's different from what we + * were told earlier. Unfortunately, since Firmware is + * forever, we'll need to keep this code here forever, but in + * later T6 Firmware it should just be an assignment of the + * same value already recorded. + */ + pi->port_type = port_type; - pi->mod_type = mod; - t4_os_portmod_changed(adap, pi->port_id); + pi->mod_type = mod_type; + t4_os_portmod_changed(adapter, pi->port_id); } + if (link_ok != lc->link_ok || speed != lc->speed || fc != lc->fc || fec != lc->fec) { /* something changed */ if (!link_ok && lc->link_ok) { - unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat); - - lc->link_down_rc = rc; - dev_warn(adap->pdev_dev, - "Port %d link down, reason: %s\n", - pi->port_id, t4_link_down_rc_str(rc)); + lc->link_down_rc = linkdnrc; + dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n", + pi->tx_chan, t4_link_down_rc_str(linkdnrc)); } lc->link_ok = link_ok; lc->speed = speed; lc->fc = fc; lc->fec = fec; - lc->supported = be16_to_cpu(p->u.info.pcap); - lc->lp_advertising = be16_to_cpu(p->u.info.lpacap); + lc->lpacaps = lpacaps; + lc->acaps = acaps & ADVERT_MASK; + + if (lc->acaps & FW_PORT_CAP32_ANEG) { + lc->autoneg = AUTONEG_ENABLE; + } else { + /* When Autoneg is disabled, user needs to set + * single speed. + * Similar to cxgb4_ethtool.c: set_link_ksettings + */ + lc->acaps = 0; + lc->speed_caps = fwcap_to_fwspeed(acaps); + lc->autoneg = AUTONEG_DISABLE; + } - t4_os_link_changed(adap, pi->port_id, link_ok); + t4_os_link_changed(adapter, pi->port_id, link_ok); } } @@ -7788,15 +8006,18 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) */ int t4_update_port_info(struct port_info *pi) { + unsigned int fw_caps = pi->adapter->params.fw_caps_support; struct fw_port_cmd port_cmd; int ret; memset(&port_cmd, 0, sizeof(port_cmd)); port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | FW_CMD_REQUEST_F | FW_CMD_READ_F | - FW_PORT_CMD_PORTID_V(pi->port_id)); + FW_PORT_CMD_PORTID_V(pi->tx_chan)); port_cmd.action_to_len16 = cpu_to_be32( - FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | + FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32) | FW_LEN16(port_cmd)); ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox, &port_cmd, sizeof(port_cmd), &port_cmd); @@ -7807,6 +8028,65 @@ int t4_update_port_info(struct port_info *pi) return 0; } +/** + * t4_get_link_params - retrieve basic link parameters for given port + * @pi: the port + * @link_okp: value return pointer for link up/down + * @speedp: value return pointer for speed (Mb/s) + * @mtup: value return pointer for mtu + * + * Retrieves basic link parameters for a port: link up/down, speed (Mb/s), + * and MTU for a specified port. A negative error is returned on + * failure; 0 on success. 
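/*
 * Hypothetical caller, not from the patch: how the t4_get_link_params()
 * helper defined just below is meant to be used (cxgb_set_vf_rate() in
 * cxgb4_main.c is the real in-tree caller). Assumes the usual cxgb4.h
 * context for struct port_info and dev_info().
 */
static void ex_show_link(struct port_info *pi)
{
	unsigned int link_ok, speed, mtu;

	if (t4_get_link_params(pi, &link_ok, &speed, &mtu) == 0 && link_ok)
		dev_info(pi->adapter->pdev_dev,
			 "port %d: link up at %u Mb/s, MTU %u\n",
			 pi->port_id, speed, mtu);
}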
+ */ +int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, + unsigned int *speedp, unsigned int *mtup) +{ + unsigned int fw_caps = pi->adapter->params.fw_caps_support; + struct fw_port_cmd port_cmd; + unsigned int action, link_ok, speed, mtu; + fw_port_cap32_t linkattr; + int ret; + + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->tx_chan)); + action = (fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32); + port_cmd.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(action) | + FW_LEN16(port_cmd)); + ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox, + &port_cmd, sizeof(port_cmd), &port_cmd); + if (ret) + return ret; + + if (action == FW_PORT_ACTION_GET_PORT_INFO) { + u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype); + + link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F); + linkattr = lstatus_to_fwcap(lstatus); + mtu = be16_to_cpu(port_cmd.u.info.mtu); + } else { + u32 lstatus32 = + be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32); + + link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F); + linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32); + mtu = FW_PORT_CMD_MTU32_G( + be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32)); + } + speed = fwcap_to_speed(linkattr); + + *link_okp = link_ok; + *speedp = fwcap_to_speed(linkattr); + *mtup = mtu; + + return 0; +} + /** * t4_handle_fw_rpl - process a FW reply message * @adap: the adapter @@ -7827,7 +8107,9 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) unsigned int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16)); - if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) { + if (opcode == FW_PORT_CMD && + (action == FW_PORT_ACTION_GET_PORT_INFO || + action == FW_PORT_ACTION_GET_PORT_INFO32)) { int i; int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid)); struct port_info *pi = NULL; @@ -7840,7 +8122,8 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) t4_handle_get_port_info(pi, rpl); } else { - dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode); + dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", + opcode); return -EINVAL; } return 0; @@ -7859,35 +8142,35 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p) /** * init_link_config - initialize a link's SW state - * @lc: structure holding the link state + * @lc: pointer to structure holding the link state * @pcaps: link Port Capabilities * @acaps: link current Advertised Port Capabilities * * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. */ -static void init_link_config(struct link_config *lc, unsigned int pcaps, - unsigned int acaps) +static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps, + fw_port_cap32_t acaps) { - lc->supported = pcaps; - lc->lp_advertising = 0; - lc->requested_speed = 0; + lc->pcaps = pcaps; + lc->def_acaps = acaps; + lc->lpacaps = 0; + lc->speed_caps = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; /* For Forward Error Control, we default to whatever the Firmware * tells us the Link is currently advertising. 
*/ - lc->auto_fec = fwcap_to_cc_fec(acaps); lc->requested_fec = FEC_AUTO; - lc->fec = lc->auto_fec; + lc->fec = fwcap_to_cc_fec(lc->def_acaps); - if (lc->supported & FW_PORT_CAP_ANEG) { - lc->advertising = lc->supported & ADVERT_MASK; + if (lc->pcaps & FW_PORT_CAP32_ANEG) { + lc->acaps = lc->pcaps & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; lc->requested_fc |= PAUSE_AUTONEG; } else { - lc->advertising = 0; + lc->acaps = 0; lc->autoneg = AUTONEG_DISABLE; } } @@ -8412,7 +8695,7 @@ int t4_init_rss_mode(struct adapter *adap, int mbox) } /** - * t4_init_portinfo - allocate a virtual interface amd initialize port_info + * t4_init_portinfo - allocate a virtual interface and initialize port_info * @pi: the port_info * @mbox: mailbox to use for the FW command * @port: physical port associated with the VI @@ -8428,21 +8711,67 @@ int t4_init_rss_mode(struct adapter *adap, int mbox) int t4_init_portinfo(struct port_info *pi, int mbox, int port, int pf, int vf, u8 mac[]) { - int ret; - struct fw_port_cmd c; + struct adapter *adapter = pi->adapter; + unsigned int fw_caps = adapter->params.fw_caps_support; + struct fw_port_cmd cmd; unsigned int rss_size; + enum fw_port_type port_type; + int mdio_addr; + fw_port_cap32_t pcaps, acaps; + int ret; - memset(&c, 0, sizeof(c)); - c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | - FW_CMD_REQUEST_F | FW_CMD_READ_F | - FW_PORT_CMD_PORTID_V(port)); - c.action_to_len16 = cpu_to_be32( - FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(c)); - ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c); + /* If we haven't yet determined whether we're talking to Firmware + * which knows the new 32-bit Port Capabilities, it's time to find + * out now. This will also tell new Firmware to send us Port Status + * Updates using the new 32-bit Port Capabilities version of the + * Port Information message. + */ + if (fw_caps == FW_CAPS_UNKNOWN) { + u32 param, val; + + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32)); + val = 1; + ret = t4_set_params(adapter, mbox, pf, vf, 1, ¶m, &val); + fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16); + adapter->params.fw_caps_support = fw_caps; + } + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(port)); + cmd.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32) | + FW_LEN16(cmd)); + ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd); if (ret) return ret; + /* Extract the various fields from the Port Information message. + */ + if (fw_caps == FW_CAPS16) { + u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype); + + port_type = FW_PORT_CMD_PTYPE_G(lstatus); + mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F) + ? FW_PORT_CMD_MDIOADDR_G(lstatus) + : -1); + pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap)); + } else { + u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32); + + port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32); + mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F) + ? 
FW_PORT_CMD_MDIOADDR32_G(lstatus32) + : -1); + pcaps = be32_to_cpu(cmd.u.info32.pcaps32); + acaps = be32_to_cpu(cmd.u.info32.acaps32); + } + ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size); if (ret < 0) return ret; @@ -8452,14 +8781,11 @@ int t4_init_portinfo(struct port_info *pi, int mbox, pi->lport = port; pi->rss_size = rss_size; - ret = be32_to_cpu(c.u.info.lstatus_to_modtype); - pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ? - FW_PORT_CMD_MDIOADDR_G(ret) : -1; - pi->port_type = FW_PORT_CMD_PTYPE_G(ret); + pi->port_type = port_type; + pi->mdio_addr = mdio_addr; pi->mod_type = FW_PORT_MOD_TYPE_NA; - init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap), - be16_to_cpu(c.u.info.acap)); + init_link_config(&pi->link_cfg, pcaps, acaps); return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index ad825fbc21a5..ca2756dcefc5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1173,7 +1173,8 @@ enum fw_params_param_pfvf { FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E, FW_PARAMS_PARAM_PFVF_ETHOFLD_END = 0x30, FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31, - FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x32 + FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x32, + FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A, }; /* @@ -2256,6 +2257,7 @@ struct fw_acl_vlan_cmd { #define FW_ACL_VLAN_CMD_FM_S 6 #define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S) +/* old 16-bit port capabilities bitmap (fw_port_cap16_t) */ enum fw_port_cap { FW_PORT_CAP_SPEED_100M = 0x0001, FW_PORT_CAP_SPEED_1G = 0x0002, @@ -2291,6 +2293,84 @@ enum fw_port_mdi { #define FW_PORT_CAP_MDI_S 9 #define FW_PORT_CAP_MDI_V(x) ((x) << FW_PORT_CAP_MDI_S) +/* new 32-bit port capabilities bitmap (fw_port_cap32_t) */ +#define FW_PORT_CAP32_SPEED_100M 0x00000001UL +#define FW_PORT_CAP32_SPEED_1G 0x00000002UL +#define FW_PORT_CAP32_SPEED_10G 0x00000004UL +#define FW_PORT_CAP32_SPEED_25G 0x00000008UL +#define FW_PORT_CAP32_SPEED_40G 0x00000010UL +#define FW_PORT_CAP32_SPEED_50G 0x00000020UL +#define FW_PORT_CAP32_SPEED_100G 0x00000040UL +#define FW_PORT_CAP32_SPEED_200G 0x00000080UL +#define FW_PORT_CAP32_SPEED_400G 0x00000100UL +#define FW_PORT_CAP32_SPEED_RESERVED1 0x00000200UL +#define FW_PORT_CAP32_SPEED_RESERVED2 0x00000400UL +#define FW_PORT_CAP32_SPEED_RESERVED3 0x00000800UL +#define FW_PORT_CAP32_RESERVED1 0x0000f000UL +#define FW_PORT_CAP32_FC_RX 0x00010000UL +#define FW_PORT_CAP32_FC_TX 0x00020000UL +#define FW_PORT_CAP32_802_3_PAUSE 0x00040000UL +#define FW_PORT_CAP32_802_3_ASM_DIR 0x00080000UL +#define FW_PORT_CAP32_ANEG 0x00100000UL +#define FW_PORT_CAP32_MDIX 0x00200000UL +#define FW_PORT_CAP32_MDIAUTO 0x00400000UL +#define FW_PORT_CAP32_FEC_RS 0x00800000UL +#define FW_PORT_CAP32_FEC_BASER_RS 0x01000000UL +#define FW_PORT_CAP32_FEC_RESERVED1 0x02000000UL +#define FW_PORT_CAP32_FEC_RESERVED2 0x04000000UL +#define FW_PORT_CAP32_FEC_RESERVED3 0x08000000UL +#define FW_PORT_CAP32_RESERVED2 0xf0000000UL + +#define FW_PORT_CAP32_SPEED_S 0 +#define FW_PORT_CAP32_SPEED_M 0xfff +#define FW_PORT_CAP32_SPEED_V(x) ((x) << FW_PORT_CAP32_SPEED_S) +#define FW_PORT_CAP32_SPEED_G(x) \ + (((x) >> FW_PORT_CAP32_SPEED_S) & FW_PORT_CAP32_SPEED_M) + +#define FW_PORT_CAP32_FC_S 16 +#define FW_PORT_CAP32_FC_M 0x3 +#define FW_PORT_CAP32_FC_V(x) ((x) << FW_PORT_CAP32_FC_S) +#define FW_PORT_CAP32_FC_G(x) \ + (((x) >> FW_PORT_CAP32_FC_S) & FW_PORT_CAP32_FC_M) + +#define FW_PORT_CAP32_802_3_S 18 +#define 
FW_PORT_CAP32_802_3_M 0x3 +#define FW_PORT_CAP32_802_3_V(x) ((x) << FW_PORT_CAP32_802_3_S) +#define FW_PORT_CAP32_802_3_G(x) \ + (((x) >> FW_PORT_CAP32_802_3_S) & FW_PORT_CAP32_802_3_M) + +#define FW_PORT_CAP32_ANEG_S 20 +#define FW_PORT_CAP32_ANEG_M 0x1 +#define FW_PORT_CAP32_ANEG_V(x) ((x) << FW_PORT_CAP32_ANEG_S) +#define FW_PORT_CAP32_ANEG_G(x) \ + (((x) >> FW_PORT_CAP32_ANEG_S) & FW_PORT_CAP32_ANEG_M) + +enum fw_port_mdi32 { + FW_PORT_CAP32_MDI_UNCHANGED, + FW_PORT_CAP32_MDI_AUTO, + FW_PORT_CAP32_MDI_F_STRAIGHT, + FW_PORT_CAP32_MDI_F_CROSSOVER +}; + +#define FW_PORT_CAP32_MDI_S 21 +#define FW_PORT_CAP32_MDI_M 3 +#define FW_PORT_CAP32_MDI_V(x) ((x) << FW_PORT_CAP32_MDI_S) +#define FW_PORT_CAP32_MDI_G(x) \ + (((x) >> FW_PORT_CAP32_MDI_S) & FW_PORT_CAP32_MDI_M) + +#define FW_PORT_CAP32_FEC_S 23 +#define FW_PORT_CAP32_FEC_M 0x1f +#define FW_PORT_CAP32_FEC_V(x) ((x) << FW_PORT_CAP32_FEC_S) +#define FW_PORT_CAP32_FEC_G(x) \ + (((x) >> FW_PORT_CAP32_FEC_S) & FW_PORT_CAP32_FEC_M) + +/* macros to isolate various 32-bit Port Capabilities sub-fields */ +#define CAP32_SPEED(__cap32) \ + (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) & __cap32) + +#define CAP32_FEC(__cap32) \ + (FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M) & __cap32) + enum fw_port_action { FW_PORT_ACTION_L1_CFG = 0x0001, FW_PORT_ACTION_L2_CFG = 0x0002, @@ -2300,6 +2380,8 @@ enum fw_port_action { FW_PORT_ACTION_DCB_READ_TRANS = 0x0006, FW_PORT_ACTION_DCB_READ_RECV = 0x0007, FW_PORT_ACTION_DCB_READ_DET = 0x0008, + FW_PORT_ACTION_L1_CFG32 = 0x0009, + FW_PORT_ACTION_GET_PORT_INFO32 = 0x000a, FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010, FW_PORT_ACTION_L1_LOW_PWR_EN = 0x0011, FW_PORT_ACTION_L2_WOL_MODE_EN = 0x0012, @@ -2447,6 +2529,18 @@ struct fw_port_cmd { __be64 r12; } control; } dcb; + struct fw_port_l1cfg32 { + __be32 rcap32; + __be32 r; + } l1cfg32; + struct fw_port_info32 { + __be32 lstatus32_to_cbllen32; + __be32 auxlinfo32_mtu32; + __be32 linkattr32; + __be32 pcaps32; + __be32 acaps32; + __be32 lpacaps32; + } info32; } u; }; @@ -2555,6 +2649,85 @@ struct fw_port_cmd { #define FW_PORT_CMD_DCB_VERSION_G(x) \ (((x) >> FW_PORT_CMD_DCB_VERSION_S) & FW_PORT_CMD_DCB_VERSION_M) +#define FW_PORT_CMD_LSTATUS32_S 31 +#define FW_PORT_CMD_LSTATUS32_M 0x1 +#define FW_PORT_CMD_LSTATUS32_V(x) ((x) << FW_PORT_CMD_LSTATUS32_S) +#define FW_PORT_CMD_LSTATUS32_G(x) \ + (((x) >> FW_PORT_CMD_LSTATUS32_S) & FW_PORT_CMD_LSTATUS32_M) +#define FW_PORT_CMD_LSTATUS32_F FW_PORT_CMD_LSTATUS32_V(1U) + +#define FW_PORT_CMD_LINKDNRC32_S 28 +#define FW_PORT_CMD_LINKDNRC32_M 0x7 +#define FW_PORT_CMD_LINKDNRC32_V(x) ((x) << FW_PORT_CMD_LINKDNRC32_S) +#define FW_PORT_CMD_LINKDNRC32_G(x) \ + (((x) >> FW_PORT_CMD_LINKDNRC32_S) & FW_PORT_CMD_LINKDNRC32_M) + +#define FW_PORT_CMD_DCBXDIS32_S 27 +#define FW_PORT_CMD_DCBXDIS32_M 0x1 +#define FW_PORT_CMD_DCBXDIS32_V(x) ((x) << FW_PORT_CMD_DCBXDIS32_S) +#define FW_PORT_CMD_DCBXDIS32_G(x) \ + (((x) >> FW_PORT_CMD_DCBXDIS32_S) & FW_PORT_CMD_DCBXDIS32_M) +#define FW_PORT_CMD_DCBXDIS32_F FW_PORT_CMD_DCBXDIS32_V(1U) + +#define FW_PORT_CMD_MDIOCAP32_S 26 +#define FW_PORT_CMD_MDIOCAP32_M 0x1 +#define FW_PORT_CMD_MDIOCAP32_V(x) ((x) << FW_PORT_CMD_MDIOCAP32_S) +#define FW_PORT_CMD_MDIOCAP32_G(x) \ + (((x) >> FW_PORT_CMD_MDIOCAP32_S) & FW_PORT_CMD_MDIOCAP32_M) +#define FW_PORT_CMD_MDIOCAP32_F FW_PORT_CMD_MDIOCAP32_V(1U) + +#define FW_PORT_CMD_MDIOADDR32_S 21 +#define FW_PORT_CMD_MDIOADDR32_M 0x1f +#define FW_PORT_CMD_MDIOADDR32_V(x) ((x) << FW_PORT_CMD_MDIOADDR32_S) +#define FW_PORT_CMD_MDIOADDR32_G(x) \ + (((x) >> 
FW_PORT_CMD_MDIOADDR32_S) & FW_PORT_CMD_MDIOADDR32_M) + +#define FW_PORT_CMD_PORTTYPE32_S 13 +#define FW_PORT_CMD_PORTTYPE32_M 0xff +#define FW_PORT_CMD_PORTTYPE32_V(x) ((x) << FW_PORT_CMD_PORTTYPE32_S) +#define FW_PORT_CMD_PORTTYPE32_G(x) \ + (((x) >> FW_PORT_CMD_PORTTYPE32_S) & FW_PORT_CMD_PORTTYPE32_M) + +#define FW_PORT_CMD_MODTYPE32_S 8 +#define FW_PORT_CMD_MODTYPE32_M 0x1f +#define FW_PORT_CMD_MODTYPE32_V(x) ((x) << FW_PORT_CMD_MODTYPE32_S) +#define FW_PORT_CMD_MODTYPE32_G(x) \ + (((x) >> FW_PORT_CMD_MODTYPE32_S) & FW_PORT_CMD_MODTYPE32_M) + +#define FW_PORT_CMD_CBLLEN32_S 0 +#define FW_PORT_CMD_CBLLEN32_M 0xff +#define FW_PORT_CMD_CBLLEN32_V(x) ((x) << FW_PORT_CMD_CBLLEN32_S) +#define FW_PORT_CMD_CBLLEN32_G(x) \ + (((x) >> FW_PORT_CMD_CBLLEN32_S) & FW_PORT_CMD_CBLLEN32_M) + +#define FW_PORT_CMD_AUXLINFO32_S 24 +#define FW_PORT_CMD_AUXLINFO32_M 0xff +#define FW_PORT_CMD_AUXLINFO32_V(x) ((x) << FW_PORT_CMD_AUXLINFO32_S) +#define FW_PORT_CMD_AUXLINFO32_G(x) \ + (((x) >> FW_PORT_CMD_AUXLINFO32_S) & FW_PORT_CMD_AUXLINFO32_M) + +#define FW_PORT_AUXLINFO32_KX4_S 2 +#define FW_PORT_AUXLINFO32_KX4_M 0x1 +#define FW_PORT_AUXLINFO32_KX4_V(x) \ + ((x) << FW_PORT_AUXLINFO32_KX4_S) +#define FW_PORT_AUXLINFO32_KX4_G(x) \ + (((x) >> FW_PORT_AUXLINFO32_KX4_S) & FW_PORT_AUXLINFO32_KX4_M) +#define FW_PORT_AUXLINFO32_KX4_F FW_PORT_AUXLINFO32_KX4_V(1U) + +#define FW_PORT_AUXLINFO32_KR_S 1 +#define FW_PORT_AUXLINFO32_KR_M 0x1 +#define FW_PORT_AUXLINFO32_KR_V(x) \ + ((x) << FW_PORT_AUXLINFO32_KR_S) +#define FW_PORT_AUXLINFO32_KR_G(x) \ + (((x) >> FW_PORT_AUXLINFO32_KR_S) & FW_PORT_AUXLINFO32_KR_M) +#define FW_PORT_AUXLINFO32_KR_F FW_PORT_AUXLINFO32_KR_V(1U) + +#define FW_PORT_CMD_MTU32_S 0 +#define FW_PORT_CMD_MTU32_M 0xffff +#define FW_PORT_CMD_MTU32_V(x) ((x) << FW_PORT_CMD_MTU32_S) +#define FW_PORT_CMD_MTU32_G(x) \ + (((x) >> FW_PORT_CMD_MTU32_S) & FW_PORT_CMD_MTU32_M) + enum fw_port_type { FW_PORT_TYPE_FIBER_XFI, FW_PORT_TYPE_FIBER_XAUI, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2b85b874fd0d..8996ebbd222e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -182,7 +182,7 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) break; } - switch (pi->link_cfg.fc) { + switch ((int)pi->link_cfg.fc) { case PAUSE_RX: fc = "RX"; break; @@ -191,7 +191,7 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok) fc = "TX"; break; - case PAUSE_RX|PAUSE_TX: + case PAUSE_RX | PAUSE_TX: fc = "RX/TX"; break; @@ -1213,7 +1213,11 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, } else if (port_type == FW_PORT_TYPE_SFP || port_type == FW_PORT_TYPE_QSFP_10G || port_type == FW_PORT_TYPE_QSA || - port_type == FW_PORT_TYPE_QSFP) { + port_type == FW_PORT_TYPE_QSFP || + port_type == FW_PORT_TYPE_CR4_QSFP || + port_type == FW_PORT_TYPE_CR_QSFP || + port_type == FW_PORT_TYPE_CR2_QSFP || + port_type == FW_PORT_TYPE_SFP28) { if (mod_type == FW_PORT_MOD_TYPE_LR || mod_type == FW_PORT_MOD_TYPE_SR || mod_type == FW_PORT_MOD_TYPE_ER || @@ -1224,6 +1228,9 @@ static int from_fw_port_mod_type(enum fw_port_type port_type, return PORT_DA; else return PORT_OTHER; + } else if (port_type == FW_PORT_TYPE_KR4_100G || + port_type == FW_PORT_TYPE_KR_SFP28) { + return PORT_NONE; } return PORT_OTHER; @@ -1242,12 +1249,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, unsigned int fw_caps, unsigned long *link_mode_mask) { - 
#define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name\ - ## _BIT, link_mode_mask) + #define SET_LMM(__lmm_name) \ + __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \ + link_mode_mask) #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \ do { \ - if (fw_caps & FW_PORT_CAP_ ## __fw_name) \ + if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \ SET_LMM(__lmm_name); \ } while (0) @@ -1310,6 +1318,16 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, SET_LMM(25000baseCR_Full); break; + case FW_PORT_TYPE_KR_SFP28: + SET_LMM(Backplane); + SET_LMM(25000baseKR_Full); + break; + + case FW_PORT_TYPE_CR2_QSFP: + SET_LMM(FIBRE); + SET_LMM(50000baseSR2_Full); + break; + case FW_PORT_TYPE_KR4_100G: case FW_PORT_TYPE_CR4_QSFP: SET_LMM(FIBRE); @@ -1329,12 +1347,18 @@ static void fw_caps_to_lmm(enum fw_port_type port_type, } static int cxgb4vf_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings - *link_ksettings) + struct ethtool_link_ksettings *link_ksettings) { - const struct port_info *pi = netdev_priv(dev); + struct port_info *pi = netdev_priv(dev); struct ethtool_link_settings *base = &link_ksettings->base; + /* For the nonce, the Firmware doesn't send up Port State changes + * when the Virtual Interface attached to the Port is down. So + * if it's down, let's grab any changes. + */ + if (!netif_running(dev)) + (void)t4vf_update_port_info(pi); + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); @@ -1351,11 +1375,11 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev, base->mdio_support = 0; } - fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps, link_ksettings->link_modes.supported); - fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps, link_ksettings->link_modes.advertising); - fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising, + fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps, link_ksettings->link_modes.lp_advertising); if (netif_carrier_ok(dev)) { @@ -1367,7 +1391,7 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev, } base->autoneg = pi->link_cfg.autoneg; - if (pi->link_cfg.supported & FW_PORT_CAP_ANEG) + if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG) ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Autoneg); if (pi->link_cfg.autoneg) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index b3903fe411aa..9cf9c56b0f73 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -104,24 +104,62 @@ struct t4vf_port_stats { /* * Per-"port" (Virtual Interface) link configuration ... */ -struct link_config { - unsigned int supported; /* link capabilities */ - unsigned int advertising; /* advertised capabilities */ - unsigned short lp_advertising; /* peer advertised capabilities */ - unsigned int requested_speed; /* speed user has requested */ - unsigned int speed; /* actual link speed */ - unsigned char requested_fc; /* flow control user has requested */ - unsigned char fc; /* actual link flow control */ - unsigned char autoneg; /* autonegotiating? */ - unsigned char link_ok; /* link up? 
*/ +typedef u16 fw_port_cap16_t; /* 16-bit Port Capabilities integral value */ +typedef u32 fw_port_cap32_t; /* 32-bit Port Capabilities integral value */ + +enum fw_caps { + FW_CAPS_UNKNOWN = 0, /* 0'ed out initial state */ + FW_CAPS16 = 1, /* old Firmware: 16-bit Port Capabilities */ + FW_CAPS32 = 2, /* new Firmware: 32-bit Port Capabilities */ }; -enum { - PAUSE_RX = 1 << 0, - PAUSE_TX = 1 << 1, - PAUSE_AUTONEG = 1 << 2 +enum cc_pause { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +enum cc_fec { + FEC_AUTO = 1 << 0, /* IEEE 802.3 "automatic" */ + FEC_RS = 1 << 1, /* Reed-Solomon */ + FEC_BASER_RS = 1 << 2, /* BaseR/Reed-Solomon */ +}; + +struct link_config { + fw_port_cap32_t pcaps; /* link capabilities */ + fw_port_cap32_t acaps; /* advertised capabilities */ + fw_port_cap32_t lpacaps; /* peer advertised capabilities */ + + fw_port_cap32_t speed_caps; /* speed(s) user has requested */ + u32 speed; /* actual link speed */ + + enum cc_pause requested_fc; /* flow control user has requested */ + enum cc_pause fc; /* actual link flow control */ + + enum cc_fec auto_fec; /* Forward Error Correction: */ + enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */ + enum cc_fec fec; /* requested, and actual in use */ + + unsigned char autoneg; /* autonegotiating? */ + + unsigned char link_ok; /* link up? */ + unsigned char link_down_rc; /* link down reason */ }; +/* Return true if the Link Configuration supports "High Speeds" (those greater + * than 1Gb/s). + */ +static inline bool is_x_10g_port(const struct link_config *lc) +{ + fw_port_cap32_t speeds, high_speeds; + + speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps)); + high_speeds = + speeds & ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G); + + return high_speeds != 0; +} + /* * General device parameters ... */ @@ -227,6 +265,7 @@ struct adapter_params { struct arch_specific_params arch; /* chip specific params */ enum chip_type chip; /* chip code */ u8 nports; /* # of Ethernet "ports" */ + u8 fw_caps_support; /* 32-bit Port Capabilities */ }; /* Firmware Mailbox Command/Reply log. All values are in Host-Endian format. @@ -266,24 +305,6 @@ static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log, #define for_each_port(adapter, iter) \ for (iter = 0; iter < (adapter)->params.nports; iter++) -static inline bool is_10g_port(const struct link_config *lc) -{ - return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0; -} - -/* Return true if the Link Configuration supports "High Speeds" (those greater - * than 1Gb/s). 
- */ -static inline bool is_x_10g_port(const struct link_config *lc) -{ - unsigned int speeds, high_speeds; - - speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported)); - high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G); - - return high_speeds != 0; -} - static inline unsigned int core_ticks_per_usec(const struct adapter *adapter) { return adapter->params.vpd.cclk / 1000; @@ -387,6 +408,7 @@ int t4vf_iq_free(struct adapter *, unsigned int, unsigned int, unsigned int, unsigned int); int t4vf_eth_eq_free(struct adapter *, unsigned int); +int t4vf_update_port_info(struct port_info *pi); int t4vf_handle_fw_rpl(struct adapter *, const __be64 *); int t4vf_prep_adapter(struct adapter *); int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index e98248f00fef..a8d94963b4d0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -313,32 +313,130 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, return ret; } -#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \ - FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \ - FW_PORT_CAP_ANEG) +#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \ + FW_PORT_CAP32_ANEG) /** + * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits + * @caps16: a 16-bit Port Capabilities value + * + * Returns the equivalent 32-bit Port Capabilities value. + */ +static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16) +{ + fw_port_cap32_t caps32 = 0; + + #define CAP16_TO_CAP32(__cap) \ + do { \ + if (caps16 & FW_PORT_CAP_##__cap) \ + caps32 |= FW_PORT_CAP32_##__cap; \ + } while (0) + + CAP16_TO_CAP32(SPEED_100M); + CAP16_TO_CAP32(SPEED_1G); + CAP16_TO_CAP32(SPEED_25G); + CAP16_TO_CAP32(SPEED_10G); + CAP16_TO_CAP32(SPEED_40G); + CAP16_TO_CAP32(SPEED_100G); + CAP16_TO_CAP32(FC_RX); + CAP16_TO_CAP32(FC_TX); + CAP16_TO_CAP32(ANEG); + CAP16_TO_CAP32(MDIX); + CAP16_TO_CAP32(MDIAUTO); + CAP16_TO_CAP32(FEC_RS); + CAP16_TO_CAP32(FEC_BASER_RS); + CAP16_TO_CAP32(802_3_PAUSE); + CAP16_TO_CAP32(802_3_ASM_DIR); + + #undef CAP16_TO_CAP32 + + return caps32; +} + +/* Translate Firmware Pause specification to Common Code */ +static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause) +{ + enum cc_pause cc_pause = 0; + + if (fw_pause & FW_PORT_CAP32_FC_RX) + cc_pause |= PAUSE_RX; + if (fw_pause & FW_PORT_CAP32_FC_TX) + cc_pause |= PAUSE_TX; + + return cc_pause; +} + +/* Translate Firmware Forward Error Correction specification to Common Code */ +static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec) +{ + enum cc_fec cc_fec = 0; + + if (fw_fec & FW_PORT_CAP32_FEC_RS) + cc_fec |= FEC_RS; + if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS) + cc_fec |= FEC_BASER_RS; + + return cc_fec; +} + +/** + * Return the highest speed set in the port capabilities, in Mb/s. 
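An illustrative aside, not part of the patch: fwcaps16_to_caps32() above maps each old 16-bit capability bit to its 32-bit counterpart by name, so a hypothetical older (FW_CAPS16) firmware advertising 100Mb/s and 1Gb/s with autonegotiation would translate roughly as follows.

	/* Hypothetical 16-bit capabilities reported by an older firmware. */
	fw_port_cap16_t pcap16 = FW_PORT_CAP_SPEED_100M |
				 FW_PORT_CAP_SPEED_1G |
				 FW_PORT_CAP_ANEG;

	fw_port_cap32_t pcaps = fwcaps16_to_caps32(pcap16);
	/* pcaps == FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G |
	 *	    FW_PORT_CAP32_ANEG; with no speed above 1Gb/s present,
	 *	    is_x_10g_port() on such a link returns false.
	 */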
+ */ +static unsigned int fwcap_to_speed(fw_port_cap32_t caps) +{ + #define TEST_SPEED_RETURN(__caps_speed, __speed) \ + do { \ + if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \ + return __speed; \ + } while (0) + + TEST_SPEED_RETURN(400G, 400000); + TEST_SPEED_RETURN(200G, 200000); + TEST_SPEED_RETURN(100G, 100000); + TEST_SPEED_RETURN(50G, 50000); + TEST_SPEED_RETURN(40G, 40000); + TEST_SPEED_RETURN(25G, 25000); + TEST_SPEED_RETURN(10G, 10000); + TEST_SPEED_RETURN(1G, 1000); + TEST_SPEED_RETURN(100M, 100); + + #undef TEST_SPEED_RETURN + + return 0; +} + +/* * init_link_config - initialize a link's SW state * @lc: structure holding the link state - * @caps: link capabilities + * @pcaps: link Port Capabilities + * @acaps: link current Advertised Port Capabilities * * Initializes the SW state maintained for each link, including the link's * capabilities and default speed/flow-control/autonegotiation settings. */ -static void init_link_config(struct link_config *lc, unsigned int caps) +static void init_link_config(struct link_config *lc, + fw_port_cap32_t pcaps, + fw_port_cap32_t acaps) { - lc->supported = caps; - lc->lp_advertising = 0; - lc->requested_speed = 0; + lc->pcaps = pcaps; + lc->lpacaps = 0; + lc->speed_caps = 0; lc->speed = 0; lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX; - if (lc->supported & FW_PORT_CAP_ANEG) { - lc->advertising = lc->supported & ADVERT_MASK; + + /* For Forward Error Control, we default to whatever the Firmware + * tells us the Link is currently advertising. + */ + lc->auto_fec = fwcap_to_cc_fec(acaps); + lc->requested_fec = FEC_AUTO; + lc->fec = lc->auto_fec; + + if (lc->pcaps & FW_PORT_CAP32_ANEG) { + lc->acaps = acaps & ADVERT_MASK; lc->autoneg = AUTONEG_ENABLE; lc->requested_fc |= PAUSE_AUTONEG; } else { - lc->advertising = 0; + lc->acaps = 0; lc->autoneg = AUTONEG_DISABLE; } } @@ -351,9 +449,30 @@ static void init_link_config(struct link_config *lc, unsigned int caps) int t4vf_port_init(struct adapter *adapter, int pidx) { struct port_info *pi = adap2pinfo(adapter, pidx); + unsigned int fw_caps = adapter->params.fw_caps_support; struct fw_vi_cmd vi_cmd, vi_rpl; struct fw_port_cmd port_cmd, port_rpl; - int v; + enum fw_port_type port_type; + int mdio_addr; + fw_port_cap32_t pcaps, acaps; + int ret; + + /* If we haven't yet determined whether we're talking to Firmware + * which knows the new 32-bit Port Capabilities, it's time to find + * out now. This will also tell new Firmware to send us Port Status + * Updates using the new 32-bit Port Capabilities version of the + * Port Information message. + */ + if (fw_caps == FW_CAPS_UNKNOWN) { + u32 param, val; + + param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | + FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32)); + val = 1; + ret = t4vf_set_params(adapter, 1, ¶m, &val); + fw_caps = (ret == 0 ? 
FW_CAPS32 : FW_CAPS16); + adapter->params.fw_caps_support = fw_caps; + } /* * Execute a VI Read command to get our Virtual Interface information @@ -365,9 +484,9 @@ int t4vf_port_init(struct adapter *adapter, int pidx) FW_CMD_READ_F); vi_cmd.alloc_to_len16 = cpu_to_be32(FW_LEN16(vi_cmd)); vi_cmd.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(pi->viid)); - v = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); - if (v) - return v; + ret = t4vf_wr_mbox(adapter, &vi_cmd, sizeof(vi_cmd), &vi_rpl); + if (ret != FW_SUCCESS) + return ret; BUG_ON(pi->port_id != FW_VI_CMD_PORTID_G(vi_rpl.portid_pkd)); pi->rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(vi_rpl.rsssize_pkd)); @@ -385,21 +504,42 @@ int t4vf_port_init(struct adapter *adapter, int pidx) FW_CMD_REQUEST_F | FW_CMD_READ_F | FW_PORT_CMD_PORTID_V(pi->port_id)); - port_cmd.action_to_len16 = - cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) | - FW_LEN16(port_cmd)); - v = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl); - if (v) - return v; + port_cmd.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32) | + FW_LEN16(port_cmd)); + ret = t4vf_wr_mbox(adapter, &port_cmd, sizeof(port_cmd), &port_rpl); + if (ret != FW_SUCCESS) + return ret; - v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); - pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ? - FW_PORT_CMD_MDIOADDR_G(v) : -1; - pi->port_type = FW_PORT_CMD_PTYPE_G(v); - pi->mod_type = FW_PORT_MOD_TYPE_NA; + /* Extract the various fields from the Port Information message. */ + if (fw_caps == FW_CAPS16) { + u32 lstatus = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); - init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap)); + port_type = FW_PORT_CMD_PTYPE_G(lstatus); + mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F) + ? FW_PORT_CMD_MDIOADDR_G(lstatus) + : -1); + pcaps = fwcaps16_to_caps32(be16_to_cpu(port_rpl.u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(port_rpl.u.info.acap)); + } else { + u32 lstatus32 = + be32_to_cpu(port_rpl.u.info32.lstatus32_to_cbllen32); + + port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32); + mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F) + ? FW_PORT_CMD_MDIOADDR32_G(lstatus32) + : -1); + pcaps = be32_to_cpu(port_rpl.u.info32.pcaps32); + acaps = be32_to_cpu(port_rpl.u.info32.acaps32); + } + pi->port_type = port_type; + pi->mdio_addr = mdio_addr; + pi->mod_type = FW_PORT_MOD_TYPE_NA; + + init_link_config(&pi->link_cfg, pcaps, acaps); return 0; } @@ -1666,6 +1806,202 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid) return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } +/** + * t4vf_link_down_rc_str - return a string for a Link Down Reason Code + * @link_down_rc: Link Down Reason Code + * + * Returns a string representation of the Link Down Reason Code. + */ +const char *t4vf_link_down_rc_str(unsigned char link_down_rc) +{ + static const char * const reason[] = { + "Link Down", + "Remote Fault", + "Auto-negotiation Failure", + "Reserved", + "Insufficient Airflow", + "Unable To Determine Reason", + "No RX Signal Detected", + "Reserved", + }; + + if (link_down_rc >= ARRAY_SIZE(reason)) + return "Bad Reason Code"; + + return reason[link_down_rc]; +} + +/** + * t4vf_handle_get_port_info - process a FW reply message + * @pi: the port info + * @rpl: start of the FW message + * + * Processes a GET_PORT_INFO FW reply message. 
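As an illustrative aside, not part of the patch itself: the handler below reduces the firmware's 32-bit capability words to common-code values using the helpers added above. Assuming a hypothetical 25Gb/s link with both pause directions active and RS FEC advertised, the decomposition would look roughly like this (the constants are the FW_PORT_CAP32_* values added to t4fw_api.h earlier in this patch).

	/* Hypothetical capability words, for illustration only. */
	fw_port_cap32_t acaps    = FW_PORT_CAP32_SPEED_25G | FW_PORT_CAP32_FEC_RS;
	fw_port_cap32_t linkattr = FW_PORT_CAP32_SPEED_25G |	/* 0x00000008 */
				   FW_PORT_CAP32_FC_RX |	/* 0x00010000 */
				   FW_PORT_CAP32_FC_TX;		/* 0x00020000 */

	unsigned int speed = fwcap_to_speed(linkattr);		/* 25000 Mb/s */
	enum cc_pause fc   = fwcap_to_cc_pause(linkattr);	/* PAUSE_RX | PAUSE_TX */
	enum cc_fec fec    = fwcap_to_cc_fec(acaps);		/* FEC_RS */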
+ */ +void t4vf_handle_get_port_info(struct port_info *pi, + const struct fw_port_cmd *cmd) +{ + int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); + struct adapter *adapter = pi->adapter; + struct link_config *lc = &pi->link_cfg; + int link_ok, linkdnrc; + enum fw_port_type port_type; + enum fw_port_module_type mod_type; + unsigned int speed, fc, fec; + fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; + + /* Extract the various fields from the Port Information message. */ + switch (action) { + case FW_PORT_ACTION_GET_PORT_INFO: { + u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); + + link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0; + linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus); + port_type = FW_PORT_CMD_PTYPE_G(lstatus); + mod_type = FW_PORT_CMD_MODTYPE_G(lstatus); + pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap)); + acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap)); + lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap)); + + /* Unfortunately the format of the Link Status in the old + * 16-bit Port Information message isn't the same as the + * 16-bit Port Capabilities bitfield used everywhere else ... + */ + linkattr = 0; + if (lstatus & FW_PORT_CMD_RXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_RX; + if (lstatus & FW_PORT_CMD_TXPAUSE_F) + linkattr |= FW_PORT_CAP32_FC_TX; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) + linkattr |= FW_PORT_CAP32_SPEED_100M; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) + linkattr |= FW_PORT_CAP32_SPEED_1G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) + linkattr |= FW_PORT_CAP32_SPEED_10G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) + linkattr |= FW_PORT_CAP32_SPEED_25G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) + linkattr |= FW_PORT_CAP32_SPEED_40G; + if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) + linkattr |= FW_PORT_CAP32_SPEED_100G; + + break; + } + + case FW_PORT_ACTION_GET_PORT_INFO32: { + u32 lstatus32; + + lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32); + link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0; + linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32); + port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32); + mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32); + pcaps = be32_to_cpu(cmd->u.info32.pcaps32); + acaps = be32_to_cpu(cmd->u.info32.acaps32); + lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32); + linkattr = be32_to_cpu(cmd->u.info32.linkattr32); + break; + } + + default: + dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n", + be32_to_cpu(cmd->action_to_len16)); + return; + } + + fec = fwcap_to_cc_fec(acaps); + fc = fwcap_to_cc_pause(linkattr); + speed = fwcap_to_speed(linkattr); + + if (mod_type != pi->mod_type) { + /* When a new Transceiver Module is inserted, the Firmware + * will examine any Forward Error Correction parameters + * present in the Transceiver Module i2c EPROM and determine + * the supported and recommended FEC settings from those + * based on IEEE 802.3 standards. We always record the + * IEEE 802.3 recommended "automatic" settings. + */ + lc->auto_fec = fec; + + /* Some versions of the early T6 Firmware "cheated" when + * handling different Transceiver Modules by changing the + * underlaying Port Type reported to the Host Drivers. As + * such we need to capture whatever Port Type the Firmware + * sends us and record it in case it's different from what we + * were told earlier. 
Unfortunately, since Firmware is + * forever, we'll need to keep this code here forever, but in + * later T6 Firmware it should just be an assignment of the + * same value already recorded. + */ + pi->port_type = port_type; + + pi->mod_type = mod_type; + t4vf_os_portmod_changed(adapter, pi->pidx); + } + + if (link_ok != lc->link_ok || speed != lc->speed || + fc != lc->fc || fec != lc->fec) { /* something changed */ + if (!link_ok && lc->link_ok) { + lc->link_down_rc = linkdnrc; + dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n", + pi->port_id, t4vf_link_down_rc_str(linkdnrc)); + } + lc->link_ok = link_ok; + lc->speed = speed; + lc->fc = fc; + lc->fec = fec; + + lc->pcaps = pcaps; + lc->lpacaps = lpacaps; + lc->acaps = acaps & ADVERT_MASK; + + if (lc->acaps & FW_PORT_CAP32_ANEG) { + lc->autoneg = AUTONEG_ENABLE; + } else { + /* When Autoneg is disabled, user needs to set + * single speed. + * Similar to cxgb4_ethtool.c: set_link_ksettings + */ + lc->acaps = 0; + lc->speed_caps = fwcap_to_speed(acaps); + lc->autoneg = AUTONEG_DISABLE; + } + + t4vf_os_link_changed(adapter, pi->pidx, link_ok); + } +} + +/** + * t4vf_update_port_info - retrieve and update port information if changed + * @pi: the port_info + * + * We issue a Get Port Information Command to the Firmware and, if + * successful, we check to see if anything is different from what we + * last recorded and update things accordingly. + */ +int t4vf_update_port_info(struct port_info *pi) +{ + unsigned int fw_caps = pi->adapter->params.fw_caps_support; + struct fw_port_cmd port_cmd; + int ret; + + memset(&port_cmd, 0, sizeof(port_cmd)); + port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) | + FW_CMD_REQUEST_F | FW_CMD_READ_F | + FW_PORT_CMD_PORTID_V(pi->port_id)); + port_cmd.action_to_len16 = cpu_to_be32( + FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16 + ? FW_PORT_ACTION_GET_PORT_INFO + : FW_PORT_ACTION_GET_PORT_INFO32) | + FW_LEN16(port_cmd)); + ret = t4vf_wr_mbox(pi->adapter, &port_cmd, sizeof(port_cmd), + &port_cmd); + if (ret) + return ret; + t4vf_handle_get_port_info(pi, &port_cmd); + return 0; +} + /** * t4vf_handle_fw_rpl - process a firmware reply message * @adapter: the adapter @@ -1685,15 +2021,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) */ const struct fw_port_cmd *port_cmd = (const struct fw_port_cmd *)rpl; - u32 stat, mod; - int action, port_id, link_ok, speed, fc, pidx; - - /* - * Extract various fields from port status change message. 
- */ - action = FW_PORT_CMD_ACTION_G( + int action = FW_PORT_CMD_ACTION_G( be32_to_cpu(port_cmd->action_to_len16)); - if (action != FW_PORT_ACTION_GET_PORT_INFO) { + int port_id, pidx; + + if (action != FW_PORT_ACTION_GET_PORT_INFO && + action != FW_PORT_ACTION_GET_PORT_INFO32) { dev_err(adapter->pdev_dev, "Unknown firmware PORT reply action %x\n", action); @@ -1702,61 +2035,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) port_id = FW_PORT_CMD_PORTID_G( be32_to_cpu(port_cmd->op_to_portid)); - - stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype); - link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0; - speed = 0; - fc = 0; - if (stat & FW_PORT_CMD_RXPAUSE_F) - fc |= PAUSE_RX; - if (stat & FW_PORT_CMD_TXPAUSE_F) - fc |= PAUSE_TX; - if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M)) - speed = 100; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G)) - speed = 1000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G)) - speed = 10000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G)) - speed = 25000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G)) - speed = 40000; - else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G)) - speed = 100000; - - /* - * Scan all of our "ports" (Virtual Interfaces) looking for - * those bound to the physical port which has changed. If - * our recorded state doesn't match the current state, - * signal that change to the OS code. - */ for_each_port(adapter, pidx) { struct port_info *pi = adap2pinfo(adapter, pidx); - struct link_config *lc; if (pi->port_id != port_id) continue; - - lc = &pi->link_cfg; - - mod = FW_PORT_CMD_MODTYPE_G(stat); - if (mod != pi->mod_type) { - pi->mod_type = mod; - t4vf_os_portmod_changed(adapter, pidx); - } - - if (link_ok != lc->link_ok || speed != lc->speed || - fc != lc->fc) { - /* something changed */ - lc->link_ok = link_ok; - lc->speed = speed; - lc->fc = fc; - lc->supported = - be16_to_cpu(port_cmd->u.info.pcap); - lc->lp_advertising = - be16_to_cpu(port_cmd->u.info.lpacap); - t4vf_os_link_changed(adapter, pidx, link_ok); - } + t4vf_handle_get_port_info(pi, port_cmd); } break; } -- cgit v1.2.3-55-g7522 From 138b57f0f893873badd86e33c2b1f7bbc0bab831 Mon Sep 17 00:00:00 2001 From: Christophe Jaillet Date: Sun, 20 Aug 2017 06:35:00 +0200 Subject: net: ibm: emac: Fix some error handling path in 'emac_probe()' If 'irq_of_parse_and_map()' or 'of_address_to_resource()' fail, 'err' is known to be 0 at this point. So return -ENODEV instead in the first case and use 'of_iomap()' instead of the equivalent 'of_address_to_resource()/ioremap()' combinaison in the 2nd case. Doing so, the 'rsrc_regs' field of the 'emac_instance struct' becomes redundant and is removed. While at it, turn a 'err != 0' test into an equivalent 'err' to be more consistent. Signed-off-by: Christophe JAILLET Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/emac/core.c | 12 ++++-------- drivers/net/ethernet/ibm/emac/core.h | 1 - 2 files changed, 4 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 95135d20458f..7feff2450ed6 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -3032,7 +3032,7 @@ static int emac_probe(struct platform_device *ofdev) /* Init various config data based on device-tree */ err = emac_init_config(dev); - if (err != 0) + if (err) goto err_free; /* Get interrupts. 
EMAC irq is mandatory, WOL irq is optional */ @@ -3040,18 +3040,14 @@ static int emac_probe(struct platform_device *ofdev) dev->wol_irq = irq_of_parse_and_map(np, 1); if (!dev->emac_irq) { printk(KERN_ERR "%pOF: Can't map main interrupt\n", np); + err = -ENODEV; goto err_free; } ndev->irq = dev->emac_irq; /* Map EMAC regs */ - if (of_address_to_resource(np, 0, &dev->rsrc_regs)) { - printk(KERN_ERR "%pOF: Can't get registers address\n", np); - goto err_irq_unmap; - } - // TODO : request_mem_region - dev->emacp = ioremap(dev->rsrc_regs.start, - resource_size(&dev->rsrc_regs)); + // TODO : platform_get_resource() and devm_ioremap_resource() + dev->emacp = of_iomap(np, 0); if (dev->emacp == NULL) { printk(KERN_ERR "%pOF: Can't map device registers!\n", np); err = -ENOMEM; diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h index f10e156641d5..369de2cfb15b 100644 --- a/drivers/net/ethernet/ibm/emac/core.h +++ b/drivers/net/ethernet/ibm/emac/core.h @@ -167,7 +167,6 @@ struct emac_error_stats { struct emac_instance { struct net_device *ndev; - struct resource rsrc_regs; struct emac_regs __iomem *emacp; struct platform_device *ofdev; struct device_node **blist; /* bootlist entry */ -- cgit v1.2.3-55-g7522 From 6eb15e2130c939aec2edb08d84110c20ab3a16bf Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Sat, 19 Aug 2017 16:25:52 +0530 Subject: net: dsa: mv88e6xxx: make irq_chip const Make this const as it is only used in a copy operation. Done using Coccinelle. Signed-off-by: Bhumika Goyal Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 +- drivers/net/dsa/mv88e6xxx/global2.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 918d8f0fe091..c6678aa9b4ef 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -307,7 +307,7 @@ out: mutex_unlock(&chip->reg_lock); } -static struct irq_chip mv88e6xxx_g1_irq_chip = { +static const struct irq_chip mv88e6xxx_g1_irq_chip = { .name = "mv88e6xxx-g1", .irq_mask = mv88e6xxx_g1_irq_mask, .irq_unmask = mv88e6xxx_g1_irq_unmask, diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index 16f556261022..af0727877825 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -1019,7 +1019,7 @@ static void mv88e6xxx_g2_irq_bus_sync_unlock(struct irq_data *d) mutex_unlock(&chip->reg_lock); } -static struct irq_chip mv88e6xxx_g2_irq_chip = { +static const struct irq_chip mv88e6xxx_g2_irq_chip = { .name = "mv88e6xxx-g2", .irq_mask = mv88e6xxx_g2_irq_mask, .irq_unmask = mv88e6xxx_g2_irq_unmask, -- cgit v1.2.3-55-g7522 From 0c45d7fe12c7e1510bae9dfac189c8b927e4636b Mon Sep 17 00:00:00 2001 From: Rick Farrington Date: Fri, 18 Aug 2017 18:21:49 -0700 Subject: liquidio: fix use of pf in pass-through mode in a virtual machine Fix problem when PF is used in pass-through mode in a VM (w/embedded f/w). If host error reading PF num from CN23XX_PCIE_SRIOV_FDL reg, try to retrieve PF num from SLI_PKT(0)_INPUT_CONTROL (initialized by f/w). Signed-off-by: Rick Farrington Signed-off-by: Felix Manlunas Signed-off-by: David S. 
Miller --- .../ethernet/cavium/liquidio/cn23xx_pf_device.c | 47 +++++++++++++++++++--- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2 + 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 4b0ca9fb2cb4..fbc0d4e008f3 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1150,14 +1150,50 @@ static void cn23xx_get_pcie_qlmport(struct octeon_device *oct) oct->pcie_port); } -static void cn23xx_get_pf_num(struct octeon_device *oct) +static int cn23xx_get_pf_num(struct octeon_device *oct) { u32 fdl_bit = 0; + u64 pkt0_in_ctl, d64; + int pfnum, mac, trs, ret; + + ret = 0; /** Read Function Dependency Link reg to get the function number */ - pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, &fdl_bit); - oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & - CN23XX_PCIE_SRIOV_FDL_MASK); + if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, + &fdl_bit) == 0) { + oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & + CN23XX_PCIE_SRIOV_FDL_MASK); + } else { + ret = EINVAL; + + /* Under some virtual environments, extended PCI regs are + * inaccessible, in which case the above read will have failed. + * In this case, read the PF number from the + * SLI_PKT0_INPUT_CONTROL reg (written by f/w) + */ + pkt0_in_ctl = octeon_read_csr64(oct, + CN23XX_SLI_IQ_PKT_CONTROL64(0)); + pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) & + CN23XX_PKT_INPUT_CTL_PF_NUM_MASK; + mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff; + + /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/ + d64 = octeon_read_csr64(oct, + CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum)); + trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff; + if (trs == 1) { + dev_err(&oct->pci_dev->dev, + "OCTEON: error reading PCI cfg space pfnum, re-read %u\n", + pfnum); + oct->pf_num = pfnum; + ret = 0; + } else { + dev_err(&oct->pci_dev->dev, + "OCTEON: error reading PCI cfg space pfnum; could not ascertain PF number\n"); + } + } + + return ret; } static void cn23xx_setup_reg_address(struct octeon_device *oct) @@ -1279,7 +1315,8 @@ int setup_cn23xx_octeon_pf_device(struct octeon_device *oct) return 1; } - cn23xx_get_pf_num(oct); + if (cn23xx_get_pf_num(oct) != 0) + return 1; if (cn23xx_sriov_config(oct)) { octeon_unmap_pci_barx(oct, 0); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 89d4bbc81707..c2360fe8cef2 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1560,6 +1560,8 @@ static int octeon_chip_specific_setup(struct octeon_device *oct) case OCTEON_CN23XX_PCIID_PF: oct->chip_id = OCTEON_CN23XX_PF_VID; ret = setup_cn23xx_octeon_pf_device(oct); + if (ret) + break; #ifdef CONFIG_PCI_IOV if (!ret) pci_sriov_set_totalvfs(oct->pci_dev, -- cgit v1.2.3-55-g7522 From da6817ebc353c54c5e74676418bfa77d13e64159 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Mon, 21 Aug 2017 17:13:10 +0530 Subject: qlogic: make device_attribute const Make these const as they are only passed as an argument to the function device_create_file and device_remove_file and the corresponding arguments are of type const. Done using Coccinelle Signed-off-by: Bhumika Goyal Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 4 ++-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 827de838389f..f2e8de607119 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2828,7 +2828,7 @@ netxen_show_bridged_mode(struct device *dev, return sprintf(buf, "%d\n", bridged_mode); } -static struct device_attribute dev_attr_bridged_mode = { +static const struct device_attribute dev_attr_bridged_mode = { .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = netxen_show_bridged_mode, .store = netxen_store_bridged_mode, @@ -2860,7 +2860,7 @@ netxen_show_diag_mode(struct device *dev, !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)); } -static struct device_attribute dev_attr_diag_mode = { +static const struct device_attribute dev_attr_diag_mode = { .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = netxen_show_diag_mode, .store = netxen_store_diag_mode, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 82fcb83ea3c8..287d89dd086f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -1174,19 +1174,19 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp, return size; } -static struct device_attribute dev_attr_bridged_mode = { +static const struct device_attribute dev_attr_bridged_mode = { .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = qlcnic_show_bridged_mode, .store = qlcnic_store_bridged_mode, }; -static struct device_attribute dev_attr_diag_mode = { +static const struct device_attribute dev_attr_diag_mode = { .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)}, .show = qlcnic_show_diag_mode, .store = qlcnic_store_diag_mode, }; -static struct device_attribute dev_attr_beacon = { +static const struct device_attribute dev_attr_beacon = { .attr = {.name = "beacon", .mode = (S_IRUGO | S_IWUSR)}, .show = qlcnic_show_beacon, .store = qlcnic_store_beacon, -- cgit v1.2.3-55-g7522 From 8d8d18c3283f0cf72f6ac41f337d5b6f818649b6 Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 18 Aug 2017 16:40:31 -0700 Subject: MIPS,bpf: Fix using smp_processor_id() in preemptible splat. If the kernel is configured with preemption enabled we were getting warning stack traces for use of current_cpu_type(). Fix by moving the test between preempt_disable()/preempt_enable() and caching the results of the CPU type tests for use during code generation. Signed-off-by: David Daney Signed-off-by: David S. 
Miller --- arch/mips/net/ebpf_jit.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 3f87b96da5c4..721216b0f8e4 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -113,6 +113,7 @@ struct jit_ctx { u64 *reg_val_types; unsigned int long_b_conversion:1; unsigned int gen_b_offsets:1; + unsigned int use_bbit_insns:1; }; static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type) @@ -655,19 +656,6 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx) return build_int_epilogue(ctx, MIPS_R_T9); } -static bool use_bbit_insns(void) -{ - switch (current_cpu_type()) { - case CPU_CAVIUM_OCTEON: - case CPU_CAVIUM_OCTEON_PLUS: - case CPU_CAVIUM_OCTEON2: - case CPU_CAVIUM_OCTEON3: - return true; - default: - return false; - } -} - static bool is_bad_offset(int b_off) { return b_off > 0x1ffff || b_off < -0x20000; @@ -1198,7 +1186,7 @@ jeq_common: if (dst < 0) return dst; - if (use_bbit_insns() && hweight32((u32)insn->imm) == 1) { + if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) { if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { b_off = b_imm(exit_idx, ctx); if (is_bad_offset(b_off)) @@ -1853,6 +1841,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) memset(&ctx, 0, sizeof(ctx)); + preempt_disable(); + switch (current_cpu_type()) { + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + case CPU_CAVIUM_OCTEON3: + ctx.use_bbit_insns = 1; + default: + ctx.use_bbit_insns = 0; + } + preempt_enable(); + ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); if (ctx.offsets == NULL) goto out_err; -- cgit v1.2.3-55-g7522 From a67b375fdc5b21ae3a15761f9c5ef7bd352b5f7d Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 18 Aug 2017 16:40:32 -0700 Subject: MIPS, bpf: Implement JLT, JLE, JSLT and JSLE ops in the eBPF JIT. Signed-off-by: David Daney Signed-off-by: David S. Miller --- arch/mips/net/ebpf_jit.c | 101 +++++++++++++++++++++++++++++++++-------------- 1 file changed, 72 insertions(+), 29 deletions(-) diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 721216b0f8e4..c1e21cbdd336 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -990,8 +990,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, goto jeq_common; case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */ case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: case BPF_JMP | BPF_JGT | BPF_X: case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JSET | BPF_X: @@ -1013,28 +1017,34 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, cmp_eq = false; dst = MIPS_R_AT; src = MIPS_R_ZERO; - } else if (BPF_OP(insn->code) == BPF_JSGT) { + } else if (BPF_OP(insn->code) == BPF_JSGT || BPF_OP(insn->code) == BPF_JSLE) { emit_instr(ctx, dsubu, MIPS_R_AT, dst, src); if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { b_off = b_imm(exit_idx, ctx); if (is_bad_offset(b_off)) return -E2BIG; - emit_instr(ctx, blez, MIPS_R_AT, b_off); + if (BPF_OP(insn->code) == BPF_JSGT) + emit_instr(ctx, blez, MIPS_R_AT, b_off); + else + emit_instr(ctx, bgtz, MIPS_R_AT, b_off); emit_instr(ctx, nop); return 2; /* We consumed the exit. 
*/ } b_off = b_imm(this_idx + insn->off + 1, ctx); if (is_bad_offset(b_off)) return -E2BIG; - emit_instr(ctx, bgtz, MIPS_R_AT, b_off); + if (BPF_OP(insn->code) == BPF_JSGT) + emit_instr(ctx, bgtz, MIPS_R_AT, b_off); + else + emit_instr(ctx, blez, MIPS_R_AT, b_off); emit_instr(ctx, nop); break; - } else if (BPF_OP(insn->code) == BPF_JSGE) { + } else if (BPF_OP(insn->code) == BPF_JSGE || BPF_OP(insn->code) == BPF_JSLT) { emit_instr(ctx, slt, MIPS_R_AT, dst, src); - cmp_eq = true; + cmp_eq = BPF_OP(insn->code) == BPF_JSGE; dst = MIPS_R_AT; src = MIPS_R_ZERO; - } else if (BPF_OP(insn->code) == BPF_JGT) { + } else if (BPF_OP(insn->code) == BPF_JGT || BPF_OP(insn->code) == BPF_JLE) { /* dst or src could be AT */ emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); emit_instr(ctx, sltu, MIPS_R_AT, dst, src); @@ -1042,12 +1052,12 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8); emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8); emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); - cmp_eq = true; + cmp_eq = BPF_OP(insn->code) == BPF_JGT; dst = MIPS_R_AT; src = MIPS_R_ZERO; - } else if (BPF_OP(insn->code) == BPF_JGE) { + } else if (BPF_OP(insn->code) == BPF_JGE || BPF_OP(insn->code) == BPF_JLT) { emit_instr(ctx, sltu, MIPS_R_AT, dst, src); - cmp_eq = true; + cmp_eq = BPF_OP(insn->code) == BPF_JGE; dst = MIPS_R_AT; src = MIPS_R_ZERO; } else { /* JNE/JEQ case */ @@ -1110,6 +1120,8 @@ jeq_common: break; case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */ + case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */ + case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */ cmp_eq = (BPF_OP(insn->code) == BPF_JSGE); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); if (dst < 0) @@ -1120,65 +1132,92 @@ jeq_common: b_off = b_imm(exit_idx, ctx); if (is_bad_offset(b_off)) return -E2BIG; - if (cmp_eq) - emit_instr(ctx, bltz, dst, b_off); - else + switch (BPF_OP(insn->code)) { + case BPF_JSGT: emit_instr(ctx, blez, dst, b_off); + break; + case BPF_JSGE: + emit_instr(ctx, bltz, dst, b_off); + break; + case BPF_JSLT: + emit_instr(ctx, bgez, dst, b_off); + break; + case BPF_JSLE: + emit_instr(ctx, bgtz, dst, b_off); + break; + } emit_instr(ctx, nop); return 2; /* We consumed the exit. */ } b_off = b_imm(this_idx + insn->off + 1, ctx); if (is_bad_offset(b_off)) return -E2BIG; - if (cmp_eq) - emit_instr(ctx, bgez, dst, b_off); - else + switch (BPF_OP(insn->code)) { + case BPF_JSGT: emit_instr(ctx, bgtz, dst, b_off); + break; + case BPF_JSGE: + emit_instr(ctx, bgez, dst, b_off); + break; + case BPF_JSLT: + emit_instr(ctx, bltz, dst, b_off); + break; + case BPF_JSLE: + emit_instr(ctx, blez, dst, b_off); + break; + } emit_instr(ctx, nop); break; } /* * only "LT" compare available, so we must use imm + 1 - * to generate "GT" + * to generate "GT" and imm -1 to generate LE */ - t64s = insn->imm + (cmp_eq ? 
0 : 1); + if (BPF_OP(insn->code) == BPF_JSGT) + t64s = insn->imm + 1; + else if (BPF_OP(insn->code) == BPF_JSLE) + t64s = insn->imm + 1; + else + t64s = insn->imm; + + cmp_eq = BPF_OP(insn->code) == BPF_JSGT || BPF_OP(insn->code) == BPF_JSGE; if (t64s >= S16_MIN && t64s <= S16_MAX) { emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s); src = MIPS_R_AT; dst = MIPS_R_ZERO; - cmp_eq = true; goto jeq_common; } emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT); src = MIPS_R_AT; dst = MIPS_R_ZERO; - cmp_eq = true; goto jeq_common; case BPF_JMP | BPF_JGT | BPF_K: case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: cmp_eq = (BPF_OP(insn->code) == BPF_JGE); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); if (dst < 0) return dst; /* * only "LT" compare available, so we must use imm + 1 - * to generate "GT" + * to generate "GT" and imm -1 to generate LE */ - t64s = (u64)(u32)(insn->imm) + (cmp_eq ? 0 : 1); - if (t64s >= 0 && t64s <= S16_MAX) { - emit_instr(ctx, sltiu, MIPS_R_AT, dst, (int)t64s); - src = MIPS_R_AT; - dst = MIPS_R_ZERO; - cmp_eq = true; - goto jeq_common; - } + if (BPF_OP(insn->code) == BPF_JGT) + t64s = (u64)(u32)(insn->imm) + 1; + else if (BPF_OP(insn->code) == BPF_JLE) + t64s = (u64)(u32)(insn->imm) + 1; + else + t64s = (u64)(u32)(insn->imm); + + cmp_eq = BPF_OP(insn->code) == BPF_JGT || BPF_OP(insn->code) == BPF_JGE; + emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT); src = MIPS_R_AT; dst = MIPS_R_ZERO; - cmp_eq = true; goto jeq_common; case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */ @@ -1712,10 +1751,14 @@ static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt, case BPF_JEQ: case BPF_JGT: case BPF_JGE: + case BPF_JLT: + case BPF_JLE: case BPF_JSET: case BPF_JNE: case BPF_JSGT: case BPF_JSGE: + case BPF_JSLT: + case BPF_JSLE: if (follow_taken) { rvt[idx] |= RVT_BRANCH_TAKEN; idx += insn->off; -- cgit v1.2.3-55-g7522 From 6035b3faf3e58ee6eb423a45a1b7b7c3b4c8dc9f Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 18 Aug 2017 16:40:33 -0700 Subject: MIPS,bpf: Cache value of BPF_OP(insn->code) in eBPF JIT. The code looks a little cleaner if we replace BPF_OP(insn->code) with the local variable bpf_op. Caching the value this way also saves 300 bytes (about 1%) in the code size of the JIT. Signed-off-by: David Daney Signed-off-by: David S. 
Miller --- arch/mips/net/ebpf_jit.c | 67 ++++++++++++++++++++++++------------------------ 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index c1e21cbdd336..44ddc12cbb0e 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -670,6 +670,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, unsigned int target; u64 t64; s64 t64s; + int bpf_op = BPF_OP(insn->code); switch (insn->code) { case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */ @@ -758,13 +759,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, sll, dst, dst, 0); if (insn->imm == 1) { /* div by 1 is a nop, mod by 1 is zero */ - if (BPF_OP(insn->code) == BPF_MOD) + if (bpf_op == BPF_MOD) emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); break; } gen_imm_to_reg(insn, MIPS_R_AT, ctx); emit_instr(ctx, divu, dst, MIPS_R_AT); - if (BPF_OP(insn->code) == BPF_DIV) + if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); else emit_instr(ctx, mfhi, dst); @@ -786,13 +787,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (insn->imm == 1) { /* div by 1 is a nop, mod by 1 is zero */ - if (BPF_OP(insn->code) == BPF_MOD) + if (bpf_op == BPF_MOD) emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO); break; } gen_imm_to_reg(insn, MIPS_R_AT, ctx); emit_instr(ctx, ddivu, dst, MIPS_R_AT); - if (BPF_OP(insn->code) == BPF_DIV) + if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); else emit_instr(ctx, mfhi, dst); @@ -817,7 +818,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); did_move = false; if (insn->src_reg == BPF_REG_10) { - if (BPF_OP(insn->code) == BPF_MOV) { + if (bpf_op == BPF_MOV) { emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK); did_move = true; } else { @@ -827,7 +828,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) { int tmp_reg = MIPS_R_AT; - if (BPF_OP(insn->code) == BPF_MOV) { + if (bpf_op == BPF_MOV) { tmp_reg = dst; did_move = true; } @@ -835,7 +836,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32); src = MIPS_R_AT; } - switch (BPF_OP(insn->code)) { + switch (bpf_op) { case BPF_MOV: if (!did_move) emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO); @@ -867,7 +868,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); emit_instr(ctx, ddivu, dst, src); - if (BPF_OP(insn->code) == BPF_DIV) + if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); else emit_instr(ctx, mfhi, dst); @@ -911,7 +912,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { int tmp_reg = MIPS_R_AT; - if (BPF_OP(insn->code) == BPF_MOV) { + if (bpf_op == BPF_MOV) { tmp_reg = dst; did_move = true; } @@ -919,7 +920,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, sll, tmp_reg, src, 0); src = MIPS_R_AT; } - switch (BPF_OP(insn->code)) { + switch (bpf_op) { case BPF_MOV: if (!did_move) emit_instr(ctx, addu, dst, src, MIPS_R_ZERO); @@ -950,7 +951,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, beq, src, MIPS_R_ZERO, b_off); 
emit_instr(ctx, movz, MIPS_R_V0, MIPS_R_ZERO, src); emit_instr(ctx, divu, dst, src); - if (BPF_OP(insn->code) == BPF_DIV) + if (bpf_op == BPF_DIV) emit_instr(ctx, mflo, dst); else emit_instr(ctx, mfhi, dst); @@ -977,7 +978,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, break; case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */ - cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); + cmp_eq = (bpf_op == BPF_JEQ); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); if (dst < 0) return dst; @@ -1012,18 +1013,18 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, sll, MIPS_R_AT, dst, 0); dst = MIPS_R_AT; } - if (BPF_OP(insn->code) == BPF_JSET) { + if (bpf_op == BPF_JSET) { emit_instr(ctx, and, MIPS_R_AT, dst, src); cmp_eq = false; dst = MIPS_R_AT; src = MIPS_R_ZERO; - } else if (BPF_OP(insn->code) == BPF_JSGT || BPF_OP(insn->code) == BPF_JSLE) { + } else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) { emit_instr(ctx, dsubu, MIPS_R_AT, dst, src); if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) { b_off = b_imm(exit_idx, ctx); if (is_bad_offset(b_off)) return -E2BIG; - if (BPF_OP(insn->code) == BPF_JSGT) + if (bpf_op == BPF_JSGT) emit_instr(ctx, blez, MIPS_R_AT, b_off); else emit_instr(ctx, bgtz, MIPS_R_AT, b_off); @@ -1033,18 +1034,18 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, b_off = b_imm(this_idx + insn->off + 1, ctx); if (is_bad_offset(b_off)) return -E2BIG; - if (BPF_OP(insn->code) == BPF_JSGT) + if (bpf_op == BPF_JSGT) emit_instr(ctx, bgtz, MIPS_R_AT, b_off); else emit_instr(ctx, blez, MIPS_R_AT, b_off); emit_instr(ctx, nop); break; - } else if (BPF_OP(insn->code) == BPF_JSGE || BPF_OP(insn->code) == BPF_JSLT) { + } else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) { emit_instr(ctx, slt, MIPS_R_AT, dst, src); - cmp_eq = BPF_OP(insn->code) == BPF_JSGE; + cmp_eq = bpf_op == BPF_JSGE; dst = MIPS_R_AT; src = MIPS_R_ZERO; - } else if (BPF_OP(insn->code) == BPF_JGT || BPF_OP(insn->code) == BPF_JLE) { + } else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) { /* dst or src could be AT */ emit_instr(ctx, dsubu, MIPS_R_T8, dst, src); emit_instr(ctx, sltu, MIPS_R_AT, dst, src); @@ -1052,16 +1053,16 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8); emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8); emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT); - cmp_eq = BPF_OP(insn->code) == BPF_JGT; + cmp_eq = bpf_op == BPF_JGT; dst = MIPS_R_AT; src = MIPS_R_ZERO; - } else if (BPF_OP(insn->code) == BPF_JGE || BPF_OP(insn->code) == BPF_JLT) { + } else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) { emit_instr(ctx, sltu, MIPS_R_AT, dst, src); - cmp_eq = BPF_OP(insn->code) == BPF_JGE; + cmp_eq = bpf_op == BPF_JGE; dst = MIPS_R_AT; src = MIPS_R_ZERO; } else { /* JNE/JEQ case */ - cmp_eq = (BPF_OP(insn->code) == BPF_JEQ); + cmp_eq = (bpf_op == BPF_JEQ); } jeq_common: /* @@ -1122,7 +1123,7 @@ jeq_common: case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */ case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */ - cmp_eq = (BPF_OP(insn->code) == BPF_JSGE); + cmp_eq = (bpf_op == BPF_JSGE); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); if (dst < 0) return dst; @@ -1132,7 +1133,7 @@ jeq_common: b_off = b_imm(exit_idx, ctx); if (is_bad_offset(b_off)) return -E2BIG; - switch (BPF_OP(insn->code)) { + switch (bpf_op) { case BPF_JSGT: 
emit_instr(ctx, blez, dst, b_off); break; @@ -1152,7 +1153,7 @@ jeq_common: b_off = b_imm(this_idx + insn->off + 1, ctx); if (is_bad_offset(b_off)) return -E2BIG; - switch (BPF_OP(insn->code)) { + switch (bpf_op) { case BPF_JSGT: emit_instr(ctx, bgtz, dst, b_off); break; @@ -1173,14 +1174,14 @@ jeq_common: * only "LT" compare available, so we must use imm + 1 * to generate "GT" and imm -1 to generate LE */ - if (BPF_OP(insn->code) == BPF_JSGT) + if (bpf_op == BPF_JSGT) t64s = insn->imm + 1; - else if (BPF_OP(insn->code) == BPF_JSLE) + else if (bpf_op == BPF_JSLE) t64s = insn->imm + 1; else t64s = insn->imm; - cmp_eq = BPF_OP(insn->code) == BPF_JSGT || BPF_OP(insn->code) == BPF_JSGE; + cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE; if (t64s >= S16_MIN && t64s <= S16_MAX) { emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s); src = MIPS_R_AT; @@ -1197,7 +1198,7 @@ jeq_common: case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JLT | BPF_K: case BPF_JMP | BPF_JLE | BPF_K: - cmp_eq = (BPF_OP(insn->code) == BPF_JGE); + cmp_eq = (bpf_op == BPF_JGE); dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok); if (dst < 0) return dst; @@ -1205,14 +1206,14 @@ jeq_common: * only "LT" compare available, so we must use imm + 1 * to generate "GT" and imm -1 to generate LE */ - if (BPF_OP(insn->code) == BPF_JGT) + if (bpf_op == BPF_JGT) t64s = (u64)(u32)(insn->imm) + 1; - else if (BPF_OP(insn->code) == BPF_JLE) + else if (bpf_op == BPF_JLE) t64s = (u64)(u32)(insn->imm) + 1; else t64s = (u64)(u32)(insn->imm); - cmp_eq = BPF_OP(insn->code) == BPF_JGT || BPF_OP(insn->code) == BPF_JGE; + cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE; emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s); emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT); -- cgit v1.2.3-55-g7522 From 89e49506bc62520f93e64a278293444319a6aebb Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 17 Aug 2017 16:47:00 +0200 Subject: dsa: remove unused net_device arg from handlers compile tested only, but saw no warnings/errors with allmodconfig build. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller --- include/net/dsa.h | 6 ++---- net/dsa/dsa.c | 4 ++-- net/dsa/tag_brcm.c | 3 +-- net/dsa/tag_dsa.c | 3 +-- net/dsa/tag_edsa.c | 3 +-- net/dsa/tag_ksz.c | 3 +-- net/dsa/tag_lan9303.c | 2 +- net/dsa/tag_mtk.c | 3 +-- net/dsa/tag_qca.c | 3 +-- net/dsa/tag_trailer.c | 3 +-- 10 files changed, 12 insertions(+), 21 deletions(-) diff --git a/include/net/dsa.h b/include/net/dsa.h index 7f46b521313e..398ca8d70ccd 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -104,8 +104,7 @@ struct packet_type; struct dsa_device_ops { struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev); struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev); + struct packet_type *pt); int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto, int *offset); }; @@ -134,8 +133,7 @@ struct dsa_switch_tree { /* Copy of tag_ops->rcv for faster access in hot path */ struct sk_buff * (*rcv)(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev); + struct packet_type *pt); /* * The switch port to which the CPU is attached. 
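For illustration only (this sketch is not part of the patch): after this change a tagger's receive hook takes just the skb, the CPU-facing net_device and the packet_type; the unused orig_dev argument is gone. The function name below is hypothetical and the body is a minimal stub; the per-driver hunks that follow show the real conversions.

static struct sk_buff *example_tag_rcv(struct sk_buff *skb,
				       struct net_device *dev,
				       struct packet_type *pt)
{
	struct dsa_switch_tree *dst = dev->dsa_ptr;
	struct dsa_port *cpu_dp = dsa_get_cpu_port(dst);

	if (!cpu_dp)
		return NULL;	/* no CPU port known; drop the frame */

	/* A real tagger parses its switch-specific header here and points
	 * skb->dev at the user port; returning NULL makes dsa_switch_rcv()
	 * free the skb.
	 */
	return skb;
}

A hypothetical struct dsa_device_ops would then simply set .rcv = example_tag_rcv, matching the trimmed prototype in dsa_device_ops above.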
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 99e38af85fc5..03c58b0eb082 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -186,7 +186,7 @@ struct net_device *dsa_dev_to_net_device(struct device *dev) EXPORT_SYMBOL_GPL(dsa_dev_to_net_device); static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) + struct packet_type *pt, struct net_device *unused) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct sk_buff *nskb = NULL; @@ -202,7 +202,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, if (!skb) return 0; - nskb = dst->rcv(skb, dev, pt, orig_dev); + nskb = dst->rcv(skb, dev, pt); if (!nskb) { kfree_skb(skb); return 0; diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index c697d9815177..de74c3f77818 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -89,8 +89,7 @@ static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev } static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev) + struct packet_type *pt) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_port *cpu_dp = dsa_get_cpu_port(dst); diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index 12867a4b458f..fbf9ca954773 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -65,8 +65,7 @@ static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev) } static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev) + struct packet_type *pt) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_switch *ds; diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 67a9d26f9075..76367ba1b2e2 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -78,8 +78,7 @@ static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev) } static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev) + struct packet_type *pt) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_switch *ds; diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index de66ca8e6201..17f30675c15c 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -76,8 +76,7 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev) } static struct sk_buff *ksz_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev) + struct packet_type *pt) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_port *cpu_dp = dsa_get_cpu_port(dst); diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c index e23e7635fa00..0b9826105e42 100644 --- a/net/dsa/tag_lan9303.c +++ b/net/dsa/tag_lan9303.c @@ -68,7 +68,7 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev) } static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, struct net_device *orig_dev) + struct packet_type *pt) { u16 *lan9303_tag; struct dsa_switch_tree *dst = dev->dsa_ptr; diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c index 02163c045a96..ec8ee5f43255 100644 --- a/net/dsa/tag_mtk.c +++ b/net/dsa/tag_mtk.c @@ -44,8 +44,7 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, } static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev) + struct packet_type *pt) { struct dsa_switch_tree *dst = 
dev->dsa_ptr; struct dsa_switch *ds; diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index 1867a3d11f28..1d4c70711c0f 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -63,8 +63,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) } static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev) + struct packet_type *pt) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_port *cpu_dp = dsa_get_cpu_port(dst); diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index b09e56214005..8707157dea32 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -56,8 +56,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev) } static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *pt, - struct net_device *orig_dev) + struct packet_type *pt) { struct dsa_switch_tree *dst = dev->dsa_ptr; struct dsa_port *cpu_dp = dsa_get_cpu_port(dst); -- cgit v1.2.3-55-g7522 From 4832c30d5458387ff2533ff66fbde26ad8bb5a2d Mon Sep 17 00:00:00 2001 From: David Ahern Date: Thu, 17 Aug 2017 12:17:20 -0700 Subject: net: ipv6: put host and anycast routes on device with address One nagging difference between ipv4 and ipv6 is host routes for ipv6 addresses are installed using the loopback device or VRF / L3 Master device. e.g., 2001:db8:1::/120 dev veth0 proto kernel metric 256 pref medium local 2001:db8:1::1 dev lo table local proto kernel metric 0 pref medium Using the loopback device is convenient -- necessary for local tx, but has some nasty side effects, most notably setting the 'lo' device down causes all host routes for all local IPv6 address to be removed from the FIB and completely breaks IPv6 networking across all interfaces. This patch puts FIB entries for IPv6 routes against the device. This simplifies the routes in the FIB, for example by making dst->dev and rt6i_idev->dev the same (a future patch can look at removing the device reference taken for rt6i_idev for FIB entries). When copies are made on FIB lookups, the cloned route has dst->dev set to loopback (or the L3 master device). This is needed for the local Tx of packets to local addresses. With fib entries allocated against the real network device, the addrconf code that reinserts host routes on admin up of 'lo' is no longer needed. Signed-off-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/addrconf.c | 42 ------------------------------------------ net/ipv6/icmp.c | 15 +++++++++++++-- net/ipv6/route.c | 46 ++++++++++++++++++++++++++++++++++------------ 3 files changed, 47 insertions(+), 56 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 640792e1ecb7..45d0a24644de 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3030,9 +3030,6 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) static void init_loopback(struct net_device *dev) { struct inet6_dev *idev; - struct net_device *sp_dev; - struct inet6_ifaddr *sp_ifa; - struct rt6_info *sp_rt; /* ::1 */ @@ -3045,45 +3042,6 @@ static void init_loopback(struct net_device *dev) } add_addr(idev, &in6addr_loopback, 128, IFA_HOST); - - /* Add routes to other interface's IPv6 addresses */ - for_each_netdev(dev_net(dev), sp_dev) { - if (!strcmp(sp_dev->name, dev->name)) - continue; - - idev = __in6_dev_get(sp_dev); - if (!idev) - continue; - - read_lock_bh(&idev->lock); - list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { - - if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) - continue; - - if (sp_ifa->rt) { - /* This dst has been added to garbage list when - * lo device down, release this obsolete dst and - * reallocate a new router for ifa. - */ - if (!sp_ifa->rt->rt6i_node) { - ip6_rt_put(sp_ifa->rt); - sp_ifa->rt = NULL; - } else { - continue; - } - } - - sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, false); - - /* Failure cases are ignored */ - if (!IS_ERR(sp_rt)) { - sp_ifa->rt = sp_rt; - ip6_ins_rt(sp_rt); - } - } - read_unlock_bh(&idev->lock); - } } void addrconf_add_linklocal(struct inet6_dev *idev, diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 8d7b113958b1..4f82830fc068 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -459,9 +459,20 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, * Source addr check */ - if (__ipv6_addr_needs_scope_id(addr_type)) + if (__ipv6_addr_needs_scope_id(addr_type)) { iif = skb->dev->ifindex; - else { + + /* for local packets, get the real device index */ + if (iif == LOOPBACK_IFINDEX) { + dst = skb_dst(skb); + if (dst) { + struct rt6_info *rt; + + rt = container_of(dst, struct rt6_info, dst); + iif = rt->rt6i_idev->dev->ifindex; + } + } + } else { dst = skb_dst(skb); iif = l3mdev_master_ifindex(dst ? 
dst->dev : skb->dev); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index bec12ae3e6b7..9b02064c3335 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -958,10 +958,34 @@ int ip6_ins_rt(struct rt6_info *rt) return __ip6_ins_rt(rt, &info, &mxc, NULL); } +/* called with rcu_lock held */ +static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt) +{ + struct net_device *dev = rt->dst.dev; + + if (rt->rt6i_flags & RTF_LOCAL) { + /* for copies of local routes, dst->dev needs to be the + * device if it is a master device, the master device if + * device is enslaved, and the loopback as the default + */ + if (netif_is_l3_slave(dev) && + !rt6_need_strict(&rt->rt6i_dst.addr)) + dev = l3mdev_master_dev_rcu(dev); + else if (!netif_is_l3_master(dev)) + dev = dev_net(dev)->loopback_dev; + /* last case is netif_is_l3_master(dev) is true in which + * case we want dev returned to be dev + */ + } + + return dev; +} + static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort, const struct in6_addr *daddr, const struct in6_addr *saddr) { + struct net_device *dev; struct rt6_info *rt; /* @@ -971,8 +995,10 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort, if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)) ort = (struct rt6_info *)ort->dst.from; - rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0); - + rcu_read_lock(); + dev = ip6_rt_get_dev_rcu(ort); + rt = __ip6_dst_alloc(dev_net(dev), dev, 0); + rcu_read_unlock(); if (!rt) return NULL; @@ -1000,11 +1026,13 @@ static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort, static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt) { + struct net_device *dev; struct rt6_info *pcpu_rt; - pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev), - rt->dst.dev, rt->dst.flags); - + rcu_read_lock(); + dev = ip6_rt_get_dev_rcu(rt); + pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, rt->dst.flags); + rcu_read_unlock(); if (!pcpu_rt) return NULL; ip6_rt_copy_init(pcpu_rt, rt); @@ -2688,15 +2716,9 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, { u32 tb_id; struct net *net = dev_net(idev->dev); - struct net_device *dev = net->loopback_dev; + struct net_device *dev = idev->dev; struct rt6_info *rt; - /* use L3 Master device as loopback for host routes if device - * is enslaved and address is not link local or multicast - */ - if (!rt6_need_strict(addr)) - dev = l3mdev_master_dev_rcu(idev->dev) ? : dev; - rt = ip6_dst_alloc(net, dev, DST_NOCOUNT); if (!rt) return ERR_PTR(-ENOMEM); -- cgit v1.2.3-55-g7522 From e65a4955b0bb70ab66e2fbfd5509747fe51d8bf9 Mon Sep 17 00:00:00 2001 From: David Lamparter Date: Fri, 18 Aug 2017 14:31:35 +0200 Subject: net: check type when freeing metadata dst Commit 3fcece12bc1b ("net: store port/representator id in metadata_dst") added a new type field to metadata_dst, but metadata_dst_free() wasn't updated to check it before freeing the METADATA_IP_TUNNEL specific dst cache entry. This is not currently causing problems since it's far enough back in the struct to be zeroed for the only other type currently in existance (METADATA_HW_PORT_MUX), but nevertheless it's not correct. Fixes: 3fcece12bc1b ("net: store port/representator id in metadata_dst") Signed-off-by: David Lamparter Cc: Jakub Kicinski Cc: Sridhar Samudrala Cc: Simon Horman Cc: David S. Miller Signed-off-by: David S. 
Miller --- net/core/dst.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/core/dst.c b/net/core/dst.c index d6ead757c258..a6c47da7d0f8 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(metadata_dst_alloc); void metadata_dst_free(struct metadata_dst *md_dst) { #ifdef CONFIG_DST_CACHE - dst_cache_destroy(&md_dst->u.tun_info.dst_cache); + if (md_dst->type == METADATA_IP_TUNNEL) + dst_cache_destroy(&md_dst->u.tun_info.dst_cache); #endif kfree(md_dst); } -- cgit v1.2.3-55-g7522 From 40501f90ed5d992176ba504910d512d9dd1b2668 Mon Sep 17 00:00:00 2001 From: Jon Paul Maloy Date: Mon, 21 Aug 2017 17:59:30 +0200 Subject: tipc: don't reset stale broadcast send link When the broadcast send link after 100 attempts has failed to transfer a packet to all peers, we consider it stale, and reset it. Thereafter it needs to re-synchronize with the peers, something currently done by just resetting and re-establishing all links to all peers. This has turned out to be overkill, with potentially unwanted consequences for the remaining cluster. A closer analysis reveals that this can be done much simpler. When this kind of failure happens, for reasons that may lie outside the TIPC protocol, it is typically only one peer which is failing to receive and acknowledge packets. It is hence sufficient to identify and reset the links only to that peer to resolve the situation, without having to reset the broadcast link at all. This solution entails a much lower risk of negative consequences for the own node as well as for the overall cluster. We implement this change in this commit. Reviewed-by: Parthasarathy Bhuvaragan Acked-by: Ying Xue Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/bearer.c | 24 ------------------------ net/tipc/bearer.h | 1 - net/tipc/link.c | 23 +++++++++++++---------- net/tipc/node.c | 14 ++++---------- 4 files changed, 17 insertions(+), 45 deletions(-) diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 767e0537dde5..d49598f6002b 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -365,30 +365,6 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b) return 0; } -/* tipc_bearer_reset_all - reset all links on all bearers - */ -void tipc_bearer_reset_all(struct net *net) -{ - struct tipc_bearer *b; - int i; - - for (i = 0; i < MAX_BEARERS; i++) { - b = bearer_get(net, i); - if (b) - clear_bit_unlock(0, &b->up); - } - for (i = 0; i < MAX_BEARERS; i++) { - b = bearer_get(net, i); - if (b) - tipc_reset_bearer(net, b); - } - for (i = 0; i < MAX_BEARERS; i++) { - b = bearer_get(net, i); - if (b) - test_and_set_bit_lock(0, &b->up); - } -} - /** * bearer_disable * diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 635c9086e19a..865cb0901a20 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -210,7 +210,6 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest); struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name); int tipc_bearer_get_name(struct net *net, char *name, u32 bearer_id); struct tipc_media *tipc_media_find(const char *name); -void tipc_bearer_reset_all(struct net *net); int tipc_bearer_setup(void); void tipc_bearer_cleanup(void); void tipc_bearer_stop(struct net *net); diff --git a/net/tipc/link.c b/net/tipc/link.c index 60820dc35a08..ac0144f532aa 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -978,15 +978,15 @@ static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb) struct tipc_msg *hdr = buf_msg(skb); 
pr_warn("Retransmission failure on link <%s>\n", l->name); - link_print(l, "Resetting link "); + link_print(l, "State of link "); pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n", msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr)); pr_info("sqno %u, prev: %x, src: %x\n", msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr)); } -int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to, - struct sk_buff_head *xmitq) +int tipc_link_retrans(struct tipc_link *l, struct tipc_link *nacker, + u16 from, u16 to, struct sk_buff_head *xmitq) { struct sk_buff *_skb, *skb = skb_peek(&l->transmq); struct tipc_msg *hdr; @@ -997,11 +997,14 @@ int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to, return 0; /* Detect repeated retransmit failures on same packet */ - if (likely(l->last_retransm != buf_seqno(skb))) { - l->last_retransm = buf_seqno(skb); - l->stale_count = 1; - } else if (++l->stale_count > 100) { + if (nacker->last_retransm != buf_seqno(skb)) { + nacker->last_retransm = buf_seqno(skb); + nacker->stale_count = 1; + } else if (++nacker->stale_count > 100) { link_retransmit_failure(l, skb); + nacker->stale_count = 0; + if (link_is_bc_sndlink(l)) + return TIPC_LINK_DOWN_EVT; return tipc_link_fsm_evt(l, LINK_FAILURE_EVT); } @@ -1528,7 +1531,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, /* If NACK, retransmit will now start at right position */ if (gap) { - rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq); + rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq); l->stats.recv_nacks++; } @@ -1680,7 +1683,7 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, return rc; if (link_bc_retr_eval(snd_l, &from, &to)) - rc = tipc_link_retrans(snd_l, from, to, xmitq); + rc = tipc_link_retrans(snd_l, l, from, to, xmitq); l->snd_nxt = peers_snd_nxt; if (link_bc_rcv_gap(l)) @@ -1775,7 +1778,7 @@ int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb, if (dnode == tipc_own_addr(l->net)) { tipc_link_bc_ack_rcv(l, acked, xmitq); - rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq); + rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq); l->stats.recv_nacks++; return rc; } diff --git a/net/tipc/node.c b/net/tipc/node.c index 9b4dcb6a16b5..eb728397c810 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -1284,7 +1284,7 @@ static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr, rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr); if (rc & TIPC_LINK_DOWN_EVT) { - tipc_bearer_reset_all(n->net); + tipc_node_reset_links(n); return; } @@ -1351,15 +1351,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id if (!skb_queue_empty(&be->inputq1)) tipc_node_mcast_rcv(n); - if (rc & TIPC_LINK_DOWN_EVT) { - /* Reception reassembly failure => reset all links to peer */ - if (!tipc_link_is_up(be->link)) - tipc_node_reset_links(n); - - /* Retransmission failure => reset all links to all peers */ - if (!tipc_link_is_up(tipc_bc_sndlink(net))) - tipc_bearer_reset_all(net); - } + /* If reassembly or retransmission failure => reset all links to peer */ + if (rc & TIPC_LINK_DOWN_EVT) + tipc_node_reset_links(n); tipc_node_put(n); } -- cgit v1.2.3-55-g7522 From 7d3f0cd43feea1636dd7746f22fe8249b34d1b79 Mon Sep 17 00:00:00 2001 From: Gao Feng Date: Fri, 18 Aug 2017 15:23:24 +0800 Subject: net: sched: Add the invalid handle check in qdisc_class_find Add the invalid handle "0" check to avoid unnecessary search, because the qdisc uses the skb->priority as the handle value to look up, 
and it is "0" usually. Signed-off-by: Gao Feng Signed-off-by: David S. Miller --- include/net/sch_generic.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 5865db91976b..107c52432245 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -393,6 +393,9 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id) struct Qdisc_class_common *cl; unsigned int h; + if (!id) + return NULL; + h = qdisc_class_hash(id, hash->hashmask); hlist_for_each_entry(cl, &hash->hash[h], hnode) { if (cl->classid == id) -- cgit v1.2.3-55-g7522 From 39c13c204bb1150d401e27d41a9d8b332be47c49 Mon Sep 17 00:00:00 2001 From: Shubham Bansal Date: Tue, 22 Aug 2017 12:02:33 +0530 Subject: arm: eBPF JIT compiler The JIT compiler emits ARM 32 bit instructions. Currently, It supports eBPF only. Classic BPF is supported because of the conversion by BPF core. This patch is essentially changing the current implementation of JIT compiler of Berkeley Packet Filter from classic to internal with almost all instructions from eBPF ISA supported except the following BPF_ALU64 | BPF_DIV | BPF_K BPF_ALU64 | BPF_DIV | BPF_X BPF_ALU64 | BPF_MOD | BPF_K BPF_ALU64 | BPF_MOD | BPF_X BPF_STX | BPF_XADD | BPF_W BPF_STX | BPF_XADD | BPF_DW Implementation is using scratch space to emulate 64 bit eBPF ISA on 32 bit ARM because of deficiency of general purpose registers on ARM. Currently, only LITTLE ENDIAN machines are supported in this eBPF JIT Compiler. Tested on ARMv7 with QEMU by me (Shubham Bansal). Testing results on ARMv7: 1) test_bpf: Summary: 341 PASSED, 0 FAILED, [312/333 JIT'ed] 2) test_tag: OK (40945 tests) 3) test_progs: Summary: 30 PASSED, 0 FAILED 4) test_lpm: OK 5) test_lru_map: OK Above tests are all done with following flags enabled discreatly. 1) bpf_jit_enable=1 a) CONFIG_FRAME_POINTER enabled b) CONFIG_FRAME_POINTER disabled 2) bpf_jit_enable=1 and bpf_jit_harden=2 a) CONFIG_FRAME_POINTER enabled b) CONFIG_FRAME_POINTER disabled See Documentation/networking/filter.txt for more information. Signed-off-by: Shubham Bansal Signed-off-by: David S. Miller --- arch/arm/Kconfig | 2 +- arch/arm/net/bpf_jit_32.c | 2448 ++++++++++++++++++++++++++++++--------------- arch/arm/net/bpf_jit_32.h | 108 +- 3 files changed, 1747 insertions(+), 811 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 61a0cb15067e..f1b3f1d575d4 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -50,7 +50,7 @@ config ARM select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) select HAVE_ARCH_TRACEHOOK select HAVE_ARM_SMCCC if CPU_V7 - select HAVE_CBPF_JIT + select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 select HAVE_CC_STACKPROTECTOR select HAVE_CONTEXT_TRACKING select HAVE_C_RECORDMCOUNT diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index d5b9fa19b684..c199990e12b6 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -1,6 +1,7 @@ /* - * Just-In-Time compiler for BPF filters on 32bit ARM + * Just-In-Time compiler for eBPF filters on 32bit ARM * + * Copyright (c) 2017 Shubham Bansal * Copyright (c) 2011 Mircea Gherzan * * This program is free software; you can redistribute it and/or modify it @@ -8,6 +9,7 @@ * Free Software Foundation; version 2 of the License. 
*/ +#include #include #include #include @@ -18,54 +20,101 @@ #include #include -#include #include #include #include "bpf_jit_32.h" +int bpf_jit_enable __read_mostly; + +#define STACK_OFFSET(k) (k) +#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ +#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ +#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ + +/* Flags used for JIT optimization */ +#define SEEN_CALL (1 << 0) + +#define FLAG_IMM_OVERFLOW (1 << 0) + /* - * ABI: + * Map eBPF registers to ARM 32bit registers or stack scratch space. + * + * 1. First argument is passed using the arm 32bit registers and rest of the + * arguments are passed on stack scratch space. + * 2. First callee-saved arugument is mapped to arm 32 bit registers and rest + * arguments are mapped to scratch space on stack. + * 3. We need two 64 bit temp registers to do complex operations on eBPF + * registers. + * + * As the eBPF registers are all 64 bit registers and arm has only 32 bit + * registers, we have to map each eBPF registers with two arm 32 bit regs or + * scratch memory space and we have to build eBPF 64 bit register from those. * - * r0 scratch register - * r4 BPF register A - * r5 BPF register X - * r6 pointer to the skb - * r7 skb->data - * r8 skb_headlen(skb) */ +static const u8 bpf2a32[][2] = { + /* return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = {ARM_R1, ARM_R0}, + /* arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = {ARM_R3, ARM_R2}, + /* Stored on stack scratch space */ + [BPF_REG_2] = {STACK_OFFSET(0), STACK_OFFSET(4)}, + [BPF_REG_3] = {STACK_OFFSET(8), STACK_OFFSET(12)}, + [BPF_REG_4] = {STACK_OFFSET(16), STACK_OFFSET(20)}, + [BPF_REG_5] = {STACK_OFFSET(24), STACK_OFFSET(28)}, + /* callee saved registers that in-kernel function will preserve */ + [BPF_REG_6] = {ARM_R5, ARM_R4}, + /* Stored on stack scratch space */ + [BPF_REG_7] = {STACK_OFFSET(32), STACK_OFFSET(36)}, + [BPF_REG_8] = {STACK_OFFSET(40), STACK_OFFSET(44)}, + [BPF_REG_9] = {STACK_OFFSET(48), STACK_OFFSET(52)}, + /* Read only Frame Pointer to access Stack */ + [BPF_REG_FP] = {STACK_OFFSET(56), STACK_OFFSET(60)}, + /* Temporary Register for internal BPF JIT, can be used + * for constant blindings and others. + */ + [TMP_REG_1] = {ARM_R7, ARM_R6}, + [TMP_REG_2] = {ARM_R10, ARM_R8}, + /* Tail call count. Stored on stack scratch space. */ + [TCALL_CNT] = {STACK_OFFSET(64), STACK_OFFSET(68)}, + /* temporary register for blinding constants. + * Stored on stack scratch space. + */ + [BPF_REG_AX] = {STACK_OFFSET(72), STACK_OFFSET(76)}, +}; -#define r_scratch ARM_R0 -/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */ -#define r_off ARM_R1 -#define r_A ARM_R4 -#define r_X ARM_R5 -#define r_skb ARM_R6 -#define r_skb_data ARM_R7 -#define r_skb_hl ARM_R8 - -#define SCRATCH_SP_OFFSET 0 -#define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + 4 * (k)) - -#define SEEN_MEM ((1 << BPF_MEMWORDS) - 1) -#define SEEN_MEM_WORD(k) (1 << (k)) -#define SEEN_X (1 << BPF_MEMWORDS) -#define SEEN_CALL (1 << (BPF_MEMWORDS + 1)) -#define SEEN_SKB (1 << (BPF_MEMWORDS + 2)) -#define SEEN_DATA (1 << (BPF_MEMWORDS + 3)) +#define dst_lo dst[1] +#define dst_hi dst[0] +#define src_lo src[1] +#define src_hi src[0] -#define FLAG_NEED_X_RESET (1 << 0) -#define FLAG_IMM_OVERFLOW (1 << 1) +/* + * JIT Context: + * + * prog : bpf_prog + * idx : index of current last JITed instruction. + * prologue_bytes : bytes used in prologue. + * epilogue_offset : offset of epilogue starting. 
+ * seen : bit mask used for JIT optimization. + * offsets : array of eBPF instruction offsets in + * JITed code. + * target : final JITed code. + * epilogue_bytes : no of bytes used in epilogue. + * imm_count : no of immediate counts used for global + * variables. + * imms : array of global variable addresses. + */ struct jit_ctx { - const struct bpf_prog *skf; - unsigned idx; - unsigned prologue_bytes; - int ret0_fp_idx; + const struct bpf_prog *prog; + unsigned int idx; + unsigned int prologue_bytes; + unsigned int epilogue_offset; u32 seen; u32 flags; u32 *offsets; u32 *target; + u32 stack_size; #if __LINUX_ARM_ARCH__ < 7 u16 epilogue_bytes; u16 imm_count; @@ -73,68 +122,16 @@ struct jit_ctx { #endif }; -int bpf_jit_enable __read_mostly; - -static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret, - unsigned int size) -{ - void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size); - - if (!ptr) - return -EFAULT; - memcpy(ret, ptr, size); - return 0; -} - -static u64 jit_get_skb_b(struct sk_buff *skb, int offset) -{ - u8 ret; - int err; - - if (offset < 0) - err = call_neg_helper(skb, offset, &ret, 1); - else - err = skb_copy_bits(skb, offset, &ret, 1); - - return (u64)err << 32 | ret; -} - -static u64 jit_get_skb_h(struct sk_buff *skb, int offset) -{ - u16 ret; - int err; - - if (offset < 0) - err = call_neg_helper(skb, offset, &ret, 2); - else - err = skb_copy_bits(skb, offset, &ret, 2); - - return (u64)err << 32 | ntohs(ret); -} - -static u64 jit_get_skb_w(struct sk_buff *skb, int offset) -{ - u32 ret; - int err; - - if (offset < 0) - err = call_neg_helper(skb, offset, &ret, 4); - else - err = skb_copy_bits(skb, offset, &ret, 4); - - return (u64)err << 32 | ntohl(ret); -} - /* * Wrappers which handle both OABI and EABI and assures Thumb2 interworking * (where the assembly routines like __aeabi_uidiv could cause problems). */ -static u32 jit_udiv(u32 dividend, u32 divisor) +static u32 jit_udiv32(u32 dividend, u32 divisor) { return dividend / divisor; } -static u32 jit_mod(u32 dividend, u32 divisor) +static u32 jit_mod32(u32 dividend, u32 divisor) { return dividend % divisor; } @@ -158,36 +155,22 @@ static inline void emit(u32 inst, struct jit_ctx *ctx) _emit(ARM_COND_AL, inst, ctx); } -static u16 saved_regs(struct jit_ctx *ctx) +/* + * Checks if immediate value can be converted to imm12(12 bits) value. + */ +static int16_t imm8m(u32 x) { - u16 ret = 0; - - if ((ctx->skf->len > 1) || - (ctx->skf->insns[0].code == (BPF_RET | BPF_A))) - ret |= 1 << r_A; - -#ifdef CONFIG_FRAME_POINTER - ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC); -#else - if (ctx->seen & SEEN_CALL) - ret |= 1 << ARM_LR; -#endif - if (ctx->seen & (SEEN_DATA | SEEN_SKB)) - ret |= 1 << r_skb; - if (ctx->seen & SEEN_DATA) - ret |= (1 << r_skb_data) | (1 << r_skb_hl); - if (ctx->seen & SEEN_X) - ret |= 1 << r_X; - - return ret; -} + u32 rot; -static inline int mem_words_used(struct jit_ctx *ctx) -{ - /* yes, we do waste some stack space IF there are "holes" in the set" */ - return fls(ctx->seen & SEEN_MEM); + for (rot = 0; rot < 16; rot++) + if ((x & ~ror32(0xff, 2 * rot)) == 0) + return rol32(x, 2 * rot) | (rot << 8); + return -1; } +/* + * Initializes the JIT space with undefined instructions. 
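+ * ARM_INST_UDF is an undefined-instruction encoding, so stray execution
+ * of unfilled space in the image traps instead of running leftover bytes.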
+ */ static void jit_fill_hole(void *area, unsigned int size) { u32 *ptr; @@ -196,88 +179,34 @@ static void jit_fill_hole(void *area, unsigned int size) *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); } -static void build_prologue(struct jit_ctx *ctx) -{ - u16 reg_set = saved_regs(ctx); - u16 off; - -#ifdef CONFIG_FRAME_POINTER - emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx); - emit(ARM_PUSH(reg_set), ctx); - emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); -#else - if (reg_set) - emit(ARM_PUSH(reg_set), ctx); -#endif - - if (ctx->seen & (SEEN_DATA | SEEN_SKB)) - emit(ARM_MOV_R(r_skb, ARM_R0), ctx); - - if (ctx->seen & SEEN_DATA) { - off = offsetof(struct sk_buff, data); - emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx); - /* headlen = len - data_len */ - off = offsetof(struct sk_buff, len); - emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx); - off = offsetof(struct sk_buff, data_len); - emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); - emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx); - } - - if (ctx->flags & FLAG_NEED_X_RESET) - emit(ARM_MOV_I(r_X, 0), ctx); - - /* do not leak kernel data to userspace */ - if (bpf_needs_clear_a(&ctx->skf->insns[0])) - emit(ARM_MOV_I(r_A, 0), ctx); - - /* stack space for the BPF_MEM words */ - if (ctx->seen & SEEN_MEM) - emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); -} - -static void build_epilogue(struct jit_ctx *ctx) -{ - u16 reg_set = saved_regs(ctx); - - if (ctx->seen & SEEN_MEM) - emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); - - reg_set &= ~(1 << ARM_LR); +/* Stack must be multiples of 16 Bytes */ +#define STACK_ALIGN(sz) (((sz) + 3) & ~3) -#ifdef CONFIG_FRAME_POINTER - /* the first instruction of the prologue was: mov ip, sp */ - reg_set &= ~(1 << ARM_IP); - reg_set |= (1 << ARM_SP); - emit(ARM_LDM(ARM_SP, reg_set), ctx); -#else - if (reg_set) { - if (ctx->seen & SEEN_CALL) - reg_set |= 1 << ARM_PC; - emit(ARM_POP(reg_set), ctx); - } +/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, + * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, + * BPF_REG_FP and Tail call counts. + */ +#define SCRATCH_SIZE 80 - if (!(ctx->seen & SEEN_CALL)) - emit(ARM_BX(ARM_LR), ctx); -#endif -} +/* total stack size used in JITed code */ +#define _STACK_SIZE \ + (ctx->prog->aux->stack_depth + \ + + SCRATCH_SIZE + \ + + 4 /* extra for skb_copy_bits buffer */) -static int16_t imm8m(u32 x) -{ - u32 rot; +#define STACK_SIZE STACK_ALIGN(_STACK_SIZE) - for (rot = 0; rot < 16; rot++) - if ((x & ~ror32(0xff, 2 * rot)) == 0) - return rol32(x, 2 * rot) | (rot << 8); +/* Get the offset of eBPF REGISTERs stored on scratch space. 
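+ * Slot k is addressed as ARM_SP + STACK_SIZE - k - 4, so the scratch
+ * slots are laid out downwards from the top of the JITed stack frame.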
*/ +#define STACK_VAR(off) (STACK_SIZE-off-4) - return -1; -} +/* Offset of skb_copy_bits buffer */ +#define SKB_BUFFER STACK_VAR(SCRATCH_SIZE) #if __LINUX_ARM_ARCH__ < 7 static u16 imm_offset(u32 k, struct jit_ctx *ctx) { - unsigned i = 0, offset; + unsigned int i = 0, offset; u16 imm; /* on the "fake" run we just count them (duplicates included) */ @@ -296,7 +225,7 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx) ctx->imms[i] = k; /* constants go just after the epilogue */ - offset = ctx->offsets[ctx->skf->len]; + offset = ctx->offsets[ctx->prog->len - 1] * 4; offset += ctx->prologue_bytes; offset += ctx->epilogue_bytes; offset += i * 4; @@ -320,10 +249,22 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx) #endif /* __LINUX_ARM_ARCH__ */ +static inline int bpf2a32_offset(int bpf_to, int bpf_from, + const struct jit_ctx *ctx) { + int to, from; + + if (ctx->target == NULL) + return 0; + to = ctx->offsets[bpf_to]; + from = ctx->offsets[bpf_from]; + + return to - from - 1; +} + /* * Move an immediate that's not an imm8m to a core register. */ -static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx) +static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx) { #if __LINUX_ARM_ARCH__ < 7 emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx); @@ -334,7 +275,7 @@ static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx) #endif } -static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx) +static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx) { int imm12 = imm8m(val); @@ -344,676 +285,1594 @@ static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx) emit_mov_i_no8m(rd, val, ctx); } -#if __LINUX_ARM_ARCH__ < 6 - -static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) +static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) { - _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx); - _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); - _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx); - _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx); - _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx); - _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx); - _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx); - _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx); + ctx->seen |= SEEN_CALL; +#if __LINUX_ARM_ARCH__ < 5 + emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); + + if (elf_hwcap & HWCAP_THUMB) + emit(ARM_BX(tgt_reg), ctx); + else + emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); +#else + emit(ARM_BLX_R(tgt_reg), ctx); +#endif } -static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) +static inline int epilogue_offset(const struct jit_ctx *ctx) { - _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); - _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx); - _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx); + int to, from; + /* No need for 1st dummy run */ + if (ctx->target == NULL) + return 0; + to = ctx->epilogue_offset; + from = ctx->idx; + + return to - from - 2; } -static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx) +static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) { - /* r_dst = (r_src << 8) | (r_src >> 8) */ - emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx); - emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx); + const u8 *tmp = bpf2a32[TMP_REG_1]; + s32 jmp_offset; + + /* checks if divisor is zero or not. If it is, then + * exit directly. 
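+ * A zero divisor makes the JITed program return 0: ARM_R0 is cleared
+ * and control branches straight to the epilogue.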
+ */ + emit(ARM_CMP_I(rn, 0), ctx); + _emit(ARM_COND_EQ, ARM_MOV_I(ARM_R0, 0), ctx); + jmp_offset = epilogue_offset(ctx); + _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); +#if __LINUX_ARM_ARCH__ == 7 + if (elf_hwcap & HWCAP_IDIVA) { + if (op == BPF_DIV) + emit(ARM_UDIV(rd, rm, rn), ctx); + else { + emit(ARM_UDIV(ARM_IP, rm, rn), ctx); + emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx); + } + return; + } +#endif /* - * we need to mask out the bits set in r_dst[23:16] due to - * the first shift instruction. - * - * note that 0x8ff is the encoded immediate 0x00ff0000. + * For BPF_ALU | BPF_DIV | BPF_K instructions + * As ARM_R1 and ARM_R0 contains 1st argument of bpf + * function, we need to save it on caller side to save + * it from getting destroyed within callee. + * After the return from the callee, we restore ARM_R0 + * ARM_R1. */ - emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx); -} + if (rn != ARM_R1) { + emit(ARM_MOV_R(tmp[0], ARM_R1), ctx); + emit(ARM_MOV_R(ARM_R1, rn), ctx); + } + if (rm != ARM_R0) { + emit(ARM_MOV_R(tmp[1], ARM_R0), ctx); + emit(ARM_MOV_R(ARM_R0, rm), ctx); + } -#else /* ARMv6+ */ + /* Call appropriate function */ + ctx->seen |= SEEN_CALL; + emit_mov_i(ARM_IP, op == BPF_DIV ? + (u32)jit_udiv32 : (u32)jit_mod32, ctx); + emit_blx_r(ARM_IP, ctx); -static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) -{ - _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx); -#ifdef __LITTLE_ENDIAN - _emit(cond, ARM_REV(r_res, r_res), ctx); -#endif + /* Save return value */ + if (rd != ARM_R0) + emit(ARM_MOV_R(rd, ARM_R0), ctx); + + /* Restore ARM_R0 and ARM_R1 */ + if (rn != ARM_R1) + emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx); + if (rm != ARM_R0) + emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx); } -static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) +/* Checks whether BPF register is on scratch stack space or not. */ +static inline bool is_on_stack(u8 bpf_reg) { - _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx); -#ifdef __LITTLE_ENDIAN - _emit(cond, ARM_REV16(r_res, r_res), ctx); -#endif + static u8 stack_regs[] = {BPF_REG_AX, BPF_REG_3, BPF_REG_4, BPF_REG_5, + BPF_REG_7, BPF_REG_8, BPF_REG_9, TCALL_CNT, + BPF_REG_2, BPF_REG_FP}; + int i, reg_len = sizeof(stack_regs); + + for (i = 0 ; i < reg_len ; i++) { + if (bpf_reg == stack_regs[i]) + return true; + } + return false; } -static inline void emit_swap16(u8 r_dst __maybe_unused, - u8 r_src __maybe_unused, - struct jit_ctx *ctx __maybe_unused) +static inline void emit_a32_mov_i(const u8 dst, const u32 val, + bool dstk, struct jit_ctx *ctx) { -#ifdef __LITTLE_ENDIAN - emit(ARM_REV16(r_dst, r_src), ctx); -#endif + const u8 *tmp = bpf2a32[TMP_REG_1]; + + if (dstk) { + emit_mov_i(tmp[1], val, ctx); + emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(dst)), ctx); + } else { + emit_mov_i(dst, val, ctx); + } } -#endif /* __LINUX_ARM_ARCH__ < 6 */ +/* Sign extended move */ +static inline void emit_a32_mov_i64(const bool is64, const u8 dst[], + const u32 val, bool dstk, + struct jit_ctx *ctx) { + u32 hi = 0; + if (is64 && (val & (1<<31))) + hi = (u32)~0; + emit_a32_mov_i(dst_lo, val, dstk, ctx); + emit_a32_mov_i(dst_hi, hi, dstk, ctx); +} -/* Compute the immediate value for a PC-relative branch. 
*/ -static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx) -{ - u32 imm; +static inline void emit_a32_add_r(const u8 dst, const u8 src, + const bool is64, const bool hi, + struct jit_ctx *ctx) { + /* 64 bit : + * adds dst_lo, dst_lo, src_lo + * adc dst_hi, dst_hi, src_hi + * 32 bit : + * add dst_lo, dst_lo, src_lo + */ + if (!hi && is64) + emit(ARM_ADDS_R(dst, dst, src), ctx); + else if (hi && is64) + emit(ARM_ADC_R(dst, dst, src), ctx); + else + emit(ARM_ADD_R(dst, dst, src), ctx); +} - if (ctx->target == NULL) - return 0; - /* - * BPF allows only forward jumps and the offset of the target is - * still the one computed during the first pass. +static inline void emit_a32_sub_r(const u8 dst, const u8 src, + const bool is64, const bool hi, + struct jit_ctx *ctx) { + /* 64 bit : + * subs dst_lo, dst_lo, src_lo + * sbc dst_hi, dst_hi, src_hi + * 32 bit : + * sub dst_lo, dst_lo, src_lo */ - imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8); + if (!hi && is64) + emit(ARM_SUBS_R(dst, dst, src), ctx); + else if (hi && is64) + emit(ARM_SBC_R(dst, dst, src), ctx); + else + emit(ARM_SUB_R(dst, dst, src), ctx); +} - return imm >> 2; +static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64, + const bool hi, const u8 op, struct jit_ctx *ctx){ + switch (BPF_OP(op)) { + /* dst = dst + src */ + case BPF_ADD: + emit_a32_add_r(dst, src, is64, hi, ctx); + break; + /* dst = dst - src */ + case BPF_SUB: + emit_a32_sub_r(dst, src, is64, hi, ctx); + break; + /* dst = dst | src */ + case BPF_OR: + emit(ARM_ORR_R(dst, dst, src), ctx); + break; + /* dst = dst & src */ + case BPF_AND: + emit(ARM_AND_R(dst, dst, src), ctx); + break; + /* dst = dst ^ src */ + case BPF_XOR: + emit(ARM_EOR_R(dst, dst, src), ctx); + break; + /* dst = dst * src */ + case BPF_MUL: + emit(ARM_MUL(dst, dst, src), ctx); + break; + /* dst = dst << src */ + case BPF_LSH: + emit(ARM_LSL_R(dst, dst, src), ctx); + break; + /* dst = dst >> src */ + case BPF_RSH: + emit(ARM_LSR_R(dst, dst, src), ctx); + break; + /* dst = dst >> src (signed)*/ + case BPF_ARSH: + emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx); + break; + } } -#define OP_IMM3(op, r1, r2, imm_val, ctx) \ - do { \ - imm12 = imm8m(imm_val); \ - if (imm12 < 0) { \ - emit_mov_i_no8m(r_scratch, imm_val, ctx); \ - emit(op ## _R((r1), (r2), r_scratch), ctx); \ - } else { \ - emit(op ## _I((r1), (r2), imm12), ctx); \ - } \ - } while (0) - -static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx) -{ - if (ctx->ret0_fp_idx >= 0) { - _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx); - /* NOP to keep the size constant between passes */ - emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx); +/* ALU operation (32 bit) + * dst = dst (op) src + */ +static inline void emit_a32_alu_r(const u8 dst, const u8 src, + bool dstk, bool sstk, + struct jit_ctx *ctx, const bool is64, + const bool hi, const u8 op) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rn = sstk ? 
tmp[1] : src; + + if (sstk) + emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src)), ctx); + + /* ALU operation */ + if (dstk) { + emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx); + emit_alu_r(tmp[0], rn, is64, hi, op, ctx); + emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(dst)), ctx); } else { - _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx); - _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx); + emit_alu_r(dst, rn, is64, hi, op, ctx); } } -static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) -{ -#if __LINUX_ARM_ARCH__ < 5 - emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); +/* ALU operation (64 bit) */ +static inline void emit_a32_alu_r64(const bool is64, const u8 dst[], + const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx, + const u8 op) { + emit_a32_alu_r(dst_lo, src_lo, dstk, sstk, ctx, is64, false, op); + if (is64) + emit_a32_alu_r(dst_hi, src_hi, dstk, sstk, ctx, is64, true, op); + else + emit_a32_mov_i(dst_hi, 0, dstk, ctx); +} - if (elf_hwcap & HWCAP_THUMB) - emit(ARM_BX(tgt_reg), ctx); +/* dst = imm (4 bytes)*/ +static inline void emit_a32_mov_r(const u8 dst, const u8 src, + bool dstk, bool sstk, + struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rt = sstk ? tmp[0] : src; + + if (sstk) + emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(src)), ctx); + if (dstk) + emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst)), ctx); else - emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); -#else - emit(ARM_BLX_R(tgt_reg), ctx); -#endif + emit(ARM_MOV_R(dst, rt), ctx); } -static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, - int bpf_op) -{ -#if __LINUX_ARM_ARCH__ == 7 - if (elf_hwcap & HWCAP_IDIVA) { - if (bpf_op == BPF_DIV) - emit(ARM_UDIV(rd, rm, rn), ctx); - else { - emit(ARM_UDIV(ARM_R3, rm, rn), ctx); - emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx); - } - return; +/* dst = src */ +static inline void emit_a32_mov_r64(const bool is64, const u8 dst[], + const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + emit_a32_mov_r(dst_lo, src_lo, dstk, sstk, ctx); + if (is64) { + /* complete 8 byte move */ + emit_a32_mov_r(dst_hi, src_hi, dstk, sstk, ctx); + } else { + /* Zero out high 4 bytes */ + emit_a32_mov_i(dst_hi, 0, dstk, ctx); } -#endif +} - /* - * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4 - * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into - * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm - * before using it as a source for ARM_R1. - * - * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is - * ARM_R5 (r_X) so there is no particular register overlap - * issues. - */ - if (rn != ARM_R1) - emit(ARM_MOV_R(ARM_R1, rn), ctx); - if (rm != ARM_R0) - emit(ARM_MOV_R(ARM_R0, rm), ctx); +/* Shift operations */ +static inline void emit_a32_alu_i(const u8 dst, const u32 val, bool dstk, + struct jit_ctx *ctx, const u8 op) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rd = dstk ? tmp[0] : dst; + + if (dstk) + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + + /* Do shift operation */ + switch (op) { + case BPF_LSH: + emit(ARM_LSL_I(rd, rd, val), ctx); + break; + case BPF_RSH: + emit(ARM_LSR_I(rd, rd, val), ctx); + break; + case BPF_NEG: + emit(ARM_RSB_I(rd, rd, val), ctx); + break; + } + if (dstk) + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); +} + +/* dst = ~dst (64 bit) */ +static inline void emit_a32_neg64(const u8 dst[], bool dstk, + struct jit_ctx *ctx){ + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rd = dstk ? tmp[1] : dst[1]; + u8 rm = dstk ? 
tmp[0] : dst[0]; + + /* Setup Operand */ + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do Negate Operation */ + emit(ARM_RSBS_I(rd, rd, 0), ctx); + emit(ARM_RSC_I(rm, rm, 0), ctx); + + if (dstk) { + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } +} + +/* dst = dst << src */ +static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + + /* Setup Operands */ + u8 rt = sstk ? tmp2[1] : src_lo; + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? tmp[0] : dst_hi; + + if (sstk) + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do LSH operation */ + emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); + emit(ARM_RSB_I(tmp2[0], rt, 32), ctx); + /* As we are using ARM_LR */ ctx->seen |= SEEN_CALL; - emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod, - ctx); - emit_blx_r(ARM_R3, ctx); + emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx); + emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_ASL, rt), ctx); + + if (dstk) { + emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); + } else { + emit(ARM_MOV_R(rd, ARM_LR), ctx); + emit(ARM_MOV_R(rm, ARM_IP), ctx); + } +} - if (rd != ARM_R0) - emit(ARM_MOV_R(rd, ARM_R0), ctx); +/* dst = dst >> src (signed)*/ +static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup Operands */ + u8 rt = sstk ? tmp2[1] : src_lo; + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? tmp[0] : dst_hi; + + if (sstk) + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do the ARSH operation */ + emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); + emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); + /* As we are using ARM_LR */ + ctx->seen |= SEEN_CALL; + emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); + _emit(ARM_COND_MI, ARM_B(0), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_ASR, rt), ctx); + if (dstk) { + emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); + } else { + emit(ARM_MOV_R(rd, ARM_LR), ctx); + emit(ARM_MOV_R(rm, ARM_IP), ctx); + } +} + +/* dst = dst >> src */ +static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup Operands */ + u8 rt = sstk ? tmp2[1] : src_lo; + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? 
tmp[0] : dst_hi; + + if (sstk) + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do LSH operation */ + emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); + emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); + /* As we are using ARM_LR */ + ctx->seen |= SEEN_CALL; + emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx); + emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx); + emit(ARM_MOV_SR(ARM_IP, rm, SRTYPE_LSR, rt), ctx); + if (dstk) { + emit(ARM_STR_I(ARM_LR, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_hi)), ctx); + } else { + emit(ARM_MOV_R(rd, ARM_LR), ctx); + emit(ARM_MOV_R(rm, ARM_IP), ctx); + } } -static inline void update_on_xread(struct jit_ctx *ctx) +/* dst = dst << val */ +static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk, + const u32 val, struct jit_ctx *ctx){ + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup operands */ + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? tmp[0] : dst_hi; + + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do LSH operation */ + if (val < 32) { + emit(ARM_MOV_SI(tmp2[0], rm, SRTYPE_ASL, val), ctx); + emit(ARM_ORR_SI(rm, tmp2[0], rd, SRTYPE_LSR, 32 - val), ctx); + emit(ARM_MOV_SI(rd, rd, SRTYPE_ASL, val), ctx); + } else { + if (val == 32) + emit(ARM_MOV_R(rm, rd), ctx); + else + emit(ARM_MOV_SI(rm, rd, SRTYPE_ASL, val - 32), ctx); + emit(ARM_EOR_R(rd, rd, rd), ctx); + } + + if (dstk) { + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } +} + +/* dst = dst >> val */ +static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk, + const u32 val, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup operands */ + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? tmp[0] : dst_hi; + + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do LSR operation */ + if (val < 32) { + emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rm, rm, SRTYPE_LSR, val), ctx); + } else if (val == 32) { + emit(ARM_MOV_R(rd, rm), ctx); + emit(ARM_MOV_I(rm, 0), ctx); + } else { + emit(ARM_MOV_SI(rd, rm, SRTYPE_LSR, val - 32), ctx); + emit(ARM_MOV_I(rm, 0), ctx); + } + + if (dstk) { + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } +} + +/* dst = dst >> val (signed) */ +static inline void emit_a32_arsh_i64(const u8 dst[], bool dstk, + const u32 val, struct jit_ctx *ctx){ + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup operands */ + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? 
tmp[0] : dst_hi; + + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Do ARSH operation */ + if (val < 32) { + emit(ARM_MOV_SI(tmp2[1], rd, SRTYPE_LSR, val), ctx); + emit(ARM_ORR_SI(rd, tmp2[1], rm, SRTYPE_ASL, 32 - val), ctx); + emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, val), ctx); + } else if (val == 32) { + emit(ARM_MOV_R(rd, rm), ctx); + emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx); + } else { + emit(ARM_MOV_SI(rd, rm, SRTYPE_ASR, val - 32), ctx); + emit(ARM_MOV_SI(rm, rm, SRTYPE_ASR, 31), ctx); + } + + if (dstk) { + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } +} + +static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + /* Setup operands for multiplication */ + u8 rd = dstk ? tmp[1] : dst_lo; + u8 rm = dstk ? tmp[0] : dst_hi; + u8 rt = sstk ? tmp2[1] : src_lo; + u8 rn = sstk ? tmp2[0] : src_hi; + + if (dstk) { + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + if (sstk) { + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), ctx); + emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_hi)), ctx); + } + + /* Do Multiplication */ + emit(ARM_MUL(ARM_IP, rd, rn), ctx); + emit(ARM_MUL(ARM_LR, rm, rt), ctx); + /* As we are using ARM_LR */ + ctx->seen |= SEEN_CALL; + emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx); + + emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx); + emit(ARM_ADD_R(rm, ARM_LR, rm), ctx); + if (dstk) { + emit(ARM_STR_I(ARM_IP, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } else { + emit(ARM_MOV_R(rd, ARM_IP), ctx); + } +} + +/* *(size *)(dst + off) = src */ +static inline void emit_str_r(const u8 dst, const u8 src, bool dstk, + const s32 off, struct jit_ctx *ctx, const u8 sz){ + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rd = dstk ? tmp[1] : dst; + + if (dstk) + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); + if (off) { + emit_a32_mov_i(tmp[0], off, false, ctx); + emit(ARM_ADD_R(tmp[0], rd, tmp[0]), ctx); + rd = tmp[0]; + } + switch (sz) { + case BPF_W: + /* Store a Word */ + emit(ARM_STR_I(src, rd, 0), ctx); + break; + case BPF_H: + /* Store a HalfWord */ + emit(ARM_STRH_I(src, rd, 0), ctx); + break; + case BPF_B: + /* Store a Byte */ + emit(ARM_STRB_I(src, rd, 0), ctx); + break; + } +} + +/* dst = *(size*)(src + off) */ +static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk, + const s32 off, struct jit_ctx *ctx, const u8 sz){ + const u8 *tmp = bpf2a32[TMP_REG_1]; + u8 rd = dstk ? 
tmp[1] : dst; + u8 rm = src; + + if (off) { + emit_a32_mov_i(tmp[0], off, false, ctx); + emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); + rm = tmp[0]; + } + switch (sz) { + case BPF_W: + /* Load a Word */ + emit(ARM_LDR_I(rd, rm, 0), ctx); + break; + case BPF_H: + /* Load a HalfWord */ + emit(ARM_LDRH_I(rd, rm, 0), ctx); + break; + case BPF_B: + /* Load a Byte */ + emit(ARM_LDRB_I(rd, rm, 0), ctx); + break; + } + if (dstk) + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx); +} + +/* Arithmatic Operation */ +static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, + const u8 rn, struct jit_ctx *ctx, u8 op) { + switch (op) { + case BPF_JSET: + ctx->seen |= SEEN_CALL; + emit(ARM_AND_R(ARM_IP, rt, rn), ctx); + emit(ARM_AND_R(ARM_LR, rd, rm), ctx); + emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); + break; + case BPF_JEQ: + case BPF_JNE: + case BPF_JGT: + case BPF_JGE: + case BPF_JLE: + case BPF_JLT: + emit(ARM_CMP_R(rd, rm), ctx); + _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx); + break; + case BPF_JSLE: + case BPF_JSGT: + emit(ARM_CMP_R(rn, rt), ctx); + emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx); + break; + case BPF_JSLT: + case BPF_JSGE: + emit(ARM_CMP_R(rt, rn), ctx); + emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx); + break; + } +} + +static int out_offset = -1; /* initialized on the first pass of build_body() */ +static int emit_bpf_tail_call(struct jit_ctx *ctx) { - if (!(ctx->seen & SEEN_X)) - ctx->flags |= FLAG_NEED_X_RESET; - ctx->seen |= SEEN_X; + /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */ + const u8 *r2 = bpf2a32[BPF_REG_2]; + const u8 *r3 = bpf2a32[BPF_REG_3]; + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const u8 *tcc = bpf2a32[TCALL_CNT]; + const int idx0 = ctx->idx; +#define cur_offset (ctx->idx - idx0) +#define jmp_offset (out_offset - (cur_offset)) + u32 off, lo, hi; + + /* if (index >= array->map.max_entries) + * goto out; + */ + off = offsetof(struct bpf_array, map.max_entries); + /* array->map.max_entries */ + emit_a32_mov_i(tmp[1], off, false, ctx); + emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); + emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx); + /* index (64 bit) */ + emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); + /* index >= array->map.max_entries */ + emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx); + _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); + + /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * goto out; + * tail_call_cnt++; + */ + lo = (u32)MAX_TAIL_CALL_CNT; + hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32); + emit(ARM_LDR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx); + emit(ARM_LDR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx); + emit(ARM_CMP_I(tmp[0], hi), ctx); + _emit(ARM_COND_EQ, ARM_CMP_I(tmp[1], lo), ctx); + _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx); + emit(ARM_ADDS_I(tmp[1], tmp[1], 1), ctx); + emit(ARM_ADC_I(tmp[0], tmp[0], 0), ctx); + emit(ARM_STR_I(tmp[1], ARM_SP, STACK_VAR(tcc[1])), ctx); + emit(ARM_STR_I(tmp[0], ARM_SP, STACK_VAR(tcc[0])), ctx); + + /* prog = array->ptrs[index] + * if (prog == NULL) + * goto out; + */ + off = offsetof(struct bpf_array, ptrs); + emit_a32_mov_i(tmp[1], off, false, ctx); + emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx); + emit(ARM_ADD_R(tmp[1], tmp2[1], tmp[1]), ctx); + emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx); + emit(ARM_MOV_SI(tmp[0], tmp2[1], SRTYPE_ASL, 2), ctx); + emit(ARM_LDR_R(tmp[1], tmp[1], tmp[0]), ctx); + emit(ARM_CMP_I(tmp[1], 0), ctx); + _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); + + /* goto *(prog->bpf_func + prologue_size); */ + off = 
offsetof(struct bpf_prog, bpf_func); + emit_a32_mov_i(tmp2[1], off, false, ctx); + emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx); + emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); + emit(ARM_BX(tmp[1]), ctx); + + /* out: */ + if (out_offset == -1) + out_offset = cur_offset; + if (cur_offset != out_offset) { + pr_err_once("tail_call out_offset = %d, expected %d!\n", + cur_offset, out_offset); + return -1; + } + return 0; +#undef cur_offset +#undef jmp_offset } -static int build_body(struct jit_ctx *ctx) +/* 0xabcd => 0xcdab */ +static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx) { - void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; - const struct bpf_prog *prog = ctx->skf; - const struct sock_filter *inst; - unsigned i, load_order, off, condt; - int imm12; - u32 k; +#if __LINUX_ARM_ARCH__ < 6 + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + + emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx); + emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx); + emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx); + emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx); +#else /* ARMv6+ */ + emit(ARM_REV16(rd, rn), ctx); +#endif +} - for (i = 0; i < prog->len; i++) { - u16 code; +/* 0xabcdefgh => 0xghefcdab */ +static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx) +{ +#if __LINUX_ARM_ARCH__ < 6 + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + + emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx); + emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx); + emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx); + + emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx); + emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx); + emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx); + emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx); + emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx); + emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx); + emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx); + +#else /* ARMv6+ */ + emit(ARM_REV(rd, rn), ctx); +#endif +} - inst = &(prog->insns[i]); - /* K as an immediate value operand */ - k = inst->k; - code = bpf_anc_helper(inst); +// push the scratch stack register on top of the stack +static inline void emit_push_r64(const u8 src[], const u8 shift, + struct jit_ctx *ctx) +{ + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + u16 reg_set = 0; - /* compute offsets only in the fake pass */ - if (ctx->target == NULL) - ctx->offsets[i] = ctx->idx * 4; + emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(src[1]+shift)), ctx); + emit(ARM_LDR_I(tmp2[0], ARM_SP, STACK_VAR(src[0]+shift)), ctx); + + reg_set = (1 << tmp2[1]) | (1 << tmp2[0]); + emit(ARM_PUSH(reg_set), ctx); +} + +static void build_prologue(struct jit_ctx *ctx) +{ + const u8 r0 = bpf2a32[BPF_REG_0][1]; + const u8 r2 = bpf2a32[BPF_REG_1][1]; + const u8 r3 = bpf2a32[BPF_REG_1][0]; + const u8 r4 = bpf2a32[BPF_REG_6][1]; + const u8 r5 = bpf2a32[BPF_REG_6][0]; + const u8 r6 = bpf2a32[TMP_REG_1][1]; + const u8 r7 = bpf2a32[TMP_REG_1][0]; + const u8 r8 = bpf2a32[TMP_REG_2][1]; + const u8 r10 = bpf2a32[TMP_REG_2][0]; + const u8 fplo = bpf2a32[BPF_REG_FP][1]; + const u8 fphi = bpf2a32[BPF_REG_FP][0]; + const u8 sp = ARM_SP; + const u8 *tcc = bpf2a32[TCALL_CNT]; + + u16 reg_set = 0; + + /* + * eBPF prog stack layout + * + * high + * original ARM_SP => +-----+ eBPF prologue + * |FP/LR| + * current ARM_FP => +-----+ + * | ... | callee saved registers + * eBPF fp register => +-----+ <= (BPF_FP) + * | ... 
| eBPF JIT scratch space + * | | eBPF prog stack + * +-----+ + * |RSVD | JIT scratchpad + * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE) + * | | + * | ... | Function call stack + * | | + * +-----+ + * low + */ + + /* Save callee saved registers. */ + reg_set |= (1<seen & SEEN_CALL) + reg_set |= (1<stack_size = imm8m(STACK_SIZE); + + /* Set up function call stack */ + emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx); - switch (code) { - case BPF_LD | BPF_IMM: - emit_mov_i(r_A, k, ctx); + /* Set up BPF prog stack base register */ + emit_a32_mov_r(fplo, ARM_IP, true, false, ctx); + emit_a32_mov_i(fphi, 0, true, ctx); + + /* mov r4, 0 */ + emit(ARM_MOV_I(r4, 0), ctx); + + /* Move BPF_CTX to BPF_R1 */ + emit(ARM_MOV_R(r3, r4), ctx); + emit(ARM_MOV_R(r2, r0), ctx); + /* Initialize Tail Count */ + emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[0])), ctx); + emit(ARM_STR_I(r4, ARM_SP, STACK_VAR(tcc[1])), ctx); + /* end of prologue */ +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + const u8 r4 = bpf2a32[BPF_REG_6][1]; + const u8 r5 = bpf2a32[BPF_REG_6][0]; + const u8 r6 = bpf2a32[TMP_REG_1][1]; + const u8 r7 = bpf2a32[TMP_REG_1][0]; + const u8 r8 = bpf2a32[TMP_REG_2][1]; + const u8 r10 = bpf2a32[TMP_REG_2][0]; + u16 reg_set = 0; + + /* unwind function call stack */ + emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx); + + /* restore callee saved registers. */ + reg_set |= (1<seen & SEEN_CALL) + reg_set |= (1<seen & SEEN_CALL)) + emit(ARM_BX(ARM_LR), ctx); +#endif +} + +/* + * Convert an eBPF instruction to native instruction, i.e + * JITs an eBPF instruction. + * Returns : + * 0 - Successfully JITed an 8-byte eBPF instruction + * >0 - Successfully JITed a 16-byte eBPF instruction + * <0 - Failed to JIT. + */ +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) +{ + const u8 code = insn->code; + const u8 *dst = bpf2a32[insn->dst_reg]; + const u8 *src = bpf2a32[insn->src_reg]; + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; + const s16 off = insn->off; + const s32 imm = insn->imm; + const int i = insn - ctx->prog->insnsi; + const bool is64 = BPF_CLASS(code) == BPF_ALU64; + const bool dstk = is_on_stack(insn->dst_reg); + const bool sstk = is_on_stack(insn->src_reg); + u8 rd, rt, rm, rn; + s32 jmp_offset; + +#define check_imm(bits, imm) do { \ + if ((((imm) > 0) && ((imm) >> (bits))) || \ + (((imm) < 0) && (~(imm) >> (bits)))) { \ + pr_info("[%2d] imm=%d(0x%x) out of range\n", \ + i, imm, imm); \ + return -EINVAL; \ + } \ +} while (0) +#define check_imm24(imm) check_imm(24, imm) + + switch (code) { + /* ALU operations */ + + /* dst = src */ + case BPF_ALU | BPF_MOV | BPF_K: + case BPF_ALU | BPF_MOV | BPF_X: + case BPF_ALU64 | BPF_MOV | BPF_K: + case BPF_ALU64 | BPF_MOV | BPF_X: + switch (BPF_SRC(code)) { + case BPF_X: + emit_a32_mov_r64(is64, dst, src, dstk, sstk, ctx); break; - case BPF_LD | BPF_W | BPF_LEN: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); - emit(ARM_LDR_I(r_A, r_skb, - offsetof(struct sk_buff, len)), ctx); + case BPF_K: + /* Sign-extend immediate value to destination reg */ + emit_a32_mov_i64(is64, dst, imm, dstk, ctx); break; - case BPF_LD | BPF_MEM: - /* A = scratch[k] */ - ctx->seen |= SEEN_MEM_WORD(k); - emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); + } + break; + /* dst = dst + src/imm */ + /* dst = dst - src/imm */ + /* dst = dst | src/imm */ + /* dst = dst & src/imm */ + /* dst = dst ^ src/imm */ + /* dst = dst * src/imm */ + /* dst = dst << src */ + /* dst = dst >> src */ 
+ case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU | BPF_ARSH | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_X: + switch (BPF_SRC(code)) { + case BPF_X: + emit_a32_alu_r64(is64, dst, src, dstk, sstk, + ctx, BPF_OP(code)); break; - case BPF_LD | BPF_W | BPF_ABS: - load_order = 2; - goto load; - case BPF_LD | BPF_H | BPF_ABS: - load_order = 1; - goto load; - case BPF_LD | BPF_B | BPF_ABS: - load_order = 0; -load: - emit_mov_i(r_off, k, ctx); -load_common: - ctx->seen |= SEEN_DATA | SEEN_CALL; - - if (load_order > 0) { - emit(ARM_SUB_I(r_scratch, r_skb_hl, - 1 << load_order), ctx); - emit(ARM_CMP_R(r_scratch, r_off), ctx); - condt = ARM_COND_GE; - } else { - emit(ARM_CMP_R(r_skb_hl, r_off), ctx); - condt = ARM_COND_HI; - } - - /* - * test for negative offset, only if we are - * currently scheduled to take the fast - * path. this will update the flags so that - * the slowpath instruction are ignored if the - * offset is negative. - * - * for loard_order == 0 the HI condition will - * make loads at offset 0 take the slow path too. + case BPF_K: + /* Move immediate value to the temporary register + * and then do the ALU operation on the temporary + * register as this will sign-extend the immediate + * value into temporary reg and then it would be + * safe to do the operation on it. */ - _emit(condt, ARM_CMP_I(r_off, 0), ctx); - - _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), - ctx); - - if (load_order == 0) - _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0), - ctx); - else if (load_order == 1) - emit_load_be16(condt, r_A, r_scratch, ctx); - else if (load_order == 2) - emit_load_be32(condt, r_A, r_scratch, ctx); - - _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx); - - /* the slowpath */ - emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx); - emit(ARM_MOV_R(ARM_R0, r_skb), ctx); - /* the offset is already in R1 */ - emit_blx_r(ARM_R3, ctx); - /* check the result of skb_copy_bits */ - emit(ARM_CMP_I(ARM_R1, 0), ctx); - emit_err_ret(ARM_COND_NE, ctx); - emit(ARM_MOV_R(r_A, ARM_R0), ctx); + emit_a32_mov_i64(is64, tmp2, imm, false, ctx); + emit_a32_alu_r64(is64, dst, tmp2, dstk, false, + ctx, BPF_OP(code)); break; - case BPF_LD | BPF_W | BPF_IND: - load_order = 2; - goto load_ind; - case BPF_LD | BPF_H | BPF_IND: - load_order = 1; - goto load_ind; - case BPF_LD | BPF_B | BPF_IND: - load_order = 0; -load_ind: - update_on_xread(ctx); - OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); - goto load_common; - case BPF_LDX | BPF_IMM: - ctx->seen |= SEEN_X; - emit_mov_i(r_X, k, ctx); + } + break; + /* dst = dst / src(imm) */ + /* dst = dst % src(imm) */ + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU | BPF_MOD | BPF_X: + rt = src_lo; + rd = dstk ? 
tmp2[1] : dst_lo; + if (dstk) + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + switch (BPF_SRC(code)) { + case BPF_X: + rt = sstk ? tmp2[0] : rt; + if (sstk) + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(src_lo)), + ctx); break; - case BPF_LDX | BPF_W | BPF_LEN: - ctx->seen |= SEEN_X | SEEN_SKB; - emit(ARM_LDR_I(r_X, r_skb, - offsetof(struct sk_buff, len)), ctx); + case BPF_K: + rt = tmp2[0]; + emit_a32_mov_i(rt, imm, false, ctx); break; - case BPF_LDX | BPF_MEM: - ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); - emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); + } + emit_udivmod(rd, rd, rt, ctx, BPF_OP(code)); + if (dstk) + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit_a32_mov_i(dst_hi, 0, dstk, ctx); + break; + case BPF_ALU64 | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_X: + goto notyet; + /* dst = dst >> imm */ + /* dst = dst << imm */ + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU | BPF_LSH | BPF_K: + if (unlikely(imm > 31)) + return -EINVAL; + if (imm) + emit_a32_alu_i(dst_lo, imm, dstk, ctx, BPF_OP(code)); + emit_a32_mov_i(dst_hi, 0, dstk, ctx); + break; + /* dst = dst << imm */ + case BPF_ALU64 | BPF_LSH | BPF_K: + if (unlikely(imm > 63)) + return -EINVAL; + emit_a32_lsh_i64(dst, dstk, imm, ctx); + break; + /* dst = dst >> imm */ + case BPF_ALU64 | BPF_RSH | BPF_K: + if (unlikely(imm > 63)) + return -EINVAL; + emit_a32_lsr_i64(dst, dstk, imm, ctx); + break; + /* dst = dst << src */ + case BPF_ALU64 | BPF_LSH | BPF_X: + emit_a32_lsh_r64(dst, src, dstk, sstk, ctx); + break; + /* dst = dst >> src */ + case BPF_ALU64 | BPF_RSH | BPF_X: + emit_a32_lsr_r64(dst, src, dstk, sstk, ctx); + break; + /* dst = dst >> src (signed) */ + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit_a32_arsh_r64(dst, src, dstk, sstk, ctx); + break; + /* dst = dst >> imm (signed) */ + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (unlikely(imm > 63)) + return -EINVAL; + emit_a32_arsh_i64(dst, dstk, imm, ctx); + break; + /* dst = ~dst */ + case BPF_ALU | BPF_NEG: + emit_a32_alu_i(dst_lo, 0, dstk, ctx, BPF_OP(code)); + emit_a32_mov_i(dst_hi, 0, dstk, ctx); + break; + /* dst = ~dst (64 bit) */ + case BPF_ALU64 | BPF_NEG: + emit_a32_neg64(dst, dstk, ctx); + break; + /* dst = dst * src/imm */ + case BPF_ALU64 | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_K: + switch (BPF_SRC(code)) { + case BPF_X: + emit_a32_mul_r64(dst, src, dstk, sstk, ctx); break; - case BPF_LDX | BPF_B | BPF_MSH: - /* x = ((*(frame + k)) & 0xf) << 2; */ - ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; - /* the interpreter should deal with the negative K */ - if ((int)k < 0) - return -1; - /* offset in r1: we might have to take the slow path */ - emit_mov_i(r_off, k, ctx); - emit(ARM_CMP_R(r_skb_hl, r_off), ctx); - - /* load in r0: common with the slowpath */ - _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data, - ARM_R1), ctx); - /* - * emit_mov_i() might generate one or two instructions, - * the same holds for emit_blx_r() + case BPF_K: + /* Move immediate value to the temporary register + * and then do the multiplication on it as this + * will sign-extend the immediate value into temp + * reg then it would be safe to do the operation + * on it. 
*/ - _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx); - - emit(ARM_MOV_R(ARM_R0, r_skb), ctx); - /* r_off is r1 */ - emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx); - emit_blx_r(ARM_R3, ctx); - /* check the return value of skb_copy_bits */ - emit(ARM_CMP_I(ARM_R1, 0), ctx); - emit_err_ret(ARM_COND_NE, ctx); - - emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); - emit(ARM_LSL_I(r_X, r_X, 2), ctx); - break; - case BPF_ST: - ctx->seen |= SEEN_MEM_WORD(k); - emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); - break; - case BPF_STX: - update_on_xread(ctx); - ctx->seen |= SEEN_MEM_WORD(k); - emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); - break; - case BPF_ALU | BPF_ADD | BPF_K: - /* A += K */ - OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_ADD | BPF_X: - update_on_xread(ctx); - emit(ARM_ADD_R(r_A, r_A, r_X), ctx); - break; - case BPF_ALU | BPF_SUB | BPF_K: - /* A -= K */ - OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); - break; - case BPF_ALU | BPF_SUB | BPF_X: - update_on_xread(ctx); - emit(ARM_SUB_R(r_A, r_A, r_X), ctx); - break; - case BPF_ALU | BPF_MUL | BPF_K: - /* A *= K */ - emit_mov_i(r_scratch, k, ctx); - emit(ARM_MUL(r_A, r_A, r_scratch), ctx); - break; - case BPF_ALU | BPF_MUL | BPF_X: - update_on_xread(ctx); - emit(ARM_MUL(r_A, r_A, r_X), ctx); + emit_a32_mov_i64(is64, tmp2, imm, false, ctx); + emit_a32_mul_r64(dst, tmp2, dstk, false, ctx); break; - case BPF_ALU | BPF_DIV | BPF_K: - if (k == 1) - break; - emit_mov_i(r_scratch, k, ctx); - emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV); - break; - case BPF_ALU | BPF_DIV | BPF_X: - update_on_xread(ctx); - emit(ARM_CMP_I(r_X, 0), ctx); - emit_err_ret(ARM_COND_EQ, ctx); - emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV); + } + break; + /* dst = htole(dst) */ + /* dst = htobe(dst) */ + case BPF_ALU | BPF_END | BPF_FROM_LE: + case BPF_ALU | BPF_END | BPF_FROM_BE: + rd = dstk ? tmp[0] : dst_hi; + rt = dstk ? 
tmp[1] : dst_lo; + if (dstk) { + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + if (BPF_SRC(code) == BPF_FROM_LE) + goto emit_bswap_uxt; + switch (imm) { + case 16: + emit_rev16(rt, rt, ctx); + goto emit_bswap_uxt; + case 32: + emit_rev32(rt, rt, ctx); + goto emit_bswap_uxt; + case 64: + /* Because of the usage of ARM_LR */ + ctx->seen |= SEEN_CALL; + emit_rev32(ARM_LR, rt, ctx); + emit_rev32(rt, rd, ctx); + emit(ARM_MOV_R(rd, ARM_LR), ctx); break; - case BPF_ALU | BPF_MOD | BPF_K: - if (k == 1) { - emit_mov_i(r_A, 0, ctx); - break; - } - emit_mov_i(r_scratch, k, ctx); - emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD); + } + goto exit; +emit_bswap_uxt: + switch (imm) { + case 16: + /* zero-extend 16 bits into 64 bits */ +#if __LINUX_ARM_ARCH__ < 6 + emit_a32_mov_i(tmp2[1], 0xffff, false, ctx); + emit(ARM_AND_R(rt, rt, tmp2[1]), ctx); +#else /* ARMv6+ */ + emit(ARM_UXTH(rt, rt), ctx); +#endif + emit(ARM_EOR_R(rd, rd, rd), ctx); break; - case BPF_ALU | BPF_MOD | BPF_X: - update_on_xread(ctx); - emit(ARM_CMP_I(r_X, 0), ctx); - emit_err_ret(ARM_COND_EQ, ctx); - emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD); + case 32: + /* zero-extend 32 bits into 64 bits */ + emit(ARM_EOR_R(rd, rd, rd), ctx); break; - case BPF_ALU | BPF_OR | BPF_K: - /* A |= K */ - OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); + case 64: + /* nop */ break; - case BPF_ALU | BPF_OR | BPF_X: - update_on_xread(ctx); - emit(ARM_ORR_R(r_A, r_A, r_X), ctx); + } +exit: + if (dstk) { + emit(ARM_STR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + break; + /* dst = imm64 */ + case BPF_LD | BPF_IMM | BPF_DW: + { + const struct bpf_insn insn1 = insn[1]; + u32 hi, lo = imm; + + hi = insn1.imm; + emit_a32_mov_i(dst_lo, lo, dstk, ctx); + emit_a32_mov_i(dst_hi, hi, dstk, ctx); + + return 1; + } + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_DW: + rn = sstk ? tmp2[1] : src_lo; + if (sstk) + emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + /* Load a Word */ + case BPF_H: + /* Load a Half-Word */ + case BPF_B: + /* Load a Byte */ + emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code)); + emit_a32_mov_i(dst_hi, 0, dstk, ctx); break; - case BPF_ALU | BPF_XOR | BPF_K: - /* A ^= K; */ - OP_IMM3(ARM_EOR, r_A, r_A, k, ctx); + case BPF_DW: + /* Load a double word */ + emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W); + emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W); break; - case BPF_ANC | SKF_AD_ALU_XOR_X: - case BPF_ALU | BPF_XOR | BPF_X: - /* A ^= X */ - update_on_xread(ctx); - emit(ARM_EOR_R(r_A, r_A, r_X), ctx); + } + break; + /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */ + case BPF_LD | BPF_ABS | BPF_W: + case BPF_LD | BPF_ABS | BPF_H: + case BPF_LD | BPF_ABS | BPF_B: + /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */ + case BPF_LD | BPF_IND | BPF_W: + case BPF_LD | BPF_IND | BPF_H: + case BPF_LD | BPF_IND | BPF_B: + { + const u8 r4 = bpf2a32[BPF_REG_6][1]; /* r4 = ptr to sk_buff */ + const u8 r0 = bpf2a32[BPF_REG_0][1]; /*r0: struct sk_buff *skb*/ + /* rtn value */ + const u8 r1 = bpf2a32[BPF_REG_0][0]; /* r1: int k */ + const u8 r2 = bpf2a32[BPF_REG_1][1]; /* r2: unsigned int size */ + const u8 r3 = bpf2a32[BPF_REG_1][0]; /* r3: void *buffer */ + const u8 r6 = bpf2a32[TMP_REG_1][1]; /* r6: void *(*func)(..) 
*/ + int size; + + /* Setting up first argument */ + emit(ARM_MOV_R(r0, r4), ctx); + + /* Setting up second argument */ + emit_a32_mov_i(r1, imm, false, ctx); + if (BPF_MODE(code) == BPF_IND) + emit_a32_alu_r(r1, src_lo, false, sstk, ctx, + false, false, BPF_ADD); + + /* Setting up third argument */ + switch (BPF_SIZE(code)) { + case BPF_W: + size = 4; break; - case BPF_ALU | BPF_AND | BPF_K: - /* A &= K */ - OP_IMM3(ARM_AND, r_A, r_A, k, ctx); + case BPF_H: + size = 2; break; - case BPF_ALU | BPF_AND | BPF_X: - update_on_xread(ctx); - emit(ARM_AND_R(r_A, r_A, r_X), ctx); + case BPF_B: + size = 1; break; - case BPF_ALU | BPF_LSH | BPF_K: - if (unlikely(k > 31)) - return -1; - emit(ARM_LSL_I(r_A, r_A, k), ctx); + default: + return -EINVAL; + } + emit_a32_mov_i(r2, size, false, ctx); + + /* Setting up fourth argument */ + emit(ARM_ADD_I(r3, ARM_SP, imm8m(SKB_BUFFER)), ctx); + + /* Setting up function pointer to call */ + emit_a32_mov_i(r6, (unsigned int)bpf_load_pointer, false, ctx); + emit_blx_r(r6, ctx); + + emit(ARM_EOR_R(r1, r1, r1), ctx); + /* Check if return address is NULL or not. + * if NULL then jump to epilogue + * else continue to load the value from retn address + */ + emit(ARM_CMP_I(r0, 0), ctx); + jmp_offset = epilogue_offset(ctx); + check_imm24(jmp_offset); + _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); + + /* Load value from the address */ + switch (BPF_SIZE(code)) { + case BPF_W: + emit(ARM_LDR_I(r0, r0, 0), ctx); + emit_rev32(r0, r0, ctx); break; - case BPF_ALU | BPF_LSH | BPF_X: - update_on_xread(ctx); - emit(ARM_LSL_R(r_A, r_A, r_X), ctx); + case BPF_H: + emit(ARM_LDRH_I(r0, r0, 0), ctx); + emit_rev16(r0, r0, ctx); break; - case BPF_ALU | BPF_RSH | BPF_K: - if (unlikely(k > 31)) - return -1; - if (k) - emit(ARM_LSR_I(r_A, r_A, k), ctx); + case BPF_B: + emit(ARM_LDRB_I(r0, r0, 0), ctx); + /* No need to reverse */ break; - case BPF_ALU | BPF_RSH | BPF_X: - update_on_xread(ctx); - emit(ARM_LSR_R(r_A, r_A, r_X), ctx); + } + break; + } + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_DW: + switch (BPF_SIZE(code)) { + case BPF_DW: + /* Sign-extend immediate value into temp reg */ + emit_a32_mov_i64(true, tmp2, imm, false, ctx); + emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, BPF_W); + emit_str_r(dst_lo, tmp2[0], dstk, off+4, ctx, BPF_W); break; - case BPF_ALU | BPF_NEG: - /* A = -A */ - emit(ARM_RSB_I(r_A, r_A, 0), ctx); + case BPF_W: + case BPF_H: + case BPF_B: + emit_a32_mov_i(tmp2[1], imm, false, ctx); + emit_str_r(dst_lo, tmp2[1], dstk, off, ctx, + BPF_SIZE(code)); break; - case BPF_JMP | BPF_JA: - /* pc += K */ - emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); + } + break; + /* STX XADD: lock *(u32 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_W: + /* STX XADD: lock *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + goto notyet; + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_W: + case BPF_STX | BPF_MEM | BPF_H: + case BPF_STX | BPF_MEM | BPF_B: + case BPF_STX | BPF_MEM | BPF_DW: + { + u8 sz = BPF_SIZE(code); + + rn = sstk ? tmp2[1] : src_lo; + rm = sstk ? 
tmp2[0] : src_hi; + if (sstk) { + emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx); + } + + /* Store the value */ + if (BPF_SIZE(code) == BPF_DW) { + emit_str_r(dst_lo, rn, dstk, off, ctx, BPF_W); + emit_str_r(dst_lo, rm, dstk, off+4, ctx, BPF_W); + } else { + emit_str_r(dst_lo, rn, dstk, off, ctx, sz); + } + break; + } + /* PC += off if dst == src */ + /* PC += off if dst > src */ + /* PC += off if dst >= src */ + /* PC += off if dst < src */ + /* PC += off if dst <= src */ + /* PC += off if dst != src */ + /* PC += off if dst > src (signed) */ + /* PC += off if dst >= src (signed) */ + /* PC += off if dst < src (signed) */ + /* PC += off if dst <= src (signed) */ + /* PC += off if dst & src */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + /* Setup source registers */ + rm = sstk ? tmp2[0] : src_hi; + rn = sstk ? tmp2[1] : src_lo; + if (sstk) { + emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx); + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(src_hi)), ctx); + } + goto go_jmp; + /* PC += off if dst == imm */ + /* PC += off if dst > imm */ + /* PC += off if dst >= imm */ + /* PC += off if dst < imm */ + /* PC += off if dst <= imm */ + /* PC += off if dst != imm */ + /* PC += off if dst > imm (signed) */ + /* PC += off if dst >= imm (signed) */ + /* PC += off if dst < imm (signed) */ + /* PC += off if dst <= imm (signed) */ + /* PC += off if dst & imm */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + if (off == 0) break; - case BPF_JMP | BPF_JEQ | BPF_K: - /* pc += (A == K) ? pc->jt : pc->jf */ - condt = ARM_COND_EQ; - goto cmp_imm; - case BPF_JMP | BPF_JGT | BPF_K: - /* pc += (A > K) ? pc->jt : pc->jf */ - condt = ARM_COND_HI; - goto cmp_imm; - case BPF_JMP | BPF_JGE | BPF_K: - /* pc += (A >= K) ? pc->jt : pc->jf */ - condt = ARM_COND_HS; -cmp_imm: - imm12 = imm8m(k); - if (imm12 < 0) { - emit_mov_i_no8m(r_scratch, k, ctx); - emit(ARM_CMP_R(r_A, r_scratch), ctx); - } else { - emit(ARM_CMP_I(r_A, imm12), ctx); - } -cond_jump: - if (inst->jt) - _emit(condt, ARM_B(b_imm(i + inst->jt + 1, - ctx)), ctx); - if (inst->jf) - _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, - ctx)), ctx); + rm = tmp2[0]; + rn = tmp2[1]; + /* Sign-extend immediate value */ + emit_a32_mov_i64(true, tmp2, imm, false, ctx); +go_jmp: + /* Setup destination register */ + rd = dstk ? tmp[0] : dst_hi; + rt = dstk ? tmp[1] : dst_lo; + if (dstk) { + emit(ARM_LDR_I(rt, ARM_SP, STACK_VAR(dst_lo)), ctx); + emit(ARM_LDR_I(rd, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + + /* Check for the condition */ + emit_ar_r(rd, rt, rm, rn, ctx, BPF_OP(code)); + + /* Setup JUMP instruction */ + jmp_offset = bpf2a32_offset(i+off, i, ctx); + switch (BPF_OP(code)) { + case BPF_JNE: + case BPF_JSET: + _emit(ARM_COND_NE, ARM_B(jmp_offset), ctx); break; - case BPF_JMP | BPF_JEQ | BPF_X: - /* pc += (A == X) ? 
pc->jt : pc->jf */ - condt = ARM_COND_EQ; - goto cmp_x; - case BPF_JMP | BPF_JGT | BPF_X: - /* pc += (A > X) ? pc->jt : pc->jf */ - condt = ARM_COND_HI; - goto cmp_x; - case BPF_JMP | BPF_JGE | BPF_X: - /* pc += (A >= X) ? pc->jt : pc->jf */ - condt = ARM_COND_CS; -cmp_x: - update_on_xread(ctx); - emit(ARM_CMP_R(r_A, r_X), ctx); - goto cond_jump; - case BPF_JMP | BPF_JSET | BPF_K: - /* pc += (A & K) ? pc->jt : pc->jf */ - condt = ARM_COND_NE; - /* not set iff all zeroes iff Z==1 iff EQ */ - - imm12 = imm8m(k); - if (imm12 < 0) { - emit_mov_i_no8m(r_scratch, k, ctx); - emit(ARM_TST_R(r_A, r_scratch), ctx); - } else { - emit(ARM_TST_I(r_A, imm12), ctx); - } - goto cond_jump; - case BPF_JMP | BPF_JSET | BPF_X: - /* pc += (A & X) ? pc->jt : pc->jf */ - update_on_xread(ctx); - condt = ARM_COND_NE; - emit(ARM_TST_R(r_A, r_X), ctx); - goto cond_jump; - case BPF_RET | BPF_A: - emit(ARM_MOV_R(ARM_R0, r_A), ctx); - goto b_epilogue; - case BPF_RET | BPF_K: - if ((k == 0) && (ctx->ret0_fp_idx < 0)) - ctx->ret0_fp_idx = i; - emit_mov_i(ARM_R0, k, ctx); -b_epilogue: - if (i != ctx->skf->len - 1) - emit(ARM_B(b_imm(prog->len, ctx)), ctx); + case BPF_JEQ: + _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); break; - case BPF_MISC | BPF_TAX: - /* X = A */ - ctx->seen |= SEEN_X; - emit(ARM_MOV_R(r_X, r_A), ctx); + case BPF_JGT: + _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx); break; - case BPF_MISC | BPF_TXA: - /* A = X */ - update_on_xread(ctx); - emit(ARM_MOV_R(r_A, r_X), ctx); + case BPF_JGE: + _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); break; - case BPF_ANC | SKF_AD_PROTOCOL: - /* A = ntohs(skb->protocol) */ - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - protocol) != 2); - off = offsetof(struct sk_buff, protocol); - emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); - emit_swap16(r_A, r_scratch, ctx); + case BPF_JSGT: + _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx); break; - case BPF_ANC | SKF_AD_CPU: - /* r_scratch = current_thread_info() */ - OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); - /* A = current_thread_info()->cpu */ - BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); - off = offsetof(struct thread_info, cpu); - emit(ARM_LDR_I(r_A, r_scratch, off), ctx); + case BPF_JSGE: + _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx); break; - case BPF_ANC | SKF_AD_IFINDEX: - case BPF_ANC | SKF_AD_HATYPE: - /* A = skb->dev->ifindex */ - /* A = skb->dev->type */ - ctx->seen |= SEEN_SKB; - off = offsetof(struct sk_buff, dev); - emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); - - emit(ARM_CMP_I(r_scratch, 0), ctx); - emit_err_ret(ARM_COND_EQ, ctx); - - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, - ifindex) != 4); - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, - type) != 2); - - if (code == (BPF_ANC | SKF_AD_IFINDEX)) { - off = offsetof(struct net_device, ifindex); - emit(ARM_LDR_I(r_A, r_scratch, off), ctx); - } else { - /* - * offset of field "type" in "struct - * net_device" is above what can be - * used in the ldrh rd, [rn, #imm] - * instruction, so load the offset in - * a register and use ldrh rd, [rn, rm] - */ - off = offsetof(struct net_device, type); - emit_mov_i(ARM_R3, off, ctx); - emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx); - } + case BPF_JLE: + _emit(ARM_COND_LS, ARM_B(jmp_offset), ctx); break; - case BPF_ANC | SKF_AD_MARK: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); - off = offsetof(struct sk_buff, mark); - emit(ARM_LDR_I(r_A, r_skb, off), ctx); + case BPF_JLT: + _emit(ARM_COND_CC, ARM_B(jmp_offset), ctx); break; - case BPF_ANC | 
SKF_AD_RXHASH: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); - off = offsetof(struct sk_buff, hash); - emit(ARM_LDR_I(r_A, r_skb, off), ctx); + case BPF_JSLT: + _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx); break; - case BPF_ANC | SKF_AD_VLAN_TAG: - case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - off = offsetof(struct sk_buff, vlan_tci); - emit(ARM_LDRH_I(r_A, r_skb, off), ctx); - if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) - OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx); - else { - OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx); - OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx); - } + case BPF_JSLE: + _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx); break; - case BPF_ANC | SKF_AD_PKTTYPE: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - __pkt_type_offset[0]) != 1); - off = PKT_TYPE_OFFSET(); - emit(ARM_LDRB_I(r_A, r_skb, off), ctx); - emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx); -#ifdef __BIG_ENDIAN_BITFIELD - emit(ARM_LSR_I(r_A, r_A, 5), ctx); -#endif + } + break; + /* JMP OFF */ + case BPF_JMP | BPF_JA: + { + if (off == 0) break; - case BPF_ANC | SKF_AD_QUEUE: - ctx->seen |= SEEN_SKB; - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, - queue_mapping) != 2); - BUILD_BUG_ON(offsetof(struct sk_buff, - queue_mapping) > 0xff); - off = offsetof(struct sk_buff, queue_mapping); - emit(ARM_LDRH_I(r_A, r_skb, off), ctx); + jmp_offset = bpf2a32_offset(i+off, i, ctx); + check_imm24(jmp_offset); + emit(ARM_B(jmp_offset), ctx); + break; + } + /* tail call */ + case BPF_JMP | BPF_TAIL_CALL: + if (emit_bpf_tail_call(ctx)) + return -EFAULT; + break; + /* function call */ + case BPF_JMP | BPF_CALL: + { + const u8 *r0 = bpf2a32[BPF_REG_0]; + const u8 *r1 = bpf2a32[BPF_REG_1]; + const u8 *r2 = bpf2a32[BPF_REG_2]; + const u8 *r3 = bpf2a32[BPF_REG_3]; + const u8 *r4 = bpf2a32[BPF_REG_4]; + const u8 *r5 = bpf2a32[BPF_REG_5]; + const u32 func = (u32)__bpf_call_base + (u32)imm; + + emit_a32_mov_r64(true, r0, r1, false, false, ctx); + emit_a32_mov_r64(true, r1, r2, false, true, ctx); + emit_push_r64(r5, 0, ctx); + emit_push_r64(r4, 8, ctx); + emit_push_r64(r3, 16, ctx); + + emit_a32_mov_i(tmp[1], func, false, ctx); + emit_blx_r(tmp[1], ctx); + + emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean + break; + } + /* function return */ + case BPF_JMP | BPF_EXIT: + /* Optimization: when last instruction is EXIT + * simply fallthrough to epilogue. + */ + if (i == ctx->prog->len - 1) break; - case BPF_ANC | SKF_AD_PAY_OFFSET: - ctx->seen |= SEEN_SKB | SEEN_CALL; + jmp_offset = epilogue_offset(ctx); + check_imm24(jmp_offset); + emit(ARM_B(jmp_offset), ctx); + break; +notyet: + pr_info_once("*** NOT YET: opcode %02x ***\n", code); + return -EFAULT; + default: + pr_err_once("unknown opcode %02x\n", code); + return -EINVAL; + } - emit(ARM_MOV_R(ARM_R0, r_skb), ctx); - emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx); - emit_blx_r(ARM_R3, ctx); - emit(ARM_MOV_R(r_A, ARM_R0), ctx); - break; - case BPF_LDX | BPF_W | BPF_ABS: - /* - * load a 32bit word from struct seccomp_data. - * seccomp_check_filter() will already have checked - * that k is 32bit aligned and lies within the - * struct seccomp_data. - */ - ctx->seen |= SEEN_SKB; - emit(ARM_LDR_I(r_A, r_skb, k), ctx); - break; - default: - return -1; + if (ctx->flags & FLAG_IMM_OVERFLOW) + /* + * this instruction generated an overflow when + * trying to access the literal pool, so + * delegate this filter to the kernel interpreter. 
+ */ + return -1; + return 0; +} + +static int build_body(struct jit_ctx *ctx) +{ + const struct bpf_prog *prog = ctx->prog; + unsigned int i; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &(prog->insnsi[i]); + int ret; + + ret = build_insn(insn, ctx); + + /* It's used with loading the 64 bit immediate value. */ + if (ret > 0) { + i++; + if (ctx->target == NULL) + ctx->offsets[i] = ctx->idx; + continue; } - if (ctx->flags & FLAG_IMM_OVERFLOW) - /* - * this instruction generated an overflow when - * trying to access the literal pool, so - * delegate this filter to the kernel interpreter. - */ - return -1; + if (ctx->target == NULL) + ctx->offsets[i] = ctx->idx; + + /* If unsuccesfull, return with error code */ + if (ret) + return ret; } + return 0; +} - /* compute offsets only during the first pass */ - if (ctx->target == NULL) - ctx->offsets[i] = ctx->idx * 4; +static int validate_code(struct jit_ctx *ctx) +{ + int i; + + for (i = 0; i < ctx->idx; i++) { + if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF)) + return -1; + } return 0; } +void bpf_jit_compile(struct bpf_prog *prog) +{ + /* Nothing to do here. We support Internal BPF. */ +} -void bpf_jit_compile(struct bpf_prog *fp) +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) { + struct bpf_prog *tmp, *orig_prog = prog; struct bpf_binary_header *header; + bool tmp_blinded = false; struct jit_ctx ctx; - unsigned tmp_idx; - unsigned alloc_size; - u8 *target_ptr; + unsigned int tmp_idx; + unsigned int image_size; + u8 *image_ptr; + /* If BPF JIT was not enabled then we must fall back to + * the interpreter. + */ if (!bpf_jit_enable) - return; + return orig_prog; - memset(&ctx, 0, sizeof(ctx)); - ctx.skf = fp; - ctx.ret0_fp_idx = -1; + /* If constant blinding was enabled and we failed during blinding + * then we must fall back to the interpreter. Otherwise, we save + * the new JITed code. + */ + tmp = bpf_jit_blind_constants(prog); - ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL); - if (ctx.offsets == NULL) - return; + if (IS_ERR(tmp)) + return orig_prog; + if (tmp != prog) { + tmp_blinded = true; + prog = tmp; + } - /* fake pass to fill in the ctx->seen */ - if (unlikely(build_body(&ctx))) + memset(&ctx, 0, sizeof(ctx)); + ctx.prog = prog; + + /* Not able to allocate memory for offsets[] , then + * we must fall back to the interpreter + */ + ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL); + if (ctx.offsets == NULL) { + prog = orig_prog; goto out; + } + + /* 1) fake pass to find in the length of the JITed code, + * to compute ctx->offsets and other context variables + * needed to compute final JITed code. + * Also, calculate random starting pointer/start of JITed code + * which is prefixed by random number of fault instructions. + * + * If the first pass fails then there is no chance of it + * being successful in the second pass, so just fall back + * to the interpreter. 
+ */ + if (build_body(&ctx)) { + prog = orig_prog; + goto out_off; + } tmp_idx = ctx.idx; build_prologue(&ctx); ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; + ctx.epilogue_offset = ctx.idx; + #if __LINUX_ARM_ARCH__ < 7 tmp_idx = ctx.idx; build_epilogue(&ctx); @@ -1021,64 +1880,83 @@ void bpf_jit_compile(struct bpf_prog *fp) ctx.idx += ctx.imm_count; if (ctx.imm_count) { - ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL); - if (ctx.imms == NULL) - goto out; + ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL); + if (ctx.imms == NULL) { + prog = orig_prog; + goto out_off; + } } #else - /* there's nothing after the epilogue on ARMv7 */ + /* there's nothing about the epilogue on ARMv7 */ build_epilogue(&ctx); #endif - alloc_size = 4 * ctx.idx; - header = bpf_jit_binary_alloc(alloc_size, &target_ptr, - 4, jit_fill_hole); - if (header == NULL) - goto out; + /* Now we can get the actual image size of the JITed arm code. + * Currently, we are not considering the THUMB-2 instructions + * for jit, although it can decrease the size of the image. + * + * As each arm instruction is of length 32bit, we are translating + * number of JITed intructions into the size required to store these + * JITed code. + */ + image_size = sizeof(u32) * ctx.idx; - ctx.target = (u32 *) target_ptr; + /* Now we know the size of the structure to make */ + header = bpf_jit_binary_alloc(image_size, &image_ptr, + sizeof(u32), jit_fill_hole); + /* Not able to allocate memory for the structure then + * we must fall back to the interpretation + */ + if (header == NULL) { + prog = orig_prog; + goto out_imms; + } + + /* 2.) Actual pass to generate final JIT code */ + ctx.target = (u32 *) image_ptr; ctx.idx = 0; build_prologue(&ctx); + + /* If building the body of the JITed code fails somehow, + * we fall back to the interpretation. + */ if (build_body(&ctx) < 0) { -#if __LINUX_ARM_ARCH__ < 7 - if (ctx.imm_count) - kfree(ctx.imms); -#endif + image_ptr = NULL; bpf_jit_binary_free(header); - goto out; + prog = orig_prog; + goto out_imms; } build_epilogue(&ctx); + /* 3.) Extra pass to validate JITed Code */ + if (validate_code(&ctx)) { + image_ptr = NULL; + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_imms; + } flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx)); -#if __LINUX_ARM_ARCH__ < 7 - if (ctx.imm_count) - kfree(ctx.imms); -#endif - if (bpf_jit_enable > 1) /* there are 2 passes here */ - bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); + bpf_jit_dump(prog->len, image_size, 2, ctx.target); set_memory_ro((unsigned long)header, header->pages); - fp->bpf_func = (void *)ctx.target; - fp->jited = 1; -out: + prog->bpf_func = (void *)ctx.target; + prog->jited = 1; + prog->jited_len = image_size; + +out_imms: +#if __LINUX_ARM_ARCH__ < 7 + if (ctx.imm_count) + kfree(ctx.imms); +#endif +out_off: kfree(ctx.offsets); - return; +out: + if (tmp_blinded) + bpf_jit_prog_release_other(prog, prog == orig_prog ? 
+ tmp : orig_prog); + return prog; } -void bpf_jit_free(struct bpf_prog *fp) -{ - unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; - struct bpf_binary_header *header = (void *)addr; - - if (!fp->jited) - goto free_filter; - - set_memory_rw(addr, header->pages); - bpf_jit_binary_free(header); - -free_filter: - bpf_prog_unlock_free(fp); -} diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h index c46fca2972f7..d5cf5f6208aa 100644 --- a/arch/arm/net/bpf_jit_32.h +++ b/arch/arm/net/bpf_jit_32.h @@ -11,6 +11,7 @@ #ifndef PFILTER_OPCODES_ARM_H #define PFILTER_OPCODES_ARM_H +/* ARM 32bit Registers */ #define ARM_R0 0 #define ARM_R1 1 #define ARM_R2 2 @@ -22,38 +23,43 @@ #define ARM_R8 8 #define ARM_R9 9 #define ARM_R10 10 -#define ARM_FP 11 -#define ARM_IP 12 -#define ARM_SP 13 -#define ARM_LR 14 -#define ARM_PC 15 - -#define ARM_COND_EQ 0x0 -#define ARM_COND_NE 0x1 -#define ARM_COND_CS 0x2 +#define ARM_FP 11 /* Frame Pointer */ +#define ARM_IP 12 /* Intra-procedure scratch register */ +#define ARM_SP 13 /* Stack pointer: as load/store base reg */ +#define ARM_LR 14 /* Link Register */ +#define ARM_PC 15 /* Program counter */ + +#define ARM_COND_EQ 0x0 /* == */ +#define ARM_COND_NE 0x1 /* != */ +#define ARM_COND_CS 0x2 /* unsigned >= */ #define ARM_COND_HS ARM_COND_CS -#define ARM_COND_CC 0x3 +#define ARM_COND_CC 0x3 /* unsigned < */ #define ARM_COND_LO ARM_COND_CC -#define ARM_COND_MI 0x4 -#define ARM_COND_PL 0x5 -#define ARM_COND_VS 0x6 -#define ARM_COND_VC 0x7 -#define ARM_COND_HI 0x8 -#define ARM_COND_LS 0x9 -#define ARM_COND_GE 0xa -#define ARM_COND_LT 0xb -#define ARM_COND_GT 0xc -#define ARM_COND_LE 0xd -#define ARM_COND_AL 0xe +#define ARM_COND_MI 0x4 /* < 0 */ +#define ARM_COND_PL 0x5 /* >= 0 */ +#define ARM_COND_VS 0x6 /* Signed Overflow */ +#define ARM_COND_VC 0x7 /* No Signed Overflow */ +#define ARM_COND_HI 0x8 /* unsigned > */ +#define ARM_COND_LS 0x9 /* unsigned <= */ +#define ARM_COND_GE 0xa /* Signed >= */ +#define ARM_COND_LT 0xb /* Signed < */ +#define ARM_COND_GT 0xc /* Signed > */ +#define ARM_COND_LE 0xd /* Signed <= */ +#define ARM_COND_AL 0xe /* None */ /* register shift types */ #define SRTYPE_LSL 0 #define SRTYPE_LSR 1 #define SRTYPE_ASR 2 #define SRTYPE_ROR 3 +#define SRTYPE_ASL (SRTYPE_LSL) #define ARM_INST_ADD_R 0x00800000 +#define ARM_INST_ADDS_R 0x00900000 +#define ARM_INST_ADC_R 0x00a00000 +#define ARM_INST_ADC_I 0x02a00000 #define ARM_INST_ADD_I 0x02800000 +#define ARM_INST_ADDS_I 0x02900000 #define ARM_INST_AND_R 0x00000000 #define ARM_INST_AND_I 0x02000000 @@ -76,8 +82,10 @@ #define ARM_INST_LDRH_I 0x01d000b0 #define ARM_INST_LDRH_R 0x019000b0 #define ARM_INST_LDR_I 0x05900000 +#define ARM_INST_LDR_R 0x07900000 #define ARM_INST_LDM 0x08900000 +#define ARM_INST_LDM_IA 0x08b00000 #define ARM_INST_LSL_I 0x01a00000 #define ARM_INST_LSL_R 0x01a00010 @@ -86,6 +94,7 @@ #define ARM_INST_LSR_R 0x01a00030 #define ARM_INST_MOV_R 0x01a00000 +#define ARM_INST_MOVS_R 0x01b00000 #define ARM_INST_MOV_I 0x03a00000 #define ARM_INST_MOVW 0x03000000 #define ARM_INST_MOVT 0x03400000 @@ -96,17 +105,28 @@ #define ARM_INST_PUSH 0x092d0000 #define ARM_INST_ORR_R 0x01800000 +#define ARM_INST_ORRS_R 0x01900000 #define ARM_INST_ORR_I 0x03800000 #define ARM_INST_REV 0x06bf0f30 #define ARM_INST_REV16 0x06bf0fb0 #define ARM_INST_RSB_I 0x02600000 +#define ARM_INST_RSBS_I 0x02700000 +#define ARM_INST_RSC_I 0x02e00000 #define ARM_INST_SUB_R 0x00400000 +#define ARM_INST_SUBS_R 0x00500000 +#define ARM_INST_RSB_R 0x00600000 #define ARM_INST_SUB_I 0x02400000 +#define 
ARM_INST_SUBS_I 0x02500000 +#define ARM_INST_SBC_I 0x02c00000 +#define ARM_INST_SBC_R 0x00c00000 +#define ARM_INST_SBCS_R 0x00d00000 #define ARM_INST_STR_I 0x05800000 +#define ARM_INST_STRB_I 0x05c00000 +#define ARM_INST_STRH_I 0x01c000b0 #define ARM_INST_TST_R 0x01100000 #define ARM_INST_TST_I 0x03100000 @@ -117,6 +137,8 @@ #define ARM_INST_MLS 0x00600090 +#define ARM_INST_UXTH 0x06ff0070 + /* * Use a suitable undefined instruction to use for ARM/Thumb2 faulting. * We need to be careful not to conflict with those used by other modules @@ -135,9 +157,15 @@ #define _AL3_R(op, rd, rn, rm) ((op ## _R) | (rd) << 12 | (rn) << 16 | (rm)) /* immediate */ #define _AL3_I(op, rd, rn, imm) ((op ## _I) | (rd) << 12 | (rn) << 16 | (imm)) +/* register with register-shift */ +#define _AL3_SR(inst) (inst | (1 << 4)) #define ARM_ADD_R(rd, rn, rm) _AL3_R(ARM_INST_ADD, rd, rn, rm) +#define ARM_ADDS_R(rd, rn, rm) _AL3_R(ARM_INST_ADDS, rd, rn, rm) #define ARM_ADD_I(rd, rn, imm) _AL3_I(ARM_INST_ADD, rd, rn, imm) +#define ARM_ADDS_I(rd, rn, imm) _AL3_I(ARM_INST_ADDS, rd, rn, imm) +#define ARM_ADC_R(rd, rn, rm) _AL3_R(ARM_INST_ADC, rd, rn, rm) +#define ARM_ADC_I(rd, rn, imm) _AL3_I(ARM_INST_ADC, rd, rn, imm) #define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm) #define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm) @@ -156,7 +184,9 @@ #define ARM_EOR_I(rd, rn, imm) _AL3_I(ARM_INST_EOR, rd, rn, imm) #define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \ - | (off)) + | ((off) & 0xfff)) +#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | (rt) << 12 | (rn) << 16 \ + | (rm)) #define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \ | (off)) #define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \ @@ -167,15 +197,23 @@ | (rm)) #define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs)) +#define ARM_LDM_IA(rn, regs) (ARM_INST_LDM_IA | (rn) << 16 | (regs)) #define ARM_LSL_R(rd, rn, rm) (_AL3_R(ARM_INST_LSL, rd, 0, rn) | (rm) << 8) #define ARM_LSL_I(rd, rn, imm) (_AL3_I(ARM_INST_LSL, rd, 0, rn) | (imm) << 7) #define ARM_LSR_R(rd, rn, rm) (_AL3_R(ARM_INST_LSR, rd, 0, rn) | (rm) << 8) #define ARM_LSR_I(rd, rn, imm) (_AL3_I(ARM_INST_LSR, rd, 0, rn) | (imm) << 7) +#define ARM_ASR_R(rd, rn, rm) (_AL3_R(ARM_INST_ASR, rd, 0, rn) | (rm) << 8) +#define ARM_ASR_I(rd, rn, imm) (_AL3_I(ARM_INST_ASR, rd, 0, rn) | (imm) << 7) #define ARM_MOV_R(rd, rm) _AL3_R(ARM_INST_MOV, rd, 0, rm) +#define ARM_MOVS_R(rd, rm) _AL3_R(ARM_INST_MOVS, rd, 0, rm) #define ARM_MOV_I(rd, imm) _AL3_I(ARM_INST_MOV, rd, 0, imm) +#define ARM_MOV_SR(rd, rm, type, rs) \ + (_AL3_SR(ARM_MOV_R(rd, rm)) | (type) << 5 | (rs) << 8) +#define ARM_MOV_SI(rd, rm, type, imm6) \ + (ARM_MOV_R(rd, rm) | (type) << 5 | (imm6) << 7) #define ARM_MOVW(rd, imm) \ (ARM_INST_MOVW | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff)) @@ -190,19 +228,38 @@ #define ARM_ORR_R(rd, rn, rm) _AL3_R(ARM_INST_ORR, rd, rn, rm) #define ARM_ORR_I(rd, rn, imm) _AL3_I(ARM_INST_ORR, rd, rn, imm) -#define ARM_ORR_S(rd, rn, rm, type, rs) \ - (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (rs) << 7) +#define ARM_ORR_SR(rd, rn, rm, type, rs) \ + (_AL3_SR(ARM_ORR_R(rd, rn, rm)) | (type) << 5 | (rs) << 8) +#define ARM_ORRS_R(rd, rn, rm) _AL3_R(ARM_INST_ORRS, rd, rn, rm) +#define ARM_ORRS_SR(rd, rn, rm, type, rs) \ + (_AL3_SR(ARM_ORRS_R(rd, rn, rm)) | (type) << 5 | (rs) << 8) +#define ARM_ORR_SI(rd, rn, rm, type, imm6) \ + (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (imm6) << 7) +#define ARM_ORRS_SI(rd, rn, rm, type, imm6) \ + (ARM_ORRS_R(rd, 
rn, rm) | (type) << 5 | (imm6) << 7) #define ARM_REV(rd, rm) (ARM_INST_REV | (rd) << 12 | (rm)) #define ARM_REV16(rd, rm) (ARM_INST_REV16 | (rd) << 12 | (rm)) #define ARM_RSB_I(rd, rn, imm) _AL3_I(ARM_INST_RSB, rd, rn, imm) +#define ARM_RSBS_I(rd, rn, imm) _AL3_I(ARM_INST_RSBS, rd, rn, imm) +#define ARM_RSC_I(rd, rn, imm) _AL3_I(ARM_INST_RSC, rd, rn, imm) #define ARM_SUB_R(rd, rn, rm) _AL3_R(ARM_INST_SUB, rd, rn, rm) +#define ARM_SUBS_R(rd, rn, rm) _AL3_R(ARM_INST_SUBS, rd, rn, rm) +#define ARM_RSB_R(rd, rn, rm) _AL3_R(ARM_INST_RSB, rd, rn, rm) +#define ARM_SBC_R(rd, rn, rm) _AL3_R(ARM_INST_SBC, rd, rn, rm) +#define ARM_SBCS_R(rd, rn, rm) _AL3_R(ARM_INST_SBCS, rd, rn, rm) #define ARM_SUB_I(rd, rn, imm) _AL3_I(ARM_INST_SUB, rd, rn, imm) +#define ARM_SUBS_I(rd, rn, imm) _AL3_I(ARM_INST_SUBS, rd, rn, imm) +#define ARM_SBC_I(rd, rn, imm) _AL3_I(ARM_INST_SBC, rd, rn, imm) #define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \ - | (off)) + | ((off) & 0xfff)) +#define ARM_STRH_I(rt, rn, off) (ARM_INST_STRH_I | (rt) << 12 | (rn) << 16 \ + | (((off) & 0xf0) << 4) | ((off) & 0xf)) +#define ARM_STRB_I(rt, rn, off) (ARM_INST_STRB_I | (rt) << 12 | (rn) << 16 \ + | (((off) & 0xf0) << 4) | ((off) & 0xf)) #define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm) #define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm) @@ -214,5 +271,6 @@ #define ARM_MLS(rd, rn, rm, ra) (ARM_INST_MLS | (rd) << 16 | (rn) | (rm) << 8 \ | (ra) << 12) +#define ARM_UXTH(rd, rm) (ARM_INST_UXTH | (rd) << 12 | (rm)) #endif /* PFILTER_OPCODES_ARM_H */ -- cgit v1.2.3-55-g7522 From d3213fbf695490acbffc7b1fff793c7e26989296 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 21 Aug 2017 12:47:30 +0300 Subject: ethernet: xircom: small clean up in setup_xirc2ps_cs() The get_options() function takes the whole ARRAY_SIZE(). It doesn't matter here because we don't use more than 7 elements. Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/xircom/xirc2ps_cs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index f71883264cc0..fd5288ff53b5 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -1781,7 +1781,7 @@ static int __init setup_xirc2ps_cs(char *str) */ int ints[10] = { -1 }; - str = get_options(str, 9, ints); + str = get_options(str, ARRAY_SIZE(ints), ints); #define MAYBE_SET(X,Y) if (ints[0] >= Y && ints[Y] != -1) { X = ints[Y]; } MAYBE_SET(if_port, 3); -- cgit v1.2.3-55-g7522 From 89c9c1636f5aeef7b74cdcc141e0abc9bd764afe Mon Sep 17 00:00:00 2001 From: David Wu Date: Mon, 21 Aug 2017 18:12:55 +0800 Subject: net: ethernet: stmmac: dwmac-rk: Add rv1108 gmac support It only supports rmii interface. Add constants and callback functions for the dwmac on rv1108 socs. As can be seen, the base structure is the same, only registers and the bits in them moved slightly. Signed-off-by: David Wu Signed-off-by: David S. 
Miller --- .../devicetree/bindings/net/rockchip-dwmac.txt | 1 + drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 53 ++++++++++++++++++++++ 2 files changed, 54 insertions(+) diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt index 8f427550720a..c1325387632c 100644 --- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt +++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt @@ -10,6 +10,7 @@ Required properties: "rockchip,rk3366-gmac": found on RK3366 SoCs "rockchip,rk3368-gmac": found on RK3368 SoCs "rockchip,rk3399-gmac": found on RK3399 SoCs + "rockchip,rv1108-gmac": found on RV1108 SoCs - reg: addresses and length of the register sets for the device. - interrupts: Should contain the GMAC interrupts. - interrupt-names: Should contain the interrupt names "macirq". diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 2176403c72d8..99823f54696a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -787,6 +787,58 @@ static const struct rk_gmac_ops rk3399_ops = { .set_rmii_speed = rk3399_set_rmii_speed, }; +#define RV1108_GRF_GMAC_CON0 0X0900 + +/* RV1108_GRF_GMAC_CON0 */ +#define RV1108_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \ + GRF_BIT(6)) +#define RV1108_GMAC_FLOW_CTRL GRF_BIT(3) +#define RV1108_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3) +#define RV1108_GMAC_SPEED_10M GRF_CLR_BIT(2) +#define RV1108_GMAC_SPEED_100M GRF_BIT(2) +#define RV1108_GMAC_RMII_CLK_25M GRF_BIT(7) +#define RV1108_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7) + +static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, + RV1108_GMAC_PHY_INTF_SEL_RMII); +} + +static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) +{ + struct device *dev = &bsp_priv->pdev->dev; + + if (IS_ERR(bsp_priv->grf)) { + dev_err(dev, "%s: Missing rockchip,grf property\n", __func__); + return; + } + + if (speed == 10) { + regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, + RV1108_GMAC_RMII_CLK_2_5M | + RV1108_GMAC_SPEED_10M); + } else if (speed == 100) { + regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0, + RV1108_GMAC_RMII_CLK_25M | + RV1108_GMAC_SPEED_100M); + } else { + dev_err(dev, "unknown speed value for RMII! speed=%d", speed); + } +} + +static const struct rk_gmac_ops rv1108_ops = { + .set_to_rmii = rv1108_set_to_rmii, + .set_rmii_speed = rv1108_set_rmii_speed, +}; + #define RK_GRF_MACPHY_CON0 0xb00 #define RK_GRF_MACPHY_CON1 0xb04 #define RK_GRF_MACPHY_CON2 0xb08 @@ -1267,6 +1319,7 @@ static const struct of_device_id rk_gmac_dwmac_match[] = { { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops }, { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops }, { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops }, + { .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops }, { } }; MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match); -- cgit v1.2.3-55-g7522 From 51ba902a16e68b786028db8b0482f3a5f22e7d4f Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:47 +0800 Subject: net-next/hinic: Initialize hw interface Initialize hw interface as part of the nic initialization for accessing hw. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- Documentation/networking/hinic.txt | 125 ++++++++++++++ drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/huawei/Kconfig | 19 +++ drivers/net/ethernet/huawei/Makefile | 5 + drivers/net/ethernet/huawei/hinic/Kconfig | 13 ++ drivers/net/ethernet/huawei/hinic/Makefile | 3 + drivers/net/ethernet/huawei/hinic/hinic_dev.h | 33 ++++ drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 36 ++++ drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 201 ++++++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 42 +++++ drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 208 +++++++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 160 +++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_main.c | 195 +++++++++++++++++++++ 14 files changed, 1042 insertions(+) create mode 100644 Documentation/networking/hinic.txt create mode 100644 drivers/net/ethernet/huawei/Kconfig create mode 100644 drivers/net/ethernet/huawei/Makefile create mode 100644 drivers/net/ethernet/huawei/hinic/Kconfig create mode 100644 drivers/net/ethernet/huawei/hinic/Makefile create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_dev.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_if.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_if.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_main.c diff --git a/Documentation/networking/hinic.txt b/Documentation/networking/hinic.txt new file mode 100644 index 000000000000..989366a4039c --- /dev/null +++ b/Documentation/networking/hinic.txt @@ -0,0 +1,125 @@ +Linux Kernel Driver for Huawei Intelligent NIC(HiNIC) family +============================================================ + +Overview: +========= +HiNIC is a network interface card for the Data Center Area. + +The driver supports a range of link-speed devices (10GbE, 25GbE, 40GbE, etc.). +The driver supports also a negotiated and extendable feature set. + +Some HiNIC devices support SR-IOV. This driver is used for Physical Function +(PF). + +HiNIC devices support MSI-X interrupt vector for each Tx/Rx queue and +adaptive interrupt moderation. + +HiNIC devices support also various offload features such as checksum offload, +TCP Transmit Segmentation Offload(TSO), Receive-Side Scaling(RSS) and +LRO(Large Receive Offload). + + +Supported PCI vendor ID/device IDs: +=================================== + +19e5:1822 - HiNIC PF + + +Driver Architecture and Source Code: +==================================== + +hinic_dev - Implement a Logical Network device that is independent from +specific HW details about HW data structure formats. + +hinic_hwdev - Implement the HW details of the device and include the components +for accessing the PCI NIC. + +hinic_hwdev contains the following components: +=============================================== + +HW Interface: +============= + +The interface for accessing the pci device (DMA memory and PCI BARs). +(hinic_hw_if.c, hinic_hw_if.h) + +Configuration Status Registers Area that describes the HW Registers on the +configuration and status BAR0. (hinic_hw_csr.h) + +MGMT components: +================ + +Asynchronous Event Queues(AEQs) - The event queues for receiving messages from +the MGMT modules on the cards. 
(hinic_hw_eqs.c, hinic_hw_eqs.h) + +Application Programmable Interface commands(API CMD) - Interface for sending +MGMT commands to the card. (hinic_hw_api_cmd.c, hinic_hw_api_cmd.h) + +Management (MGMT) - the PF to MGMT channel that uses API CMD for sending MGMT +commands to the card and receives notifications from the MGMT modules on the +card by AEQs. Also set the addresses of the IO CMDQs in HW. +(hinic_hw_mgmt.c, hinic_hw_mgmt.h) + +IO components: +============== + +Completion Event Queues(CEQs) - The completion Event Queues that describe IO +tasks that are finished. (hinic_hw_eqs.c, hinic_hw_eqs.h) + +Work Queues(WQ) - Contain the memory and operations for use by CMD queues and +the Queue Pairs. The WQ is a Memory Block in a Page. The Block contains +pointers to Memory Areas that are the Memory for the Work Queue Elements(WQEs). +(hinic_hw_wq.c, hinic_hw_wq.h) + +Command Queues(CMDQ) - The queues for sending commands for IO management and is +used to set the QPs addresses in HW. The commands completion events are +accumulated on the CEQ that is configured to receive the CMDQ completion events. +(hinic_hw_cmdq.c, hinic_hw_cmdq.h) + +Queue Pairs(QPs) - The HW Receive and Send queues for Receiving and Transmitting +Data. (hinic_hw_qp.c, hinic_hw_qp.h, hinic_hw_qp_ctxt.h) + +IO - de/constructs all the IO components. (hinic_hw_io.c, hinic_hw_io.h) + +HW device: +========== + +HW device - de/constructs the HW Interface, the MGMT components on the +initialization of the driver and the IO components on the case of Interface +UP/DOWN Events. (hinic_hw_dev.c, hinic_hw_dev.h) + + +hinic_dev contains the following components: +=============================================== + +PCI ID table - Contains the supported PCI Vendor/Device IDs. +(hinic_pci_tbl.h) + +Port Commands - Send commands to the HW device for port management +(MAC, Vlan, MTU, ...). (hinic_port.c, hinic_port.h) + +Tx Queues - Logical Tx Queues that use the HW Send Queues for transmit. +The Logical Tx queue is not dependent on the format of the HW Send Queue. +(hinic_tx.c, hinic_tx.h) + +Rx Queues - Logical Rx Queues that use the HW Receive Queues for receive. +The Logical Rx queue is not dependent on the format of the HW Receive Queue. +(hinic_rx.c, hinic_rx.h) + +hinic_dev - de/constructs the Logical Tx and Rx Queues. +(hinic_main.c, hinic_dev.h) + + +Miscellaneous: +============= + +Common functions that are used by HW and Logical Device. +(hinic_common.c, hinic_common.h) + + +Support +======= + +If an issue is identified with the released source code on the supported kernel +with a supported adapter, email the specific information related to the issue to +aviad.krawczyk@huawei.com. 
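To make the layering described above a little more concrete, here is a minimal, hypothetical sketch of how a caller would reach a BAR0 register through the HW Interface layer. Only the HINIC_CSR_* offsets (hinic_hw_csr.h) come from this patch; struct example_hinic_hwif, its cfg_regs_bar field and the example_* helpers are stand-ins for what hinic_hw_if.c provides, not the driver's actual API.

	#include <linux/io.h>
	#include <linux/types.h>
	#include "hinic_hw_csr.h"	/* added by this patch: HINIC_CSR_* offsets */

	/* Hypothetical stand-in for the HW Interface object built by
	 * hinic_hw_if.c; cfg_regs_bar would be the ioremapped
	 * configuration/status BAR0 described above.
	 */
	struct example_hinic_hwif {
		void __iomem *cfg_regs_bar;
	};

	/* Hypothetical accessor: the upper layers (hinic_hwdev, and above it
	 * the logical hinic_dev) never touch the BAR directly, they go
	 * through helpers of this shape.
	 */
	static u32 example_hwif_read32(struct example_hinic_hwif *hwif, u32 reg)
	{
		return readl(hwif->cfg_regs_bar + reg);
	}

	static u32 example_read_func_attr0(struct example_hinic_hwif *hwif)
	{
		/* HINIC_CSR_FUNC_ATTR0_ADDR (0x0) is defined in hinic_hw_csr.h */
		return example_hwif_read32(hwif, HINIC_CSR_FUNC_ATTR0_ADDR);
	}

The point of the sketch is only the direction of the dependency: the logical device depends on the HW device, which depends on the HW Interface, and register-level knowledge stays in the lowest layer.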
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index edae15ac0e98..c60421339a98 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -78,6 +78,7 @@ source "drivers/net/ethernet/freescale/Kconfig" source "drivers/net/ethernet/fujitsu/Kconfig" source "drivers/net/ethernet/hisilicon/Kconfig" source "drivers/net/ethernet/hp/Kconfig" +source "drivers/net/ethernet/huawei/Kconfig" source "drivers/net/ethernet/ibm/Kconfig" source "drivers/net/ethernet/intel/Kconfig" source "drivers/net/ethernet/i825xx/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index bf7f4502cabc..a0a03d4d939a 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/ obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/ obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/ obj-$(CONFIG_NET_VENDOR_HP) += hp/ +obj-$(CONFIG_NET_VENDOR_HUAWEI) += huawei/ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/ obj-$(CONFIG_NET_VENDOR_INTEL) += intel/ obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/ diff --git a/drivers/net/ethernet/huawei/Kconfig b/drivers/net/ethernet/huawei/Kconfig new file mode 100644 index 000000000000..c1a95ae4058b --- /dev/null +++ b/drivers/net/ethernet/huawei/Kconfig @@ -0,0 +1,19 @@ +# +# Huawei driver configuration +# + +config NET_VENDOR_HUAWEI + bool "Huawei devices" + default y + ---help--- + If you have a network (Ethernet) card belonging to this class, say Y. + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Huawei cards. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_HUAWEI + +source "drivers/net/ethernet/huawei/hinic/Kconfig" + +endif # NET_VENDOR_HUAWEI diff --git a/drivers/net/ethernet/huawei/Makefile b/drivers/net/ethernet/huawei/Makefile new file mode 100644 index 000000000000..5c37cc8fc1bc --- /dev/null +++ b/drivers/net/ethernet/huawei/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Huawei device drivers. +# + +obj-$(CONFIG_HINIC) += hinic/ diff --git a/drivers/net/ethernet/huawei/hinic/Kconfig b/drivers/net/ethernet/huawei/hinic/Kconfig new file mode 100644 index 000000000000..69f2b1fba48d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/Kconfig @@ -0,0 +1,13 @@ +# +# Huawei driver configuration +# + +config HINIC + tristate "Huawei Intelligent PCIE Network Interface Card" + depends on (PCI_MSI && X86) + default m + ---help--- + This driver supports HiNIC PCIE Ethernet cards. + To compile this driver as part of the kernel, choose Y here. + If unsure, choose N. + The default is compiled as module. 
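For reference, the Kconfig entries above correspond to a configuration fragment like the following (illustrative, not part of the patch; it assumes an x86 kernel with MSI support, per the "depends on" line):

CONFIG_PCI_MSI=y
CONFIG_NET_VENDOR_HUAWEI=y
CONFIG_HINIC=m

NET_VENDOR_HUAWEI is only the vendor gate that makes the HiNIC prompt visible; it is CONFIG_HINIC that actually builds the hinic.ko module assembled by the Makefile below.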
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile new file mode 100644 index 000000000000..353cee03cf2d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_HINIC) += hinic.o + +hinic-y := hinic_main.o hinic_hw_dev.o hinic_hw_if.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h new file mode 100644 index 000000000000..6c2c896015a5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -0,0 +1,33 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_DEV_H +#define HINIC_DEV_H + +#include +#include + +#include "hinic_hw_dev.h" + +#define HINIC_DRV_NAME "hinic" + +struct hinic_dev { + struct net_device *netdev; + struct hinic_hwdev *hwdev; + + u32 msg_enable; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h new file mode 100644 index 000000000000..c3440a9f5a1e --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h @@ -0,0 +1,36 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_CSR_H +#define HINIC_HW_CSR_H + +/* HW interface registers */ +#define HINIC_CSR_FUNC_ATTR0_ADDR 0x0 +#define HINIC_CSR_FUNC_ATTR1_ADDR 0x4 + +#define HINIC_DMA_ATTR_BASE 0xC80 +#define HINIC_ELECTION_BASE 0x4200 + +#define HINIC_DMA_ATTR_STRIDE 0x4 +#define HINIC_CSR_DMA_ATTR_ADDR(idx) \ + (HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE) + +#define HINIC_PPF_ELECTION_STRIDE 0x4 +#define HINIC_CSR_MAX_PORTS 4 + +#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \ + (HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE) + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c new file mode 100644 index 000000000000..f681846e51d5 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -0,0 +1,201 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_dev.h" + +#define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \ + (2 * (max_qps) + (num_aeqs) + (num_ceqs)) + +/** + * init_msix - enable the msix and save the entries + * @hwdev: the NIC HW device + * + * Return 0 - Success, negative - Failure + **/ +static int init_msix(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + int nr_irqs, num_aeqs, num_ceqs; + size_t msix_entries_size; + int i, err; + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); + num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); + nr_irqs = MAX_IRQS(HINIC_MAX_QPS, num_aeqs, num_ceqs); + if (nr_irqs > HINIC_HWIF_NUM_IRQS(hwif)) + nr_irqs = HINIC_HWIF_NUM_IRQS(hwif); + + msix_entries_size = nr_irqs * sizeof(*hwdev->msix_entries); + hwdev->msix_entries = devm_kzalloc(&pdev->dev, msix_entries_size, + GFP_KERNEL); + if (!hwdev->msix_entries) + return -ENOMEM; + + for (i = 0; i < nr_irqs; i++) + hwdev->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, hwdev->msix_entries, nr_irqs); + if (err) { + dev_err(&pdev->dev, "Failed to enable pci msix\n"); + return err; + } + + return 0; +} + +/** + * disable_msix - disable the msix + * @hwdev: the NIC HW device + **/ +static void disable_msix(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + + pci_disable_msix(pdev); +} + +/** + * init_pfhwdev - Initialize the extended components of PF + * @pfhwdev: the HW device for PF + * + * Return 0 - success, negative - failure + **/ +static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) +{ + /* Initialize PF HW device extended components */ + return 0; +} + +/** + * free_pfhwdev - Free the extended components of PF + * @pfhwdev: the HW device for PF + **/ +static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) +{ +} + +/** + * hinic_init_hwdev - Initialize the NIC HW + * @pdev: the NIC pci device + * + * Return initialized NIC HW device + * + * Initialize the NIC HW device and return a pointer to it + **/ +struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) +{ + struct hinic_pfhwdev *pfhwdev; + struct hinic_hwdev *hwdev; + struct hinic_hwif *hwif; + int err; + + hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL); + if (!hwif) + return ERR_PTR(-ENOMEM); + + err = hinic_init_hwif(hwif, pdev); + if (err) { + dev_err(&pdev->dev, "Failed to init HW interface\n"); + return ERR_PTR(err); + } + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + err = -EFAULT; + goto err_func_type; + } + + pfhwdev = devm_kzalloc(&pdev->dev, sizeof(*pfhwdev), GFP_KERNEL); + if (!pfhwdev) { + err = -ENOMEM; + goto err_pfhwdev_alloc; + } + + hwdev = &pfhwdev->hwdev; + hwdev->hwif = hwif; + + err = init_msix(hwdev); + if (err) { + dev_err(&pdev->dev, "Failed to init msix\n"); + goto err_init_msix; + } + + err = init_pfhwdev(pfhwdev); + if (err) { + dev_err(&pdev->dev, "Failed to init PF HW device\n"); + goto err_init_pfhwdev; + } + + return hwdev; + +err_init_pfhwdev: + disable_msix(hwdev); + +err_init_msix: +err_pfhwdev_alloc: +err_func_type: + hinic_free_hwif(hwif); + return ERR_PTR(err); +} + +/** + * hinic_free_hwdev - Free the NIC HW device + * @hwdev: the NIC HW device + **/ +void hinic_free_hwdev(struct hinic_hwdev *hwdev) +{ + struct hinic_pfhwdev *pfhwdev = container_of(hwdev, + struct 
hinic_pfhwdev, + hwdev); + + free_pfhwdev(pfhwdev); + + disable_msix(hwdev); + + hinic_free_hwif(hwdev->hwif); +} + +/** + * hinic_hwdev_num_qps - return the number QPs available for use + * @hwdev: the NIC HW device + * + * Return number QPs available for use + **/ +int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev) +{ + int num_aeqs, num_ceqs, nr_irqs, num_qps; + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif); + num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif); + nr_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif); + + /* Each QP has its own (SQ + RQ) interrupt */ + num_qps = (nr_irqs - (num_aeqs + num_ceqs)) / 2; + + /* num_qps must be power of 2 */ + return BIT(fls(num_qps) - 1); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h new file mode 100644 index 000000000000..b42e0ebdd97b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -0,0 +1,42 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_DEV_H +#define HINIC_HW_DEV_H + +#include + +#include "hinic_hw_if.h" + +#define HINIC_MAX_QPS 32 + +struct hinic_hwdev { + struct hinic_hwif *hwif; + struct msix_entry *msix_entries; +}; + +struct hinic_pfhwdev { + struct hinic_hwdev hwdev; + + /* PF Extended components should be here */ +}; + +struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev); + +void hinic_free_hwdev(struct hinic_hwdev *hwdev); + +int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c new file mode 100644 index 000000000000..edf184242172 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c @@ -0,0 +1,208 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_csr.h" +#include "hinic_hw_if.h" + +#define PCIE_ATTR_ENTRY 0 + +/** + * hwif_ready - test if the HW is ready for use + * @hwif: the HW interface of a pci function device + * + * Return 0 - Success, negative - Failure + **/ +static int hwif_ready(struct hinic_hwif *hwif) +{ + struct pci_dev *pdev = hwif->pdev; + u32 addr, attr1; + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwif, addr); + + if (!HINIC_FA1_GET(attr1, INIT_STATUS)) { + dev_err(&pdev->dev, "hwif status is not ready\n"); + return -EFAULT; + } + + return 0; +} + +/** + * set_hwif_attr - set the attributes in the relevant members in hwif + * @hwif: the HW interface of a pci function device + * @attr0: the first attribute that was read from the hw + * @attr1: the second attribute that was read from the hw + **/ +static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1) +{ + hwif->attr.func_idx = HINIC_FA0_GET(attr0, FUNC_IDX); + hwif->attr.pf_idx = HINIC_FA0_GET(attr0, PF_IDX); + hwif->attr.pci_intf_idx = HINIC_FA0_GET(attr0, PCI_INTF_IDX); + hwif->attr.func_type = HINIC_FA0_GET(attr0, FUNC_TYPE); + + hwif->attr.num_aeqs = BIT(HINIC_FA1_GET(attr1, AEQS_PER_FUNC)); + hwif->attr.num_ceqs = BIT(HINIC_FA1_GET(attr1, CEQS_PER_FUNC)); + hwif->attr.num_irqs = BIT(HINIC_FA1_GET(attr1, IRQS_PER_FUNC)); + hwif->attr.num_dma_attr = BIT(HINIC_FA1_GET(attr1, DMA_ATTR_PER_FUNC)); +} + +/** + * read_hwif_attr - read the attributes and set members in hwif + * @hwif: the HW interface of a pci function device + **/ +static void read_hwif_attr(struct hinic_hwif *hwif) +{ + u32 addr, attr0, attr1; + + addr = HINIC_CSR_FUNC_ATTR0_ADDR; + attr0 = hinic_hwif_read_reg(hwif, addr); + + addr = HINIC_CSR_FUNC_ATTR1_ADDR; + attr1 = hinic_hwif_read_reg(hwif, addr); + + set_hwif_attr(hwif, attr0, attr1); +} + +/** + * set_ppf - try to set hwif as ppf and set the type of hwif in this case + * @hwif: the HW interface of a pci function device + **/ +static void set_ppf(struct hinic_hwif *hwif) +{ + struct hinic_func_attr *attr = &hwif->attr; + u32 addr, val, ppf_election; + + /* Read Modify Write */ + addr = HINIC_CSR_PPF_ELECTION_ADDR(HINIC_HWIF_PCI_INTF(hwif)); + + val = hinic_hwif_read_reg(hwif, addr); + val = HINIC_PPF_ELECTION_CLEAR(val, IDX); + + ppf_election = HINIC_PPF_ELECTION_SET(HINIC_HWIF_FUNC_IDX(hwif), IDX); + + val |= ppf_election; + hinic_hwif_write_reg(hwif, addr, val); + + /* check PPF */ + val = hinic_hwif_read_reg(hwif, addr); + + attr->ppf_idx = HINIC_PPF_ELECTION_GET(val, IDX); + if (attr->ppf_idx == HINIC_HWIF_FUNC_IDX(hwif)) + attr->func_type = HINIC_PPF; +} + +/** + * set_dma_attr - set the dma attributes in the HW + * @hwif: the HW interface of a pci function device + * @entry_idx: the entry index in the dma table + * @st: PCIE TLP steering tag + * @at: PCIE TLP AT field + * @ph: PCIE TLP Processing Hint field + * @no_snooping: PCIE TLP No snooping + * @tph_en: PCIE TLP Processing Hint Enable + **/ +static void set_dma_attr(struct hinic_hwif *hwif, u32 entry_idx, + u8 st, u8 at, u8 ph, + enum hinic_pcie_nosnoop no_snooping, + enum hinic_pcie_tph tph_en) +{ + u32 addr, val, dma_attr_entry; + + /* Read Modify Write */ + addr = HINIC_CSR_DMA_ATTR_ADDR(entry_idx); + + val = hinic_hwif_read_reg(hwif, addr); + val = HINIC_DMA_ATTR_CLEAR(val, ST) & + HINIC_DMA_ATTR_CLEAR(val, AT) & + HINIC_DMA_ATTR_CLEAR(val, PH) & + HINIC_DMA_ATTR_CLEAR(val, NO_SNOOPING) & + HINIC_DMA_ATTR_CLEAR(val, TPH_EN); + + dma_attr_entry = 
HINIC_DMA_ATTR_SET(st, ST) | + HINIC_DMA_ATTR_SET(at, AT) | + HINIC_DMA_ATTR_SET(ph, PH) | + HINIC_DMA_ATTR_SET(no_snooping, NO_SNOOPING) | + HINIC_DMA_ATTR_SET(tph_en, TPH_EN); + + val |= dma_attr_entry; + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * dma_attr_table_init - initialize the the default dma attributes + * @hwif: the HW interface of a pci function device + **/ +static void dma_attr_init(struct hinic_hwif *hwif) +{ + set_dma_attr(hwif, PCIE_ATTR_ENTRY, HINIC_PCIE_ST_DISABLE, + HINIC_PCIE_AT_DISABLE, HINIC_PCIE_PH_DISABLE, + HINIC_PCIE_SNOOP, HINIC_PCIE_TPH_DISABLE); +} + +/** + * hinic_init_hwif - initialize the hw interface + * @hwif: the HW interface of a pci function device + * @pdev: the pci device for acessing PCI resources + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev) +{ + int err; + + hwif->pdev = pdev; + + hwif->cfg_regs_bar = pci_ioremap_bar(pdev, HINIC_PCI_CFG_REGS_BAR); + if (!hwif->cfg_regs_bar) { + dev_err(&pdev->dev, "Failed to map configuration regs\n"); + return -ENOMEM; + } + + err = hwif_ready(hwif); + if (err) { + dev_err(&pdev->dev, "HW interface is not ready\n"); + goto err_hwif_ready; + } + + read_hwif_attr(hwif); + + if (HINIC_IS_PF(hwif)) + set_ppf(hwif); + + /* No transactionss before DMA is initialized */ + dma_attr_init(hwif); + return 0; + +err_hwif_ready: + iounmap(hwif->cfg_regs_bar); + return err; +} + +/** + * hinic_free_hwif - free the HW interface + * @hwif: the HW interface of a pci function device + **/ +void hinic_free_hwif(struct hinic_hwif *hwif) +{ + iounmap(hwif->cfg_regs_bar); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h new file mode 100644 index 000000000000..d1a8fa2bc3ee --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -0,0 +1,160 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_HW_IF_H +#define HINIC_HW_IF_H + +#include +#include +#include +#include + +#define HINIC_DMA_ATTR_ST_SHIFT 0 +#define HINIC_DMA_ATTR_AT_SHIFT 8 +#define HINIC_DMA_ATTR_PH_SHIFT 10 +#define HINIC_DMA_ATTR_NO_SNOOPING_SHIFT 12 +#define HINIC_DMA_ATTR_TPH_EN_SHIFT 13 + +#define HINIC_DMA_ATTR_ST_MASK 0xFF +#define HINIC_DMA_ATTR_AT_MASK 0x3 +#define HINIC_DMA_ATTR_PH_MASK 0x3 +#define HINIC_DMA_ATTR_NO_SNOOPING_MASK 0x1 +#define HINIC_DMA_ATTR_TPH_EN_MASK 0x1 + +#define HINIC_DMA_ATTR_SET(val, member) \ + (((u32)(val) & HINIC_DMA_ATTR_##member##_MASK) << \ + HINIC_DMA_ATTR_##member##_SHIFT) + +#define HINIC_DMA_ATTR_CLEAR(val, member) \ + ((val) & (~(HINIC_DMA_ATTR_##member##_MASK \ + << HINIC_DMA_ATTR_##member##_SHIFT))) + +#define HINIC_FA0_FUNC_IDX_SHIFT 0 +#define HINIC_FA0_PF_IDX_SHIFT 10 +#define HINIC_FA0_PCI_INTF_IDX_SHIFT 14 +/* reserved members - off 16 */ +#define HINIC_FA0_FUNC_TYPE_SHIFT 24 + +#define HINIC_FA0_FUNC_IDX_MASK 0x3FF +#define HINIC_FA0_PF_IDX_MASK 0xF +#define HINIC_FA0_PCI_INTF_IDX_MASK 0x3 +#define HINIC_FA0_FUNC_TYPE_MASK 0x1 + +#define HINIC_FA0_GET(val, member) \ + (((val) >> HINIC_FA0_##member##_SHIFT) & HINIC_FA0_##member##_MASK) + +#define HINIC_FA1_AEQS_PER_FUNC_SHIFT 8 +/* reserved members - off 10 */ +#define HINIC_FA1_CEQS_PER_FUNC_SHIFT 12 +/* reserved members - off 15 */ +#define HINIC_FA1_IRQS_PER_FUNC_SHIFT 20 +#define HINIC_FA1_DMA_ATTR_PER_FUNC_SHIFT 24 +/* reserved members - off 27 */ +#define HINIC_FA1_INIT_STATUS_SHIFT 30 + +#define HINIC_FA1_AEQS_PER_FUNC_MASK 0x3 +#define HINIC_FA1_CEQS_PER_FUNC_MASK 0x7 +#define HINIC_FA1_IRQS_PER_FUNC_MASK 0xF +#define HINIC_FA1_DMA_ATTR_PER_FUNC_MASK 0x7 +#define HINIC_FA1_INIT_STATUS_MASK 0x1 + +#define HINIC_FA1_GET(val, member) \ + (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK) + +#define HINIC_PPF_ELECTION_IDX_SHIFT 0 +#define HINIC_PPF_ELECTION_IDX_MASK 0x1F + +#define HINIC_PPF_ELECTION_SET(val, member) \ + (((u32)(val) & HINIC_PPF_ELECTION_##member##_MASK) << \ + HINIC_PPF_ELECTION_##member##_SHIFT) + +#define HINIC_PPF_ELECTION_GET(val, member) \ + (((val) >> HINIC_PPF_ELECTION_##member##_SHIFT) & \ + HINIC_PPF_ELECTION_##member##_MASK) + +#define HINIC_PPF_ELECTION_CLEAR(val, member) \ + ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \ + << HINIC_PPF_ELECTION_##member##_SHIFT))) + +#define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) +#define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) +#define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) +#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx) +#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx) + +#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type) +#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF) +#define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF) + +#define HINIC_PCI_CFG_REGS_BAR 0 + +#define HINIC_PCIE_ST_DISABLE 0 +#define HINIC_PCIE_AT_DISABLE 0 +#define HINIC_PCIE_PH_DISABLE 0 + +enum hinic_pcie_nosnoop { + HINIC_PCIE_SNOOP = 0, + HINIC_PCIE_NO_SNOOP = 1, +}; + +enum hinic_pcie_tph { + HINIC_PCIE_TPH_DISABLE = 0, + HINIC_PCIE_TPH_ENABLE = 1, +}; + +enum hinic_func_type { + HINIC_PF = 0, + HINIC_PPF = 2, +}; + +struct hinic_func_attr { + u16 func_idx; + u8 pf_idx; + u8 pci_intf_idx; + + enum hinic_func_type func_type; + + u8 ppf_idx; + + u16 num_irqs; + u8 num_aeqs; + u8 num_ceqs; + + u8 num_dma_attr; +}; + +struct hinic_hwif { + struct pci_dev *pdev; + void __iomem *cfg_regs_bar; + + struct hinic_func_attr attr; +}; + +static inline u32 
hinic_hwif_read_reg(struct hinic_hwif *hwif, u32 reg) +{ + return be32_to_cpu(readl(hwif->cfg_regs_bar + reg)); +} + +static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, + u32 val) +{ + writel(cpu_to_be32(val), hwif->cfg_regs_bar + reg); +} + +int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev); + +void hinic_free_hwif(struct hinic_hwif *hwif); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c new file mode 100644 index 000000000000..1d7aed07b25f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -0,0 +1,195 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_dev.h" +#include "hinic_dev.h" + +MODULE_AUTHOR("Huawei Technologies CO., Ltd"); +MODULE_DESCRIPTION("Huawei Intelligent NIC driver"); +MODULE_LICENSE("GPL"); + +#define PCI_DEVICE_ID_HI1822_PF 0x1822 + +#define MSG_ENABLE_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) + +static const struct net_device_ops hinic_netdev_ops = { + /* Operations are empty, should be filled */ +}; + +/** + * nic_dev_init - Initialize the NIC device + * @pdev: the NIC pci device + * + * Return 0 - Success, negative - Failure + **/ +static int nic_dev_init(struct pci_dev *pdev) +{ + struct hinic_dev *nic_dev; + struct net_device *netdev; + struct hinic_hwdev *hwdev; + int err, num_qps; + + hwdev = hinic_init_hwdev(pdev); + if (IS_ERR(hwdev)) { + dev_err(&pdev->dev, "Failed to initialize HW device\n"); + return PTR_ERR(hwdev); + } + + num_qps = hinic_hwdev_num_qps(hwdev); + if (num_qps <= 0) { + dev_err(&pdev->dev, "Invalid number of QPS\n"); + err = -EINVAL; + goto err_num_qps; + } + + netdev = alloc_etherdev_mq(sizeof(*nic_dev), num_qps); + if (!netdev) { + dev_err(&pdev->dev, "Failed to allocate Ethernet device\n"); + err = -ENOMEM; + goto err_alloc_etherdev; + } + + netdev->netdev_ops = &hinic_netdev_ops; + + nic_dev = netdev_priv(netdev); + nic_dev->netdev = netdev; + nic_dev->hwdev = hwdev; + nic_dev->msg_enable = MSG_ENABLE_DEFAULT; + + pci_set_drvdata(pdev, netdev); + + netif_carrier_off(netdev); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto err_reg_netdev; + } + + return 0; + +err_reg_netdev: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + +err_alloc_etherdev: +err_num_qps: + hinic_free_hwdev(hwdev); + return err; +} + +static int hinic_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int err = pci_enable_device(pdev); + + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + return err; + } + + err = pci_request_regions(pdev, HINIC_DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Failed to request PCI regions\n"); + goto err_pci_regions; + } + + pci_set_master(pdev); + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, 
"Couldn't set 64-bit DMA mask\n"); + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "Failed to set DMA mask\n"); + goto err_dma_mask; + } + } + + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_warn(&pdev->dev, + "Couldn't set 64-bit consistent DMA mask\n"); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "Failed to set consistent DMA mask\n"); + goto err_dma_consistent_mask; + } + } + + err = nic_dev_init(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to initialize NIC device\n"); + goto err_nic_dev_init; + } + + dev_info(&pdev->dev, "HiNIC driver - probed\n"); + return 0; + +err_nic_dev_init: +err_dma_consistent_mask: +err_dma_mask: + pci_release_regions(pdev); + +err_pci_regions: + pci_disable_device(pdev); + return err; +} + +static void hinic_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct hinic_dev *nic_dev = netdev_priv(netdev); + + unregister_netdev(netdev); + + pci_set_drvdata(pdev, NULL); + + hinic_free_hwdev(nic_dev->hwdev); + + free_netdev(netdev); + + pci_release_regions(pdev); + pci_disable_device(pdev); + + dev_info(&pdev->dev, "HiNIC driver - removed\n"); +} + +static const struct pci_device_id hinic_pci_table[] = { + { PCI_VDEVICE(HUAWEI, PCI_DEVICE_ID_HI1822_PF), 0}, + { 0, 0} +}; +MODULE_DEVICE_TABLE(pci, hinic_pci_table); + +static struct pci_driver hinic_driver = { + .name = HINIC_DRV_NAME, + .id_table = hinic_pci_table, + .probe = hinic_probe, + .remove = hinic_remove, +}; + +module_pci_driver(hinic_driver); -- cgit v1.2.3-55-g7522 From a5564e7e44268bcac402697eff85286722f753a9 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:48 +0800 Subject: net-next/hinic: Initialize hw device components Initialize hw device by calling the initialization functions of aeqs and management channel. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/Makefile | 3 +- drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 172 ++++++++++++++++++++-- drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 14 +- drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 149 +++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h | 107 ++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 8 + drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 92 ++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 45 ++++++ 8 files changed, 576 insertions(+), 14 deletions(-) create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile index 353cee03cf2d..717ad71213fb 100644 --- a/drivers/net/ethernet/huawei/hinic/Makefile +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_HINIC) += hinic.o -hinic-y := hinic_main.o hinic_hw_dev.o hinic_hw_if.o +hinic-y := hinic_main.o hinic_hw_dev.o hinic_hw_mgmt.o hinic_hw_eqs.o \ + hinic_hw_if.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index f681846e51d5..d430e60192af 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -23,11 +23,132 @@ #include #include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" +#include "hinic_hw_mgmt.h" #include "hinic_hw_dev.h" #define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \ (2 * (max_qps) + (num_aeqs) + (num_ceqs)) +enum intr_type { + INTR_MSIX_TYPE, +}; + +/* HW struct */ +struct hinic_dev_cap { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 rsvd1[5]; + u8 intr_type; + u8 rsvd2[66]; + u16 max_sqs; + u16 max_rqs; + u8 rsvd3[208]; +}; + +/** + * get_capability - convert device capabilities to NIC capabilities + * @hwdev: the HW device to set and convert device capabilities for + * @dev_cap: device capabilities from FW + * + * Return 0 - Success, negative - Failure + **/ +static int get_capability(struct hinic_hwdev *hwdev, + struct hinic_dev_cap *dev_cap) +{ + struct hinic_cap *nic_cap = &hwdev->nic_cap; + int num_aeqs, num_ceqs, num_irqs; + + if (!HINIC_IS_PF(hwdev->hwif) && !HINIC_IS_PPF(hwdev->hwif)) + return -EINVAL; + + if (dev_cap->intr_type != INTR_MSIX_TYPE) + return -EFAULT; + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif); + num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif); + num_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif); + + /* Each QP has its own (SQ + RQ) interrupts */ + nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2; + + /* num_qps must be power of 2 */ + nic_cap->num_qps = BIT(fls(nic_cap->num_qps) - 1); + + nic_cap->max_qps = dev_cap->max_sqs + 1; + if (nic_cap->max_qps != (dev_cap->max_rqs + 1)) + return -EFAULT; + + if (nic_cap->num_qps > nic_cap->max_qps) + nic_cap->num_qps = nic_cap->max_qps; + + return 0; +} + +/** + * get_cap_from_fw - get device capabilities from FW + * @pfhwdev: the PF HW device to get capabilities for + * + * Return 0 - Success, negative - Failure + **/ +static int get_cap_from_fw(struct hinic_pfhwdev *pfhwdev) +{ + struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_dev_cap dev_cap; + u16 in_len, out_len; + int err; + + in_len = 0; + 
out_len = sizeof(dev_cap); + + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_CFGM, + HINIC_CFG_NIC_CAP, &dev_cap, in_len, &dev_cap, + &out_len, HINIC_MGMT_MSG_SYNC); + if (err) { + dev_err(&pdev->dev, "Failed to get capability from FW\n"); + return err; + } + + return get_capability(hwdev, &dev_cap); +} + +/** + * get_dev_cap - get device capabilities + * @hwdev: the NIC HW device to get capabilities for + * + * Return 0 - Success, negative - Failure + **/ +static int get_dev_cap(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + int err; + + switch (HINIC_FUNC_TYPE(hwif)) { + case HINIC_PPF: + case HINIC_PF: + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + err = get_cap_from_fw(pfhwdev); + if (err) { + dev_err(&pdev->dev, "Failed to get capability from FW\n"); + return err; + } + break; + + default: + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + return 0; +} + /** * init_msix - enable the msix and save the entries * @hwdev: the NIC HW device @@ -86,7 +207,17 @@ static void disable_msix(struct hinic_hwdev *hwdev) **/ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) { - /* Initialize PF HW device extended components */ + struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + err = hinic_pf_to_mgmt_init(&pfhwdev->pf_to_mgmt, hwif); + if (err) { + dev_err(&pdev->dev, "Failed to initialize PF to MGMT channel\n"); + return err; + } + return 0; } @@ -96,6 +227,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) **/ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) { + hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); } /** @@ -111,7 +243,7 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) struct hinic_pfhwdev *pfhwdev; struct hinic_hwdev *hwdev; struct hinic_hwif *hwif; - int err; + int err, num_aeqs; hwif = devm_kzalloc(&pdev->dev, sizeof(*hwif), GFP_KERNEL); if (!hwif) @@ -144,15 +276,37 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) goto err_init_msix; } + num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); + + err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs, + HINIC_DEFAULT_AEQ_LEN, HINIC_EQ_PAGE_SIZE, + hwdev->msix_entries); + if (err) { + dev_err(&pdev->dev, "Failed to init async event queues\n"); + goto err_aeqs_init; + } + err = init_pfhwdev(pfhwdev); if (err) { dev_err(&pdev->dev, "Failed to init PF HW device\n"); goto err_init_pfhwdev; } + err = get_dev_cap(hwdev); + if (err) { + dev_err(&pdev->dev, "Failed to get device capabilities\n"); + goto err_dev_cap; + } + return hwdev; +err_dev_cap: + free_pfhwdev(pfhwdev); + err_init_pfhwdev: + hinic_aeqs_free(&hwdev->aeqs); + +err_aeqs_init: disable_msix(hwdev); err_init_msix: @@ -174,6 +328,8 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev) free_pfhwdev(pfhwdev); + hinic_aeqs_free(&hwdev->aeqs); + disable_msix(hwdev); hinic_free_hwif(hwdev->hwif); @@ -187,15 +343,7 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev) **/ int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev) { - int num_aeqs, num_ceqs, nr_irqs, num_qps; + struct hinic_cap *nic_cap = &hwdev->nic_cap; - num_aeqs = HINIC_HWIF_NUM_AEQS(hwdev->hwif); - num_ceqs = HINIC_HWIF_NUM_CEQS(hwdev->hwif); - nr_irqs = HINIC_HWIF_NUM_IRQS(hwdev->hwif); - - /* Each QP has its own (SQ + RQ) interrupt */ - num_qps = (nr_irqs - (num_aeqs + num_ceqs)) / 2; - - /* num_qps must be power of 2 */ - return BIT(fls(num_qps) - 1); + 
return nic_cap->num_qps; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index b42e0ebdd97b..feb601388e59 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -17,20 +17,32 @@ #define HINIC_HW_DEV_H #include +#include #include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" +#include "hinic_hw_mgmt.h" #define HINIC_MAX_QPS 32 +struct hinic_cap { + u16 max_qps; + u16 num_qps; +}; + struct hinic_hwdev { struct hinic_hwif *hwif; struct msix_entry *msix_entries; + + struct hinic_aeqs aeqs; + + struct hinic_cap nic_cap; }; struct hinic_pfhwdev { struct hinic_hwdev hwdev; - /* PF Extended components should be here */ + struct hinic_pf_to_mgmt pf_to_mgmt; }; struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c new file mode 100644 index 000000000000..a099d20bac8c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -0,0 +1,149 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" + +#define HINIC_EQS_WQ_NAME "hinic_eqs" + +/** + * hinic_aeq_register_hw_cb - register AEQ callback for specific event + * @aeqs: pointer to Async eqs of the chip + * @event: aeq event to register callback for it + * @handle: private data will be used by the callback + * @hw_handler: callback function + **/ +void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, + enum hinic_aeq_type event, void *handle, + void (*hwe_handler)(void *handle, void *data, + u8 size)) +{ + struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; + + hwe_cb->hwe_handler = hwe_handler; + hwe_cb->handle = handle; + hwe_cb->hwe_state = HINIC_EQE_ENABLED; +} + +/** + * hinic_aeq_unregister_hw_cb - unregister the AEQ callback for specific event + * @aeqs: pointer to Async eqs of the chip + * @event: aeq event to unregister callback for it + **/ +void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, + enum hinic_aeq_type event) +{ + struct hinic_hw_event_cb *hwe_cb = &aeqs->hwe_cb[event]; + + hwe_cb->hwe_state &= ~HINIC_EQE_ENABLED; + + while (hwe_cb->hwe_state & HINIC_EQE_RUNNING) + schedule(); + + hwe_cb->hwe_handler = NULL; +} + +/** + * init_eq - initialize Event Queue + * @eq: the event queue + * @hwif: the HW interface of a PCI function device + * @type: the type of the event queue, aeq or ceq + * @q_id: Queue id number + * @q_len: the number of EQ elements + * @page_size: the page size of the pages in the event queue + * @entry: msix entry associated with the event queue + * + * Return 0 - Success, Negative - Failure + **/ +static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, + enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, + struct msix_entry entry) +{ + /* should be implemented */ + return 0; +} + +/** + * remove_eq - remove Event Queue + 
* @eq: the event queue + **/ +static void remove_eq(struct hinic_eq *eq) +{ + /* should be implemented */ +} + +/** + * hinic_aeqs_init - initialize all the aeqs + * @aeqs: pointer to Async eqs of the chip + * @hwif: the HW interface of a PCI function device + * @num_aeqs: number of AEQs + * @q_len: number of EQ elements + * @page_size: the page size of the pages in the event queue + * @msix_entries: msix entries associated with the event queues + * + * Return 0 - Success, negative - Failure + **/ +int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, + int num_aeqs, u32 q_len, u32 page_size, + struct msix_entry *msix_entries) +{ + struct pci_dev *pdev = hwif->pdev; + int err, i, q_id; + + aeqs->workq = create_singlethread_workqueue(HINIC_EQS_WQ_NAME); + if (!aeqs->workq) + return -ENOMEM; + + aeqs->hwif = hwif; + aeqs->num_aeqs = num_aeqs; + + for (q_id = 0; q_id < num_aeqs; q_id++) { + err = init_eq(&aeqs->aeq[q_id], hwif, HINIC_AEQ, q_id, q_len, + page_size, msix_entries[q_id]); + if (err) { + dev_err(&pdev->dev, "Failed to init aeq %d\n", q_id); + goto err_init_aeq; + } + } + + return 0; + +err_init_aeq: + for (i = 0; i < q_id; i++) + remove_eq(&aeqs->aeq[i]); + + destroy_workqueue(aeqs->workq); + return err; +} + +/** + * hinic_aeqs_free - free all the aeqs + * @aeqs: pointer to Async eqs of the chip + **/ +void hinic_aeqs_free(struct hinic_aeqs *aeqs) +{ + int q_id; + + for (q_id = 0; q_id < aeqs->num_aeqs ; q_id++) + remove_eq(&aeqs->aeq[q_id]); + + destroy_workqueue(aeqs->workq); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h new file mode 100644 index 000000000000..1580127b26c4 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h @@ -0,0 +1,107 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_HW_EQS_H +#define HINIC_HW_EQS_H + +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" + +#define HINIC_MAX_AEQS 4 + +#define HINIC_DEFAULT_AEQ_LEN 64 + +#define HINIC_EQ_PAGE_SIZE SZ_4K + +enum hinic_eq_type { + HINIC_AEQ, +}; + +enum hinic_aeq_type { + HINIC_MSG_FROM_MGMT_CPU = 2, + + HINIC_MAX_AEQ_EVENTS, +}; + +enum hinic_eqe_state { + HINIC_EQE_ENABLED = BIT(0), + HINIC_EQE_RUNNING = BIT(1), +}; + +struct hinic_eq_work { + struct work_struct work; + void *data; +}; + +struct hinic_eq { + struct hinic_hwif *hwif; + + enum hinic_eq_type type; + int q_id; + u32 q_len; + u32 page_size; + + u32 cons_idx; + int wrapped; + + size_t elem_size; + int num_pages; + int num_elem_in_pg; + + struct msix_entry msix_entry; + + dma_addr_t *dma_addr; + void **virt_addr; + + struct hinic_eq_work aeq_work; +}; + +struct hinic_hw_event_cb { + void (*hwe_handler)(void *handle, void *data, u8 size); + void *handle; + unsigned long hwe_state; +}; + +struct hinic_aeqs { + struct hinic_hwif *hwif; + + struct hinic_eq aeq[HINIC_MAX_AEQS]; + int num_aeqs; + + struct hinic_hw_event_cb hwe_cb[HINIC_MAX_AEQ_EVENTS]; + + struct workqueue_struct *workq; +}; + +void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, + enum hinic_aeq_type event, void *handle, + void (*hwe_handler)(void *handle, void *data, + u8 size)); + +void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, + enum hinic_aeq_type event); + +int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, + int num_aeqs, u32 q_len, u32 page_size, + struct msix_entry *msix_entries); + +void hinic_aeqs_free(struct hinic_aeqs *aeqs); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h index d1a8fa2bc3ee..b6d985042907 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -119,6 +119,14 @@ enum hinic_func_type { HINIC_PPF = 2, }; +enum hinic_mod_type { + HINIC_MOD_COMM = 0, /* HW communication module */ + HINIC_MOD_L2NIC = 1, /* L2NIC module */ + HINIC_MOD_CFGM = 7, /* Configuration module */ + + HINIC_MOD_MAX = 15 +}; + struct hinic_func_attr { u16 func_idx; u8 pf_idx; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c new file mode 100644 index 000000000000..8ae8ed90dbec --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -0,0 +1,92 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw_dev.h" + +#define mgmt_to_pfhwdev(pf_mgmt) \ + container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) + +/** + * hinic_msg_to_mgmt - send message to mgmt + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @buf_in: the msg data + * @in_size: the msg data length + * @buf_out: response + * @out_size: returned response length + * @sync: sync msg or async msg + * + * Return 0 - Success, negative - Failure + **/ +int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size, + enum hinic_mgmt_msg_type sync) +{ + /* should be implemented */ + return -EINVAL; +} + +/** + * mgmt_msg_aeqe_handler - handler for a mgmt message event + * @handle: PF to MGMT channel + * @data: the header of the message + * @size: unused + **/ +static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size) +{ + /* should be implemented */ +} + +/** + * hinic_pf_to_mgmt_init - initialize PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * @hwif: HW interface the PF to MGMT will use for accessing HW + * + * Return 0 - Success, negative - Failure + **/ +int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_hwif *hwif) +{ + struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); + struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + + pf_to_mgmt->hwif = hwif; + + hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU, + pf_to_mgmt, + mgmt_msg_aeqe_handler); + return 0; +} + +/** + * hinic_pf_to_mgmt_free - free PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + **/ +void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt) +{ + struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); + struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + + hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h new file mode 100644 index 000000000000..b4b34b703447 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -0,0 +1,45 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_HW_MGMT_H +#define HINIC_HW_MGMT_H + +#include + +#include "hinic_hw_if.h" + +enum hinic_mgmt_msg_type { + HINIC_MGMT_MSG_SYNC = 1, +}; + +enum hinic_cfg_cmd { + HINIC_CFG_NIC_CAP = 0, +}; + +struct hinic_pf_to_mgmt { + struct hinic_hwif *hwif; +}; + +int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size, + enum hinic_mgmt_msg_type sync); + +int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_hwif *hwif); + +void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt); + +#endif -- cgit v1.2.3-55-g7522 From eabf0fad81d52b8e23f3a6eb5d2c8cac452f50ee Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:49 +0800 Subject: net-next/hinic: Initialize api cmd resources Initialize api cmd resources as part of management initialization. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/Makefile | 4 +- .../net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 446 +++++++++++++++++++++ .../net/ethernet/huawei/hinic/hinic_hw_api_cmd.h | 102 +++++ drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 10 + drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 3 + 5 files changed, 563 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile index 717ad71213fb..beba90a6dc96 100644 --- a/drivers/net/ethernet/huawei/hinic/Makefile +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_HINIC) += hinic.o -hinic-y := hinic_main.o hinic_hw_dev.o hinic_hw_mgmt.o hinic_hw_eqs.o \ - hinic_hw_if.o +hinic-y := hinic_main.o hinic_hw_dev.o hinic_hw_mgmt.o hinic_hw_api_cmd.o \ + hinic_hw_eqs.o hinic_hw_if.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c new file mode 100644 index 000000000000..4291f8eb45cc --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -0,0 +1,446 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_api_cmd.h" + +#define API_CHAIN_NUM_CELLS 32 + +#define API_CMD_CELL_SIZE_SHIFT 6 +#define API_CMD_CELL_SIZE_MIN (BIT(API_CMD_CELL_SIZE_SHIFT)) + +#define API_CMD_CELL_SIZE(cell_size) \ + (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? 
\ + (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN) + +#define API_CMD_BUF_SIZE 2048 + +/** + * api_cmd_chain_hw_init - initialize the chain in the HW + * @chain: the API CMD specific chain to initialize in HW + * + * Return 0 - Success, negative - Failure + **/ +static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain) +{ + /* should be implemented */ + return 0; +} + +/** + * free_cmd_buf - free the dma buffer of API CMD command + * @chain: the API CMD specific chain of the cmd + * @cell_idx: the cell index of the cmd + **/ +static void free_cmd_buf(struct hinic_api_cmd_chain *chain, int cell_idx) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + dma_free_coherent(&pdev->dev, API_CMD_BUF_SIZE, + cell_ctxt->api_cmd_vaddr, + cell_ctxt->api_cmd_paddr); +} + +/** + * alloc_cmd_buf - allocate a dma buffer for API CMD command + * @chain: the API CMD specific chain for the cmd + * @cell: the cell in the HW for the cmd + * @cell_idx: the index of the cell + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_cmd_buf(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_cell *cell, int cell_idx) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + dma_addr_t cmd_paddr; + u8 *cmd_vaddr; + int err = 0; + + cmd_vaddr = dma_zalloc_coherent(&pdev->dev, API_CMD_BUF_SIZE, + &cmd_paddr, GFP_KERNEL); + if (!cmd_vaddr) { + dev_err(&pdev->dev, "Failed to allocate API CMD DMA memory\n"); + return -ENOMEM; + } + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + cell_ctxt->api_cmd_vaddr = cmd_vaddr; + cell_ctxt->api_cmd_paddr = cmd_paddr; + + /* set the cmd DMA address in the cell */ + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + /* The data in the HW should be in Big Endian Format */ + cell->write.hw_cmd_paddr = cpu_to_be64(cmd_paddr); + break; + + default: + dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); + free_cmd_buf(chain, cell_idx); + err = -EINVAL; + break; + } + + return err; +} + +/** + * api_cmd_create_cell - create API CMD cell for specific chain + * @chain: the API CMD specific chain to create its cell + * @cell_idx: the index of the cell to create + * @pre_node: previous cell + * @node_vaddr: the returned virt addr of the cell + * + * Return 0 - Success, negative - Failure + **/ +static int api_cmd_create_cell(struct hinic_api_cmd_chain *chain, + int cell_idx, + struct hinic_api_cmd_cell *pre_node, + struct hinic_api_cmd_cell **node_vaddr) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_api_cmd_cell *node; + dma_addr_t node_paddr; + int err; + + node = dma_zalloc_coherent(&pdev->dev, chain->cell_size, + &node_paddr, GFP_KERNEL); + if (!node) { + dev_err(&pdev->dev, "Failed to allocate dma API CMD cell\n"); + return -ENOMEM; + } + + node->read.hw_wb_resp_paddr = 0; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + cell_ctxt->cell_vaddr = node; + cell_ctxt->cell_paddr = node_paddr; + + if (!pre_node) { + chain->head_cell_paddr = node_paddr; + chain->head_node = node; + } else { + /* The data in the HW should be in Big Endian Format */ + pre_node->next_cell_paddr = cpu_to_be64(node_paddr); + } + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + err = alloc_cmd_buf(chain, node, cell_idx); + if (err) { + dev_err(&pdev->dev, 
"Failed to allocate cmd buffer\n"); + goto err_alloc_cmd_buf; + } + break; + + default: + dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); + err = -EINVAL; + goto err_alloc_cmd_buf; + } + + *node_vaddr = node; + return 0; + +err_alloc_cmd_buf: + dma_free_coherent(&pdev->dev, chain->cell_size, node, node_paddr); + return err; +} + +/** + * api_cmd_destroy_cell - destroy API CMD cell of specific chain + * @chain: the API CMD specific chain to destroy its cell + * @cell_idx: the cell to destroy + **/ +static void api_cmd_destroy_cell(struct hinic_api_cmd_chain *chain, + int cell_idx) +{ + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_api_cmd_cell *node; + dma_addr_t node_paddr; + size_t node_size; + + cell_ctxt = &chain->cell_ctxt[cell_idx]; + + node = cell_ctxt->cell_vaddr; + node_paddr = cell_ctxt->cell_paddr; + node_size = chain->cell_size; + + if (cell_ctxt->api_cmd_vaddr) { + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + free_cmd_buf(chain, cell_idx); + break; + default: + dev_err(&pdev->dev, "Unsupported API CMD chain type\n"); + break; + } + + dma_free_coherent(&pdev->dev, node_size, node, + node_paddr); + } +} + +/** + * api_cmd_destroy_cells - destroy API CMD cells of specific chain + * @chain: the API CMD specific chain to destroy its cells + * @num_cells: number of cells to destroy + **/ +static void api_cmd_destroy_cells(struct hinic_api_cmd_chain *chain, + int num_cells) +{ + int cell_idx; + + for (cell_idx = 0; cell_idx < num_cells; cell_idx++) + api_cmd_destroy_cell(chain, cell_idx); +} + +/** + * api_cmd_create_cells - create API CMD cells for specific chain + * @chain: the API CMD specific chain + * + * Return 0 - Success, negative - Failure + **/ +static int api_cmd_create_cells(struct hinic_api_cmd_chain *chain) +{ + struct hinic_api_cmd_cell *node = NULL, *pre_node = NULL; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + int err, cell_idx; + + for (cell_idx = 0; cell_idx < chain->num_cells; cell_idx++) { + err = api_cmd_create_cell(chain, cell_idx, pre_node, &node); + if (err) { + dev_err(&pdev->dev, "Failed to create API CMD cell\n"); + goto err_create_cell; + } + + pre_node = node; + } + + /* set the Final node to point on the start */ + node->next_cell_paddr = cpu_to_be64(chain->head_cell_paddr); + + /* set the current node to be the head */ + chain->curr_node = chain->head_node; + return 0; + +err_create_cell: + api_cmd_destroy_cells(chain, cell_idx); + return err; +} + +/** + * api_chain_init - initialize API CMD specific chain + * @chain: the API CMD specific chain to initialize + * @attr: attributes to set in the chain + * + * Return 0 - Success, negative - Failure + **/ +static int api_chain_init(struct hinic_api_cmd_chain *chain, + struct hinic_api_cmd_chain_attr *attr) +{ + struct hinic_hwif *hwif = attr->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t cell_ctxt_size; + + chain->hwif = hwif; + chain->chain_type = attr->chain_type; + chain->num_cells = attr->num_cells; + chain->cell_size = attr->cell_size; + + chain->prod_idx = 0; + chain->cons_idx = 0; + + cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); + chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL); + if (!chain->cell_ctxt) + return -ENOMEM; + + chain->wb_status = dma_zalloc_coherent(&pdev->dev, + sizeof(*chain->wb_status), + &chain->wb_status_paddr, + GFP_KERNEL); + if (!chain->wb_status) { + dev_err(&pdev->dev, 
"Failed to allocate DMA wb status\n"); + return -ENOMEM; + } + + return 0; +} + +/** + * api_chain_free - free API CMD specific chain + * @chain: the API CMD specific chain to free + **/ +static void api_chain_free(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + + dma_free_coherent(&pdev->dev, sizeof(*chain->wb_status), + chain->wb_status, chain->wb_status_paddr); +} + +/** + * api_cmd_create_chain - create API CMD specific chain + * @attr: attributes to set the chain + * + * Return the created chain + **/ +static struct hinic_api_cmd_chain * + api_cmd_create_chain(struct hinic_api_cmd_chain_attr *attr) +{ + struct hinic_hwif *hwif = attr->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_api_cmd_chain *chain; + int err; + + if (attr->num_cells & (attr->num_cells - 1)) { + dev_err(&pdev->dev, "Invalid number of cells, must be power of 2\n"); + return ERR_PTR(-EINVAL); + } + + chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); + if (!chain) + return ERR_PTR(-ENOMEM); + + err = api_chain_init(chain, attr); + if (err) { + dev_err(&pdev->dev, "Failed to initialize chain\n"); + return ERR_PTR(err); + } + + err = api_cmd_create_cells(chain); + if (err) { + dev_err(&pdev->dev, "Failed to create cells for API CMD chain\n"); + goto err_create_cells; + } + + err = api_cmd_chain_hw_init(chain); + if (err) { + dev_err(&pdev->dev, "Failed to initialize chain HW\n"); + goto err_chain_hw_init; + } + + return chain; + +err_chain_hw_init: + api_cmd_destroy_cells(chain, chain->num_cells); + +err_create_cells: + api_chain_free(chain); + return ERR_PTR(err); +} + +/** + * api_cmd_destroy_chain - destroy API CMD specific chain + * @chain: the API CMD specific chain to destroy + **/ +static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain) +{ + api_cmd_destroy_cells(chain, chain->num_cells); + api_chain_free(chain); +} + +/** + * hinic_api_cmd_init - Initialize all the API CMD chains + * @chain: the API CMD chains that are initialized + * @hwif: the hardware interface of a pci function device + * + * Return 0 - Success, negative - Failure + **/ +int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain, + struct hinic_hwif *hwif) +{ + enum hinic_api_cmd_chain_type type, chain_type; + struct hinic_api_cmd_chain_attr attr; + struct pci_dev *pdev = hwif->pdev; + size_t hw_cell_sz; + int err; + + hw_cell_sz = sizeof(struct hinic_api_cmd_cell); + + attr.hwif = hwif; + attr.num_cells = API_CHAIN_NUM_CELLS; + attr.cell_size = API_CMD_CELL_SIZE(hw_cell_sz); + + chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; + for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { + attr.chain_type = chain_type; + + if (chain_type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) + continue; + + chain[chain_type] = api_cmd_create_chain(&attr); + if (IS_ERR(chain[chain_type])) { + dev_err(&pdev->dev, "Failed to create chain %d\n", + chain_type); + goto err_create_chain; + } + } + + return 0; + +err_create_chain: + type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; + for ( ; type < chain_type; type++) { + if (type != HINIC_API_CMD_WRITE_TO_MGMT_CPU) + continue; + + api_cmd_destroy_chain(chain[type]); + } + + return err; +} + +/** + * hinic_api_cmd_free - free the API CMD chains + * @chain: the API CMD chains that are freed + **/ +void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain) +{ + enum hinic_api_cmd_chain_type chain_type; + + chain_type = HINIC_API_CMD_WRITE_TO_MGMT_CPU; + for ( ; chain_type < HINIC_API_CMD_MAX; chain_type++) { + if (chain_type != 
HINIC_API_CMD_WRITE_TO_MGMT_CPU) + continue; + + api_cmd_destroy_chain(chain[chain_type]); + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h new file mode 100644 index 000000000000..9c7d1e5ee00c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h @@ -0,0 +1,102 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_API_CMD_H +#define HINIC_HW_API_CMD_H + +#include + +#include "hinic_hw_if.h" + +enum hinic_api_cmd_chain_type { + HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2, + + HINIC_API_CMD_MAX, +}; + +struct hinic_api_cmd_chain_attr { + struct hinic_hwif *hwif; + enum hinic_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; +}; + +struct hinic_api_cmd_status { + u64 header; + u32 status; + u32 rsvd0; + u32 rsvd1; + u32 rsvd2; + u64 rsvd3; +}; + +/* HW struct */ +struct hinic_api_cmd_cell { + u64 ctrl; + + /* address is 64 bit in HW struct */ + u64 next_cell_paddr; + + u64 desc; + + /* HW struct */ + union { + struct { + u64 hw_cmd_paddr; + } write; + + struct { + u64 hw_wb_resp_paddr; + u64 hw_cmd_paddr; + } read; + }; +}; + +struct hinic_api_cmd_cell_ctxt { + dma_addr_t cell_paddr; + struct hinic_api_cmd_cell *cell_vaddr; + + dma_addr_t api_cmd_paddr; + u8 *api_cmd_vaddr; +}; + +struct hinic_api_cmd_chain { + struct hinic_hwif *hwif; + enum hinic_api_cmd_chain_type chain_type; + + u32 num_cells; + u16 cell_size; + + /* HW members in 24 bit format */ + u32 prod_idx; + u32 cons_idx; + + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + + dma_addr_t wb_status_paddr; + struct hinic_api_cmd_status *wb_status; + + dma_addr_t head_cell_paddr; + struct hinic_api_cmd_cell *head_node; + struct hinic_api_cmd_cell *curr_node; +}; + +int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain, + struct hinic_hwif *hwif); + +void hinic_api_cmd_free(struct hinic_api_cmd_chain **chain); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index 8ae8ed90dbec..f914bc77dc62 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -19,6 +19,7 @@ #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" +#include "hinic_hw_api_cmd.h" #include "hinic_hw_mgmt.h" #include "hinic_hw_dev.h" @@ -70,9 +71,17 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, { struct hinic_pfhwdev *pfhwdev = mgmt_to_pfhwdev(pf_to_mgmt); struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + struct pci_dev *pdev = hwif->pdev; + int err; pf_to_mgmt->hwif = hwif; + err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif); + if (err) { + dev_err(&pdev->dev, "Failed to initialize cmd chains\n"); + return err; + } + hinic_aeq_register_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU, pf_to_mgmt, mgmt_msg_aeqe_handler); @@ -89,4 +98,5 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt) struct hinic_hwdev *hwdev = &pfhwdev->hwdev; hinic_aeq_unregister_hw_cb(&hwdev->aeqs, 
HINIC_MSG_FROM_MGMT_CPU); + hinic_api_cmd_free(pf_to_mgmt->cmd_chain); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index b4b34b703447..dff321c1b793 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -19,6 +19,7 @@ #include #include "hinic_hw_if.h" +#include "hinic_hw_api_cmd.h" enum hinic_mgmt_msg_type { HINIC_MGMT_MSG_SYNC = 1, @@ -30,6 +31,8 @@ enum hinic_cfg_cmd { struct hinic_pf_to_mgmt { struct hinic_hwif *hwif; + + struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; }; int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, -- cgit v1.2.3-55-g7522 From 3dcea32193d35d9a4cb9ae56f0e831156bd0b479 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:50 +0800 Subject: net-next/hinic: Initialize api cmd hw Update the hardware about api cmd resources and initialize it. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- .../net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 173 ++++++++++++++++++++- .../net/ethernet/huawei/hinic/hinic_hw_api_cmd.h | 38 +++++ drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 26 ++++ 3 files changed, 236 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index 4291f8eb45cc..4bcdf358cda2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -13,6 +13,7 @@ * */ +#include #include #include #include @@ -21,8 +22,12 @@ #include #include #include +#include +#include +#include #include +#include "hinic_hw_csr.h" #include "hinic_hw_if.h" #include "hinic_hw_api_cmd.h" @@ -35,8 +40,157 @@ (((cell_size) >= API_CMD_CELL_SIZE_MIN) ? 
\ (1 << (fls(cell_size - 1))) : API_CMD_CELL_SIZE_MIN) +#define API_CMD_CELL_SIZE_VAL(size) \ + ilog2((size) >> API_CMD_CELL_SIZE_SHIFT) + #define API_CMD_BUF_SIZE 2048 +#define API_CMD_TIMEOUT 1000 + +enum api_cmd_xor_chk_level { + XOR_CHK_DIS = 0, + + XOR_CHK_ALL = 3, +}; + +/** + * api_cmd_hw_restart - restart the chain in the HW + * @chain: the API CMD specific chain to restart + * + * Return 0 - Success, negative - Failure + **/ +static int api_cmd_hw_restart(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + int err = -ETIMEDOUT; + unsigned long end; + u32 reg_addr, val; + + /* Read Modify Write */ + reg_addr = HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(hwif, reg_addr); + + val = HINIC_API_CMD_CHAIN_REQ_CLEAR(val, RESTART); + val |= HINIC_API_CMD_CHAIN_REQ_SET(1, RESTART); + + hinic_hwif_write_reg(hwif, reg_addr, val); + + end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); + do { + val = hinic_hwif_read_reg(hwif, reg_addr); + + if (!HINIC_API_CMD_CHAIN_REQ_GET(val, RESTART)) { + err = 0; + break; + } + + msleep(20); + } while (time_before(jiffies, end)); + + return err; +} + +/** + * api_cmd_ctrl_init - set the control register of a chain + * @chain: the API CMD specific chain to set control register for + **/ +static void api_cmd_ctrl_init(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + u32 addr, ctrl; + u16 cell_size; + + /* Read Modify Write */ + addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + cell_size = API_CMD_CELL_SIZE_VAL(chain->cell_size); + + ctrl = hinic_hwif_read_reg(hwif, addr); + + ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + ctrl |= HINIC_API_CMD_CHAIN_CTRL_SET(1, XOR_ERR) | + HINIC_API_CMD_CHAIN_CTRL_SET(XOR_CHK_ALL, XOR_CHK_EN) | + HINIC_API_CMD_CHAIN_CTRL_SET(cell_size, CELL_SIZE); + + hinic_hwif_write_reg(hwif, addr, ctrl); +} + +/** + * api_cmd_set_status_addr - set the status address of a chain in the HW + * @chain: the API CMD specific chain to set in HW status address for + **/ +static void api_cmd_set_status_addr(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->wb_status_paddr); + hinic_hwif_write_reg(hwif, addr, val); + + addr = HINIC_CSR_API_CMD_STATUS_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->wb_status_paddr); + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_set_num_cells - set the number cells of a chain in the HW + * @chain: the API CMD specific chain to set in HW the number of cells for + **/ +static void api_cmd_set_num_cells(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(chain->chain_type); + val = chain->num_cells; + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_head_init - set the head of a chain in the HW + * @chain: the API CMD specific chain to set in HW the head for + **/ +static void api_cmd_head_init(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + u32 addr, val; + + addr = HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(chain->chain_type); + val = upper_32_bits(chain->head_cell_paddr); + 
hinic_hwif_write_reg(hwif, addr, val); + + addr = HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(chain->chain_type); + val = lower_32_bits(chain->head_cell_paddr); + hinic_hwif_write_reg(hwif, addr, val); +} + +/** + * api_cmd_chain_hw_clean - clean the HW + * @chain: the API CMD specific chain + **/ +static void api_cmd_chain_hw_clean(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + u32 addr, ctrl; + + addr = HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(chain->chain_type); + + ctrl = hinic_hwif_read_reg(hwif, addr); + ctrl = HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, RESTART_WB_STAT) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_ERR) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, AEQE_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, XOR_CHK_EN) & + HINIC_API_CMD_CHAIN_CTRL_CLEAR(ctrl, CELL_SIZE); + + hinic_hwif_write_reg(hwif, addr, ctrl); +} + /** * api_cmd_chain_hw_init - initialize the chain in the HW * @chain: the API CMD specific chain to initialize in HW @@ -45,7 +199,23 @@ **/ static int api_cmd_chain_hw_init(struct hinic_api_cmd_chain *chain) { - /* should be implemented */ + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + api_cmd_chain_hw_clean(chain); + + api_cmd_set_status_addr(chain); + + err = api_cmd_hw_restart(chain); + if (err) { + dev_err(&pdev->dev, "Failed to restart API CMD HW\n"); + return err; + } + + api_cmd_ctrl_init(chain); + api_cmd_set_num_cells(chain); + api_cmd_head_init(chain); return 0; } @@ -373,6 +543,7 @@ err_create_cells: **/ static void api_cmd_destroy_chain(struct hinic_api_cmd_chain *chain) { + api_cmd_chain_hw_clean(chain); api_cmd_destroy_cells(chain, chain->num_cells); api_chain_free(chain); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h index 9c7d1e5ee00c..0c83b807111e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h @@ -20,6 +20,44 @@ #include "hinic_hw_if.h" +#define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 + +#define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1 + +#define HINIC_API_CMD_CHAIN_REQ_SET(val, member) \ + (((u32)(val) & HINIC_API_CMD_CHAIN_REQ_##member##_MASK) << \ + HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) + +#define HINIC_API_CMD_CHAIN_REQ_GET(val, member) \ + (((val) >> HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT) & \ + HINIC_API_CMD_CHAIN_REQ_##member##_MASK) + +#define HINIC_API_CMD_CHAIN_REQ_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_CHAIN_REQ_##member##_MASK \ + << HINIC_API_CMD_CHAIN_REQ_##member##_SHIFT))) + +#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_SHIFT 1 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_SHIFT 2 +#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_SHIFT 4 +#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_SHIFT 8 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_SHIFT 28 +#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_SHIFT 30 + +#define HINIC_API_CMD_CHAIN_CTRL_RESTART_WB_STAT_MASK 0x1 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_ERR_MASK 0x1 +#define HINIC_API_CMD_CHAIN_CTRL_AEQE_EN_MASK 0x1 +#define HINIC_API_CMD_CHAIN_CTRL_AEQ_ID_MASK 0x3 +#define HINIC_API_CMD_CHAIN_CTRL_XOR_CHK_EN_MASK 0x3 +#define HINIC_API_CMD_CHAIN_CTRL_CELL_SIZE_MASK 0x3 + +#define HINIC_API_CMD_CHAIN_CTRL_SET(val, member) \ + (((u32)(val) & HINIC_API_CMD_CHAIN_CTRL_##member##_MASK) << \ + HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT) + +#define HINIC_API_CMD_CHAIN_CTRL_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \ + << 
HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT))) + enum hinic_api_cmd_chain_type { HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h index c3440a9f5a1e..a9ece6db3414 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h @@ -33,4 +33,30 @@ #define HINIC_CSR_PPF_ELECTION_ADDR(idx) \ (HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE) +/* API CMD registers */ +#define HINIC_CSR_API_CMD_BASE 0xF000 + +#define HINIC_CSR_API_CMD_STRIDE 0x100 + +#define HINIC_CSR_API_CMD_CHAIN_HEAD_HI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x0 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_HEAD_LO_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x4 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_HI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x8 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_STATUS_LO_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0xC + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_NUM_CELLS_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x10 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE) + +#define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE) + #endif -- cgit v1.2.3-55-g7522 From 6dd8b68214f4af663faec507169a123222dfa7e7 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:51 +0800 Subject: net-next/hinic: Add management messages Add the management messages for sending to api cmd and the asynchronous event handler for the completion of the messages. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- .../net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 35 ++ .../net/ethernet/huawei/hinic/hinic_hw_api_cmd.h | 3 + drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 5 + drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 439 ++++++++++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 59 +++ 5 files changed, 538 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index 4bcdf358cda2..7e7a76e7d049 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -53,6 +53,41 @@ enum api_cmd_xor_chk_level { XOR_CHK_ALL = 3, }; +/** + * api_cmd - API CMD command + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * + * Return 0 - Success, negative - Failure + **/ +static int api_cmd(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, u8 *cmd, u16 cmd_size) +{ + /* should be implemented */ + return -EINVAL; +} + +/** + * hinic_api_cmd_write - Write API CMD command + * @chain: chain for write command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @size: the command size + * + * Return 0 - Success, negative - Failure + **/ +int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, u8 *cmd, u16 size) +{ + /* Verify the chain type */ + if (chain->chain_type == HINIC_API_CMD_WRITE_TO_MGMT_CPU) + return api_cmd(chain, dest, cmd, size); + + return -EINVAL; +} + /** * api_cmd_hw_restart - restart the chain in the HW * @chain: the API CMD specific chain to restart diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h index 0c83b807111e..e8865d627b58 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h @@ -132,6 +132,9 @@ struct hinic_api_cmd_chain { struct hinic_api_cmd_cell *curr_node; }; +int hinic_api_cmd_write(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, u8 *cmd, u16 size); + int hinic_api_cmd_init(struct hinic_api_cmd_chain **chain, struct hinic_hwif *hwif); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h index b6d985042907..98623d69f2ba 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -93,6 +93,7 @@ #define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) #define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx) #define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx) +#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx) #define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type) #define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF) @@ -127,6 +128,10 @@ enum hinic_mod_type { HINIC_MOD_MAX = 15 }; +enum hinic_node_id { + HINIC_NODE_ID_MGMT = 21, +}; + struct hinic_func_attr { u16 func_idx; u8 pf_idx; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index f914bc77dc62..0150b71a5aba 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -16,6 +16,12 @@ #include #include #include +#include +#include +#include +#include +#include +#include #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" @@ -23,9 +29,267 @@ 
#include "hinic_hw_mgmt.h" #include "hinic_hw_dev.h" +#define SYNC_MSG_ID_MASK 0x1FF + +#define SYNC_MSG_ID(pf_to_mgmt) ((pf_to_mgmt)->sync_msg_id) + +#define SYNC_MSG_ID_INC(pf_to_mgmt) (SYNC_MSG_ID(pf_to_mgmt) = \ + ((SYNC_MSG_ID(pf_to_mgmt) + 1) & \ + SYNC_MSG_ID_MASK)) + +#define MSG_SZ_IS_VALID(in_size) ((in_size) <= MAX_MSG_LEN) + +#define MGMT_MSG_LEN_MIN 20 +#define MGMT_MSG_LEN_STEP 16 +#define MGMT_MSG_RSVD_FOR_DEV 8 + +#define SEGMENT_LEN 48 + +#define MAX_PF_MGMT_BUF_SIZE 2048 + +/* Data should be SEG LEN size aligned */ +#define MAX_MSG_LEN 2016 + +#define MSG_NOT_RESP 0xFFFF + +#define MGMT_MSG_TIMEOUT 1000 + #define mgmt_to_pfhwdev(pf_mgmt) \ container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) +enum msg_segment_type { + NOT_LAST_SEGMENT = 0, + LAST_SEGMENT = 1, +}; + +enum mgmt_direction_type { + MGMT_DIRECT_SEND = 0, + MGMT_RESP = 1, +}; + +enum msg_ack_type { + MSG_ACK = 0, + MSG_NO_ACK = 1, +}; + +/** + * prepare_header - prepare the header of the message + * @pf_to_mgmt: PF to MGMT channel + * @msg_len: the length of the message + * @mod: module in the chip that will get the message + * @ack_type: ask for response + * @direction: the direction of the message + * @cmd: command of the message + * @msg_id: message id + * + * Return the prepared header value + **/ +static u64 prepare_header(struct hinic_pf_to_mgmt *pf_to_mgmt, + u16 msg_len, enum hinic_mod_type mod, + enum msg_ack_type ack_type, + enum mgmt_direction_type direction, + u16 cmd, u16 msg_id) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + + return HINIC_MSG_HEADER_SET(msg_len, MSG_LEN) | + HINIC_MSG_HEADER_SET(mod, MODULE) | + HINIC_MSG_HEADER_SET(SEGMENT_LEN, SEG_LEN) | + HINIC_MSG_HEADER_SET(ack_type, NO_ACK) | + HINIC_MSG_HEADER_SET(0, ASYNC_MGMT_TO_PF) | + HINIC_MSG_HEADER_SET(0, SEQID) | + HINIC_MSG_HEADER_SET(LAST_SEGMENT, LAST) | + HINIC_MSG_HEADER_SET(direction, DIRECTION) | + HINIC_MSG_HEADER_SET(cmd, CMD) | + HINIC_MSG_HEADER_SET(HINIC_HWIF_PCI_INTF(hwif), PCI_INTF) | + HINIC_MSG_HEADER_SET(HINIC_HWIF_PF_IDX(hwif), PF_IDX) | + HINIC_MSG_HEADER_SET(msg_id, MSG_ID); +} + +/** + * prepare_mgmt_cmd - prepare the mgmt command + * @mgmt_cmd: pointer to the command to prepare + * @header: pointer of the header for the message + * @msg: the data of the message + * @msg_len: the length of the message + **/ +static void prepare_mgmt_cmd(u8 *mgmt_cmd, u64 *header, u8 *msg, u16 msg_len) +{ + memset(mgmt_cmd, 0, MGMT_MSG_RSVD_FOR_DEV); + + mgmt_cmd += MGMT_MSG_RSVD_FOR_DEV; + memcpy(mgmt_cmd, header, sizeof(*header)); + + mgmt_cmd += sizeof(*header); + memcpy(mgmt_cmd, msg, msg_len); +} + +/** + * mgmt_msg_len - calculate the total message length + * @msg_data_len: the length of the message data + * + * Return the total message length + **/ +static u16 mgmt_msg_len(u16 msg_data_len) +{ + /* RSVD + HEADER_SIZE + DATA_LEN */ + u16 msg_len = MGMT_MSG_RSVD_FOR_DEV + sizeof(u64) + msg_data_len; + + if (msg_len > MGMT_MSG_LEN_MIN) + msg_len = MGMT_MSG_LEN_MIN + + ALIGN((msg_len - MGMT_MSG_LEN_MIN), + MGMT_MSG_LEN_STEP); + else + msg_len = MGMT_MSG_LEN_MIN; + + return msg_len; +} + +/** + * send_msg_to_mgmt - send message to mgmt by API CMD + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @data: the msg data + * @data_len: the msg data length + * @ack_type: ask for response + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * + * Return 0 - Success, negative - Failure + **/ +static int 
send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + u8 *data, u16 data_len, + enum msg_ack_type ack_type, + enum mgmt_direction_type direction, + u16 resp_msg_id) +{ + struct hinic_api_cmd_chain *chain; + u64 header; + u16 msg_id; + + msg_id = SYNC_MSG_ID(pf_to_mgmt); + + if (direction == MGMT_RESP) { + header = prepare_header(pf_to_mgmt, data_len, mod, ack_type, + direction, cmd, resp_msg_id); + } else { + SYNC_MSG_ID_INC(pf_to_mgmt); + header = prepare_header(pf_to_mgmt, data_len, mod, ack_type, + direction, cmd, msg_id); + } + + prepare_mgmt_cmd(pf_to_mgmt->sync_msg_buf, &header, data, data_len); + + chain = pf_to_mgmt->cmd_chain[HINIC_API_CMD_WRITE_TO_MGMT_CPU]; + return hinic_api_cmd_write(chain, HINIC_NODE_ID_MGMT, + pf_to_mgmt->sync_msg_buf, + mgmt_msg_len(data_len)); +} + +/** + * msg_to_mgmt_sync - send sync message to mgmt + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @buf_in: the msg data + * @in_size: the msg data length + * @buf_out: response + * @out_size: response length + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * + * Return 0 - Success, negative - Failure + **/ +static int msg_to_mgmt_sync(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + u8 *buf_in, u16 in_size, + u8 *buf_out, u16 *out_size, + enum mgmt_direction_type direction, + u16 resp_msg_id) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_recv_msg *recv_msg; + struct completion *recv_done; + u16 msg_id; + int err; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + + recv_msg = &pf_to_mgmt->recv_resp_msg_from_mgmt; + recv_done = &recv_msg->recv_done; + + if (resp_msg_id == MSG_NOT_RESP) + msg_id = SYNC_MSG_ID(pf_to_mgmt); + else + msg_id = resp_msg_id; + + init_completion(recv_done); + + err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size, + MSG_ACK, direction, resp_msg_id); + if (err) { + dev_err(&pdev->dev, "Failed to send sync msg to mgmt\n"); + goto unlock_sync_msg; + } + + if (!wait_for_completion_timeout(recv_done, MGMT_MSG_TIMEOUT)) { + dev_err(&pdev->dev, "MGMT timeout, MSG id = %d\n", msg_id); + err = -ETIMEDOUT; + goto unlock_sync_msg; + } + + smp_rmb(); /* verify reading after completion */ + + if (recv_msg->msg_id != msg_id) { + dev_err(&pdev->dev, "incorrect MSG for id = %d\n", msg_id); + err = -EFAULT; + goto unlock_sync_msg; + } + + if ((buf_out) && (recv_msg->msg_len <= MAX_PF_MGMT_BUF_SIZE)) { + memcpy(buf_out, recv_msg->msg, recv_msg->msg_len); + *out_size = recv_msg->msg_len; + } + +unlock_sync_msg: + up(&pf_to_mgmt->sync_msg_lock); + return err; +} + +/** + * msg_to_mgmt_async - send message to mgmt without response + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that will get the message + * @cmd: command of the message + * @buf_in: the msg data + * @in_size: the msg data length + * @direction: the direction of the original message + * @resp_msg_id: msg id to response for + * + * Return 0 - Success, negative - Failure + **/ +static int msg_to_mgmt_async(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, u8 cmd, + u8 *buf_in, u16 in_size, + enum mgmt_direction_type direction, + u16 resp_msg_id) +{ + int err; + + /* Lock the sync_msg_buf */ + down(&pf_to_mgmt->sync_msg_lock); + + err = send_msg_to_mgmt(pf_to_mgmt, mod, cmd, buf_in, in_size, + MSG_NO_ACK, direction, resp_msg_id); + + 
up(&pf_to_mgmt->sync_msg_lock); + return err; +} + /** * hinic_msg_to_mgmt - send message to mgmt * @pf_to_mgmt: PF to MGMT channel @@ -44,8 +308,98 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, enum hinic_mgmt_msg_type sync) { - /* should be implemented */ - return -EINVAL; + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + + if (sync != HINIC_MGMT_MSG_SYNC) { + dev_err(&pdev->dev, "Invalid MGMT msg type\n"); + return -EINVAL; + } + + if (!MSG_SZ_IS_VALID(in_size)) { + dev_err(&pdev->dev, "Invalid MGMT msg buffer size\n"); + return -EINVAL; + } + + return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, + buf_out, out_size, MGMT_DIRECT_SEND, + MSG_NOT_RESP); +} + +/** + * mgmt_recv_msg_handler - handler for message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + u8 *buf_out = recv_msg->buf_out; + u16 out_size = 0; + + dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n", recv_msg->mod); + + if (!recv_msg->async_mgmt_to_pf) + /* MGMT sent sync msg, send the response */ + msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd, + buf_out, out_size, MGMT_RESP, + recv_msg->msg_id); +} + +/** + * mgmt_resp_msg_handler - handler for a response message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: received message details + **/ +static void mgmt_resp_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg) +{ + wmb(); /* verify writing all, before reading */ + + complete(&recv_msg->recv_done); +} + +/** + * recv_mgmt_msg_handler - handler for a message from mgmt cpu + * @pf_to_mgmt: PF to MGMT channel + * @header: the header of the message + * @recv_msg: received message details + **/ +static void recv_mgmt_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, + u64 *header, struct hinic_recv_msg *recv_msg) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + int seq_id, seg_len; + u8 *msg_body; + + seq_id = HINIC_MSG_HEADER_GET(*header, SEQID); + seg_len = HINIC_MSG_HEADER_GET(*header, SEG_LEN); + + if (seq_id >= (MAX_MSG_LEN / SEGMENT_LEN)) { + dev_err(&pdev->dev, "recv big mgmt msg\n"); + return; + } + + msg_body = (u8 *)header + sizeof(*header); + memcpy(recv_msg->msg + seq_id * SEGMENT_LEN, msg_body, seg_len); + + if (!HINIC_MSG_HEADER_GET(*header, LAST)) + return; + + recv_msg->cmd = HINIC_MSG_HEADER_GET(*header, CMD); + recv_msg->mod = HINIC_MSG_HEADER_GET(*header, MODULE); + recv_msg->async_mgmt_to_pf = HINIC_MSG_HEADER_GET(*header, + ASYNC_MGMT_TO_PF); + recv_msg->msg_len = HINIC_MSG_HEADER_GET(*header, MSG_LEN); + recv_msg->msg_id = HINIC_MSG_HEADER_GET(*header, MSG_ID); + + if (HINIC_MSG_HEADER_GET(*header, DIRECTION) == MGMT_RESP) + mgmt_resp_msg_handler(pf_to_mgmt, recv_msg); + else + mgmt_recv_msg_handler(pf_to_mgmt, recv_msg); } /** @@ -56,7 +410,77 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, **/ static void mgmt_msg_aeqe_handler(void *handle, void *data, u8 size) { - /* should be implemented */ + struct hinic_pf_to_mgmt *pf_to_mgmt = handle; + struct hinic_recv_msg *recv_msg; + u64 *header = (u64 *)data; + + recv_msg = HINIC_MSG_HEADER_GET(*header, DIRECTION) == + MGMT_DIRECT_SEND ? 
+ &pf_to_mgmt->recv_msg_from_mgmt : + &pf_to_mgmt->recv_resp_msg_from_mgmt; + + recv_mgmt_msg_handler(pf_to_mgmt, header, recv_msg); +} + +/** + * alloc_recv_msg - allocate receive message memory + * @pf_to_mgmt: PF to MGMT channel + * @recv_msg: pointer that will hold the allocated data + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_recv_msg(struct hinic_pf_to_mgmt *pf_to_mgmt, + struct hinic_recv_msg *recv_msg) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + + recv_msg->msg = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, + GFP_KERNEL); + if (!recv_msg->msg) + return -ENOMEM; + + recv_msg->buf_out = devm_kzalloc(&pdev->dev, MAX_PF_MGMT_BUF_SIZE, + GFP_KERNEL); + if (!recv_msg->buf_out) + return -ENOMEM; + + return 0; +} + +/** + * alloc_msg_buf - allocate all the message buffers of PF to MGMT channel + * @pf_to_mgmt: PF to MGMT channel + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt) +{ + struct hinic_hwif *hwif = pf_to_mgmt->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + err = alloc_recv_msg(pf_to_mgmt, + &pf_to_mgmt->recv_msg_from_mgmt); + if (err) { + dev_err(&pdev->dev, "Failed to allocate recv msg\n"); + return err; + } + + err = alloc_recv_msg(pf_to_mgmt, + &pf_to_mgmt->recv_resp_msg_from_mgmt); + if (err) { + dev_err(&pdev->dev, "Failed to allocate resp recv msg\n"); + return err; + } + + pf_to_mgmt->sync_msg_buf = devm_kzalloc(&pdev->dev, + MAX_PF_MGMT_BUF_SIZE, + GFP_KERNEL); + if (!pf_to_mgmt->sync_msg_buf) + return -ENOMEM; + + return 0; } /** @@ -76,6 +500,15 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt, pf_to_mgmt->hwif = hwif; + sema_init(&pf_to_mgmt->sync_msg_lock, 1); + pf_to_mgmt->sync_msg_id = 0; + + err = alloc_msg_buf(pf_to_mgmt); + if (err) { + dev_err(&pdev->dev, "Failed to allocate msg buffers\n"); + return err; + } + err = hinic_api_cmd_init(pf_to_mgmt->cmd_chain, hwif); if (err) { dev_err(&pdev->dev, "Failed to initialize cmd chains\n"); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index dff321c1b793..eca7ad865bca 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -17,10 +17,48 @@ #define HINIC_HW_MGMT_H #include +#include +#include #include "hinic_hw_if.h" #include "hinic_hw_api_cmd.h" +#define HINIC_MSG_HEADER_MSG_LEN_SHIFT 0 +#define HINIC_MSG_HEADER_MODULE_SHIFT 11 +#define HINIC_MSG_HEADER_SEG_LEN_SHIFT 16 +#define HINIC_MSG_HEADER_NO_ACK_SHIFT 22 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_SHIFT 23 +#define HINIC_MSG_HEADER_SEQID_SHIFT 24 +#define HINIC_MSG_HEADER_LAST_SHIFT 30 +#define HINIC_MSG_HEADER_DIRECTION_SHIFT 31 +#define HINIC_MSG_HEADER_CMD_SHIFT 32 +#define HINIC_MSG_HEADER_ZEROS_SHIFT 40 +#define HINIC_MSG_HEADER_PCI_INTF_SHIFT 48 +#define HINIC_MSG_HEADER_PF_IDX_SHIFT 50 +#define HINIC_MSG_HEADER_MSG_ID_SHIFT 54 + +#define HINIC_MSG_HEADER_MSG_LEN_MASK 0x7FF +#define HINIC_MSG_HEADER_MODULE_MASK 0x1F +#define HINIC_MSG_HEADER_SEG_LEN_MASK 0x3F +#define HINIC_MSG_HEADER_NO_ACK_MASK 0x1 +#define HINIC_MSG_HEADER_ASYNC_MGMT_TO_PF_MASK 0x1 +#define HINIC_MSG_HEADER_SEQID_MASK 0x3F +#define HINIC_MSG_HEADER_LAST_MASK 0x1 +#define HINIC_MSG_HEADER_DIRECTION_MASK 0x1 +#define HINIC_MSG_HEADER_CMD_MASK 0xFF +#define HINIC_MSG_HEADER_ZEROS_MASK 0xFF +#define HINIC_MSG_HEADER_PCI_INTF_MASK 0x3 +#define HINIC_MSG_HEADER_PF_IDX_MASK 0xF +#define 
HINIC_MSG_HEADER_MSG_ID_MASK 0x3FF + +#define HINIC_MSG_HEADER_SET(val, member) \ + ((u64)((val) & HINIC_MSG_HEADER_##member##_MASK) << \ + HINIC_MSG_HEADER_##member##_SHIFT) + +#define HINIC_MSG_HEADER_GET(val, member) \ + (((val) >> HINIC_MSG_HEADER_##member##_SHIFT) & \ + HINIC_MSG_HEADER_##member##_MASK) + enum hinic_mgmt_msg_type { HINIC_MGMT_MSG_SYNC = 1, }; @@ -29,9 +67,30 @@ enum hinic_cfg_cmd { HINIC_CFG_NIC_CAP = 0, }; +struct hinic_recv_msg { + u8 *msg; + u8 *buf_out; + + struct completion recv_done; + + u16 cmd; + enum hinic_mod_type mod; + int async_mgmt_to_pf; + + u16 msg_len; + u16 msg_id; +}; + struct hinic_pf_to_mgmt { struct hinic_hwif *hwif; + struct semaphore sync_msg_lock; + u16 sync_msg_id; + u8 *sync_msg_buf; + + struct hinic_recv_msg recv_resp_msg_from_mgmt; + struct hinic_recv_msg recv_msg_from_mgmt; + struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; }; -- cgit v1.2.3-55-g7522 From 0ac599c7903cb9e315081b731a4401e1726630d5 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:52 +0800 Subject: net-next/hinic: Add api cmd commands Add the api cmd commands for sending management messages to the nic. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- .../net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 329 ++++++++++++++++++++- .../net/ethernet/huawei/hinic/hinic_hw_api_cmd.h | 65 ++++ drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 6 + 3 files changed, 398 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c index 7e7a76e7d049..8901801fe426 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c @@ -25,7 +25,9 @@ #include #include #include +#include #include +#include #include "hinic_hw_csr.h" #include "hinic_hw_if.h" @@ -45,14 +47,313 @@ #define API_CMD_BUF_SIZE 2048 +/* Sizes of the members in hinic_api_cmd_cell */ +#define API_CMD_CELL_DESC_SIZE 8 +#define API_CMD_CELL_DATA_ADDR_SIZE 8 + +#define API_CMD_CELL_ALIGNMENT 8 + #define API_CMD_TIMEOUT 1000 +#define MASKED_IDX(chain, idx) ((idx) & ((chain)->num_cells - 1)) + +#define SIZE_8BYTES(size) (ALIGN((size), 8) >> 3) +#define SIZE_4BYTES(size) (ALIGN((size), 4) >> 2) + +#define RD_DMA_ATTR_DEFAULT 0 +#define WR_DMA_ATTR_DEFAULT 0 + +enum api_cmd_data_format { + SGE_DATA = 1, /* cell data is passed by hw address */ +}; + +enum api_cmd_type { + API_CMD_WRITE = 0, +}; + +enum api_cmd_bypass { + NO_BYPASS = 0, + BYPASS = 1, +}; + enum api_cmd_xor_chk_level { XOR_CHK_DIS = 0, XOR_CHK_ALL = 3, }; +static u8 xor_chksum_set(void *data) +{ + int idx; + u8 *val, checksum = 0; + + val = data; + + for (idx = 0; idx < 7; idx++) + checksum ^= val[idx]; + + return checksum; +} + +static void set_prod_idx(struct hinic_api_cmd_chain *chain) +{ + enum hinic_api_cmd_chain_type chain_type = chain->chain_type; + struct hinic_hwif *hwif = chain->hwif; + u32 addr, prod_idx; + + addr = HINIC_CSR_API_CMD_CHAIN_PI_ADDR(chain_type); + prod_idx = hinic_hwif_read_reg(hwif, addr); + + prod_idx = HINIC_API_CMD_PI_CLEAR(prod_idx, IDX); + + prod_idx |= HINIC_API_CMD_PI_SET(chain->prod_idx, IDX); + + hinic_hwif_write_reg(hwif, addr, prod_idx); +} + +static u32 get_hw_cons_idx(struct hinic_api_cmd_chain *chain) +{ + u32 addr, val; + + addr = HINIC_CSR_API_CMD_STATUS_ADDR(chain->chain_type); + val = hinic_hwif_read_reg(chain->hwif, addr); + + return HINIC_API_CMD_STATUS_GET(val, CONS_IDX); +} + +/** + * chain_busy - 
check if the chain is still processing last requests + * @chain: chain to check + * + * Return 0 - Success, negative - Failure + **/ +static int chain_busy(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + u32 prod_idx; + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + chain->cons_idx = get_hw_cons_idx(chain); + prod_idx = chain->prod_idx; + + /* check for a space for a new command */ + if (chain->cons_idx == MASKED_IDX(chain, prod_idx + 1)) { + dev_err(&pdev->dev, "API CMD chain %d is busy\n", + chain->chain_type); + return -EBUSY; + } + break; + + default: + dev_err(&pdev->dev, "Unknown API CMD Chain type\n"); + break; + } + + return 0; +} + +/** + * get_cell_data_size - get the data size of a specific cell type + * @type: chain type + * + * Return the data(Desc + Address) size in the cell + **/ +static u8 get_cell_data_size(enum hinic_api_cmd_chain_type type) +{ + u8 cell_data_size = 0; + + switch (type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + cell_data_size = ALIGN(API_CMD_CELL_DESC_SIZE + + API_CMD_CELL_DATA_ADDR_SIZE, + API_CMD_CELL_ALIGNMENT); + break; + default: + break; + } + + return cell_data_size; +} + +/** + * prepare_cell_ctrl - prepare the ctrl of the cell for the command + * @cell_ctrl: the control of the cell to set the control value into it + * @data_size: the size of the data in the cell + **/ +static void prepare_cell_ctrl(u64 *cell_ctrl, u16 data_size) +{ + u8 chksum; + u64 ctrl; + + ctrl = HINIC_API_CMD_CELL_CTRL_SET(SIZE_8BYTES(data_size), DATA_SZ) | + HINIC_API_CMD_CELL_CTRL_SET(RD_DMA_ATTR_DEFAULT, RD_DMA_ATTR) | + HINIC_API_CMD_CELL_CTRL_SET(WR_DMA_ATTR_DEFAULT, WR_DMA_ATTR); + + chksum = xor_chksum_set(&ctrl); + + ctrl |= HINIC_API_CMD_CELL_CTRL_SET(chksum, XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + *cell_ctrl = cpu_to_be64(ctrl); +} + +/** + * prepare_api_cmd - prepare API CMD command + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + **/ +static void prepare_api_cmd(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, + void *cmd, u16 cmd_size) +{ + struct hinic_api_cmd_cell *cell = chain->curr_node; + struct hinic_api_cmd_cell_ctxt *cell_ctxt; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + + cell_ctxt = &chain->cell_ctxt[chain->prod_idx]; + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + cell->desc = HINIC_API_CMD_DESC_SET(SGE_DATA, API_TYPE) | + HINIC_API_CMD_DESC_SET(API_CMD_WRITE, RD_WR) | + HINIC_API_CMD_DESC_SET(NO_BYPASS, MGMT_BYPASS); + break; + + default: + dev_err(&pdev->dev, "unknown Chain type\n"); + return; + } + + cell->desc |= HINIC_API_CMD_DESC_SET(dest, DEST) | + HINIC_API_CMD_DESC_SET(SIZE_4BYTES(cmd_size), SIZE); + + cell->desc |= HINIC_API_CMD_DESC_SET(xor_chksum_set(&cell->desc), + XOR_CHKSUM); + + /* The data in the HW should be in Big Endian Format */ + cell->desc = cpu_to_be64(cell->desc); + + memcpy(cell_ctxt->api_cmd_vaddr, cmd, cmd_size); +} + +/** + * prepare_cell - prepare cell ctrl and cmd in the current cell + * @chain: chain for the command + * @dest: destination node on the card that will receive the command + * @cmd: command data + * @cmd_size: the command size + * + * Return 0 - Success, negative - Failure + **/ +static void prepare_cell(struct hinic_api_cmd_chain *chain, + enum hinic_node_id dest, + void *cmd, u16 cmd_size) 
+{ + struct hinic_api_cmd_cell *curr_node = chain->curr_node; + u16 data_size = get_cell_data_size(chain->chain_type); + + prepare_cell_ctrl(&curr_node->ctrl, data_size); + prepare_api_cmd(chain, dest, cmd, cmd_size); +} + +static inline void cmd_chain_prod_idx_inc(struct hinic_api_cmd_chain *chain) +{ + chain->prod_idx = MASKED_IDX(chain, chain->prod_idx + 1); +} + +/** + * api_cmd_status_update - update the status in the chain struct + * @chain: chain to update + **/ +static void api_cmd_status_update(struct hinic_api_cmd_chain *chain) +{ + enum hinic_api_cmd_chain_type chain_type; + struct hinic_api_cmd_status *wb_status; + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + u64 status_header; + u32 status; + + wb_status = chain->wb_status; + status_header = be64_to_cpu(wb_status->header); + + status = be32_to_cpu(wb_status->status); + if (HINIC_API_CMD_STATUS_GET(status, CHKSUM_ERR)) { + dev_err(&pdev->dev, "API CMD status: Xor check error\n"); + return; + } + + chain_type = HINIC_API_CMD_STATUS_HEADER_GET(status_header, CHAIN_ID); + if (chain_type >= HINIC_API_CMD_MAX) { + dev_err(&pdev->dev, "unknown API CMD Chain %d\n", chain_type); + return; + } + + chain->cons_idx = HINIC_API_CMD_STATUS_GET(status, CONS_IDX); +} + +/** + * wait_for_status_poll - wait for write to api cmd command to complete + * @chain: the chain of the command + * + * Return 0 - Success, negative - Failure + **/ +static int wait_for_status_poll(struct hinic_api_cmd_chain *chain) +{ + int err = -ETIMEDOUT; + unsigned long end; + + end = jiffies + msecs_to_jiffies(API_CMD_TIMEOUT); + do { + api_cmd_status_update(chain); + + /* wait for CI to be updated - sign for completion */ + if (chain->cons_idx == chain->prod_idx) { + err = 0; + break; + } + + msleep(20); + } while (time_before(jiffies, end)); + + return err; +} + +/** + * wait_for_api_cmd_completion - wait for command to complete + * @chain: chain for the command + * + * Return 0 - Success, negative - Failure + **/ +static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain) +{ + struct hinic_hwif *hwif = chain->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + switch (chain->chain_type) { + case HINIC_API_CMD_WRITE_TO_MGMT_CPU: + err = wait_for_status_poll(chain); + if (err) { + dev_err(&pdev->dev, "API CMD Poll status timeout\n"); + break; + } + break; + + default: + dev_err(&pdev->dev, "unknown API CMD Chain type\n"); + err = -EINVAL; + break; + } + + return err; +} + /** * api_cmd - API CMD command * @chain: chain for the command @@ -65,8 +366,30 @@ enum api_cmd_xor_chk_level { static int api_cmd(struct hinic_api_cmd_chain *chain, enum hinic_node_id dest, u8 *cmd, u16 cmd_size) { - /* should be implemented */ - return -EINVAL; + struct hinic_api_cmd_cell_ctxt *ctxt; + int err; + + down(&chain->sem); + if (chain_busy(chain)) { + up(&chain->sem); + return -EBUSY; + } + + prepare_cell(chain, dest, cmd, cmd_size); + cmd_chain_prod_idx_inc(chain); + + wmb(); /* inc pi before issue the command */ + + set_prod_idx(chain); /* issue the command */ + + ctxt = &chain->cell_ctxt[chain->prod_idx]; + + chain->curr_node = ctxt->cell_vaddr; + + err = wait_for_api_cmd_completion(chain); + + up(&chain->sem); + return err; } /** @@ -491,6 +814,8 @@ static int api_chain_init(struct hinic_api_cmd_chain *chain, chain->prod_idx = 0; chain->cons_idx = 0; + sema_init(&chain->sem, 1); + cell_ctxt_size = chain->num_cells * sizeof(*chain->cell_ctxt); chain->cell_ctxt = devm_kzalloc(&pdev->dev, cell_ctxt_size, GFP_KERNEL); if 
(!chain->cell_ctxt) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h index e8865d627b58..31b94d5d47f7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h @@ -17,9 +17,22 @@ #define HINIC_HW_API_CMD_H #include +#include #include "hinic_hw_if.h" +#define HINIC_API_CMD_PI_IDX_SHIFT 0 + +#define HINIC_API_CMD_PI_IDX_MASK 0xFFFFFF + +#define HINIC_API_CMD_PI_SET(val, member) \ + (((u32)(val) & HINIC_API_CMD_PI_##member##_MASK) << \ + HINIC_API_CMD_PI_##member##_SHIFT) + +#define HINIC_API_CMD_PI_CLEAR(val, member) \ + ((val) & (~(HINIC_API_CMD_PI_##member##_MASK \ + << HINIC_API_CMD_PI_##member##_SHIFT))) + #define HINIC_API_CMD_CHAIN_REQ_RESTART_SHIFT 1 #define HINIC_API_CMD_CHAIN_REQ_RESTART_MASK 0x1 @@ -58,6 +71,56 @@ ((val) & (~(HINIC_API_CMD_CHAIN_CTRL_##member##_MASK \ << HINIC_API_CMD_CHAIN_CTRL_##member##_SHIFT))) +#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_SHIFT 0 +#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_SHIFT 16 +#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_SHIFT 24 +#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_SHIFT 56 + +#define HINIC_API_CMD_CELL_CTRL_DATA_SZ_MASK 0x3F +#define HINIC_API_CMD_CELL_CTRL_RD_DMA_ATTR_MASK 0x3F +#define HINIC_API_CMD_CELL_CTRL_WR_DMA_ATTR_MASK 0x3F +#define HINIC_API_CMD_CELL_CTRL_XOR_CHKSUM_MASK 0xFF + +#define HINIC_API_CMD_CELL_CTRL_SET(val, member) \ + ((((u64)val) & HINIC_API_CMD_CELL_CTRL_##member##_MASK) << \ + HINIC_API_CMD_CELL_CTRL_##member##_SHIFT) + +#define HINIC_API_CMD_DESC_API_TYPE_SHIFT 0 +#define HINIC_API_CMD_DESC_RD_WR_SHIFT 1 +#define HINIC_API_CMD_DESC_MGMT_BYPASS_SHIFT 2 +#define HINIC_API_CMD_DESC_DEST_SHIFT 32 +#define HINIC_API_CMD_DESC_SIZE_SHIFT 40 +#define HINIC_API_CMD_DESC_XOR_CHKSUM_SHIFT 56 + +#define HINIC_API_CMD_DESC_API_TYPE_MASK 0x1 +#define HINIC_API_CMD_DESC_RD_WR_MASK 0x1 +#define HINIC_API_CMD_DESC_MGMT_BYPASS_MASK 0x1 +#define HINIC_API_CMD_DESC_DEST_MASK 0x1F +#define HINIC_API_CMD_DESC_SIZE_MASK 0x7FF +#define HINIC_API_CMD_DESC_XOR_CHKSUM_MASK 0xFF + +#define HINIC_API_CMD_DESC_SET(val, member) \ + ((((u64)val) & HINIC_API_CMD_DESC_##member##_MASK) << \ + HINIC_API_CMD_DESC_##member##_SHIFT) + +#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_SHIFT 16 + +#define HINIC_API_CMD_STATUS_HEADER_CHAIN_ID_MASK 0xFF + +#define HINIC_API_CMD_STATUS_HEADER_GET(val, member) \ + (((val) >> HINIC_API_CMD_STATUS_HEADER_##member##_SHIFT) & \ + HINIC_API_CMD_STATUS_HEADER_##member##_MASK) + +#define HINIC_API_CMD_STATUS_CONS_IDX_SHIFT 0 +#define HINIC_API_CMD_STATUS_CHKSUM_ERR_SHIFT 28 + +#define HINIC_API_CMD_STATUS_CONS_IDX_MASK 0xFFFFFF +#define HINIC_API_CMD_STATUS_CHKSUM_ERR_MASK 0x3 + +#define HINIC_API_CMD_STATUS_GET(val, member) \ + (((val) >> HINIC_API_CMD_STATUS_##member##_SHIFT) & \ + HINIC_API_CMD_STATUS_##member##_MASK) + enum hinic_api_cmd_chain_type { HINIC_API_CMD_WRITE_TO_MGMT_CPU = 2, @@ -122,6 +185,8 @@ struct hinic_api_cmd_chain { u32 prod_idx; u32 cons_idx; + struct semaphore sem; + struct hinic_api_cmd_cell_ctxt *cell_ctxt; dma_addr_t wb_status_paddr; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h index a9ece6db3414..ebbf0549404c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h @@ -56,7 +56,13 @@ #define HINIC_CSR_API_CMD_CHAIN_CTRL_ADDR(idx) \ (HINIC_CSR_API_CMD_BASE + 0x14 + (idx) * HINIC_CSR_API_CMD_STRIDE) +#define 
HINIC_CSR_API_CMD_CHAIN_PI_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x1C + (idx) * HINIC_CSR_API_CMD_STRIDE) + #define HINIC_CSR_API_CMD_CHAIN_REQ_ADDR(idx) \ (HINIC_CSR_API_CMD_BASE + 0x20 + (idx) * HINIC_CSR_API_CMD_STRIDE) +#define HINIC_CSR_API_CMD_STATUS_ADDR(idx) \ + (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE) + #endif -- cgit v1.2.3-55-g7522 From f00fe738b5d863d170907de908870b762f1b6387 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:53 +0800 Subject: net-next/hinic: Add aeqs Handle aeq elements that are accumulated on the aeq by calling the registered handler for the specific event. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 49 +++ drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 463 ++++++++++++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h | 81 ++++ drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 90 +++++ drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 46 +++ 5 files changed, 727 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h index ebbf0549404c..52eb89c7df9a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h @@ -65,4 +65,53 @@ #define HINIC_CSR_API_CMD_STATUS_ADDR(idx) \ (HINIC_CSR_API_CMD_BASE + 0x30 + (idx) * HINIC_CSR_API_CMD_STRIDE) +/* MSI-X registers */ +#define HINIC_CSR_MSIX_CTRL_BASE 0x2000 +#define HINIC_CSR_MSIX_CNT_BASE 0x2004 + +#define HINIC_CSR_MSIX_STRIDE 0x8 + +#define HINIC_CSR_MSIX_CTRL_ADDR(idx) \ + (HINIC_CSR_MSIX_CTRL_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) + +#define HINIC_CSR_MSIX_CNT_ADDR(idx) \ + (HINIC_CSR_MSIX_CNT_BASE + (idx) * HINIC_CSR_MSIX_STRIDE) + +/* EQ registers */ +#define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200 + +#define HINIC_EQ_MTT_OFF_STRIDE 0x40 + +#define HINIC_CSR_AEQ_MTT_OFF(id) \ + (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) + +#define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8 + +#define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) + +#define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) + +#define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00 +#define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04 +#define HINIC_AEQ_CONS_IDX_ADDR_BASE 0xE08 +#define HINIC_AEQ_PROD_IDX_ADDR_BASE 0xE0C + +#define HINIC_EQ_OFF_STRIDE 0x80 + +#define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \ + (HINIC_AEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_CTRL_1_ADDR(idx) \ + (HINIC_AEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_CONS_IDX_ADDR(idx) \ + (HINIC_AEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \ + (HINIC_AEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index a099d20bac8c..6b03bc6ae940 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -13,17 +13,74 @@ * */ +#include #include #include #include #include #include +#include +#include +#include +#include +#include +#include +#include "hinic_hw_csr.h" #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #define HINIC_EQS_WQ_NAME "hinic_eqs" +#define 
GET_EQ_NUM_PAGES(eq, pg_size) \ + (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size)) + +#define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size) + +#define EQ_CONS_IDX_REG_ADDR(eq) HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) +#define EQ_PROD_IDX_REG_ADDR(eq) HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) + +#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) \ + HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) + +#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) \ + HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) + +#define GET_EQ_ELEMENT(eq, idx) \ + ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \ + (((idx) & ((eq)->num_elem_in_pg - 1)) * (eq)->elem_size)) + +#define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \ + GET_EQ_ELEMENT(eq, idx)) + +#define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx) + +#define PAGE_IN_4K(page_size) ((page_size) >> 12) +#define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size))) + +#define ELEMENT_SIZE_IN_32B(eq) (((eq)->elem_size) >> 5) +#define EQ_SET_HW_ELEM_SIZE_VAL(eq) (ilog2(ELEMENT_SIZE_IN_32B(eq))) + +#define EQ_MAX_PAGES 8 + +#define aeq_to_aeqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0]) + +#define work_to_aeq_work(work) \ + container_of(work, struct hinic_eq_work, work) + +#define DMA_ATTR_AEQ_DEFAULT 0 + +enum eq_int_mode { + EQ_INT_MODE_ARMED, + EQ_INT_MODE_ALWAYS +}; + +enum eq_arm_state { + EQ_NOT_ARMED, + EQ_ARMED +}; + /** * hinic_aeq_register_hw_cb - register AEQ callback for specific event * @aeqs: pointer to Async eqs of the chip @@ -61,6 +118,325 @@ void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, hwe_cb->hwe_handler = NULL; } +static u8 eq_cons_idx_checksum_set(u32 val) +{ + u8 checksum = 0; + int idx; + + for (idx = 0; idx < 32; idx += 4) + checksum ^= ((val >> idx) & 0xF); + + return (checksum & 0xF); +} + +/** + * eq_update_ci - update the HW cons idx of event queue + * @eq: the event queue to update the cons idx for + **/ +static void eq_update_ci(struct hinic_eq *eq) +{ + u32 val, addr = EQ_CONS_IDX_REG_ADDR(eq); + + /* Read Modify Write */ + val = hinic_hwif_read_reg(eq->hwif, addr); + + val = HINIC_EQ_CI_CLEAR(val, IDX) & + HINIC_EQ_CI_CLEAR(val, WRAPPED) & + HINIC_EQ_CI_CLEAR(val, INT_ARMED) & + HINIC_EQ_CI_CLEAR(val, XOR_CHKSUM); + + val |= HINIC_EQ_CI_SET(eq->cons_idx, IDX) | + HINIC_EQ_CI_SET(eq->wrapped, WRAPPED) | + HINIC_EQ_CI_SET(EQ_ARMED, INT_ARMED); + + val |= HINIC_EQ_CI_SET(eq_cons_idx_checksum_set(val), XOR_CHKSUM); + + hinic_hwif_write_reg(eq->hwif, addr, val); +} + +/** + * aeq_irq_handler - handler for the AEQ event + * @eq: the Async Event Queue that received the event + **/ +static void aeq_irq_handler(struct hinic_eq *eq) +{ + struct hinic_aeqs *aeqs = aeq_to_aeqs(eq); + struct hinic_hwif *hwif = aeqs->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_aeq_elem *aeqe_curr; + struct hinic_hw_event_cb *hwe_cb; + enum hinic_aeq_type event; + unsigned long eqe_state; + u32 aeqe_desc; + int i, size; + + for (i = 0; i < eq->q_len; i++) { + aeqe_curr = GET_CURR_AEQ_ELEM(eq); + + /* Data in HW is in Big endian Format */ + aeqe_desc = be32_to_cpu(aeqe_curr->desc); + + /* HW toggles the wrapped bit, when it adds eq element */ + if (HINIC_EQ_ELEM_DESC_GET(aeqe_desc, WRAPPED) == eq->wrapped) + break; + + event = HINIC_EQ_ELEM_DESC_GET(aeqe_desc, TYPE); + if (event >= HINIC_MAX_AEQ_EVENTS) { + dev_err(&pdev->dev, "Unknown AEQ Event %d\n", event); + return; + } + + if (!HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SRC)) { + hwe_cb = &aeqs->hwe_cb[event]; + + size = 
HINIC_EQ_ELEM_DESC_GET(aeqe_desc, SIZE); + + eqe_state = cmpxchg(&hwe_cb->hwe_state, + HINIC_EQE_ENABLED, + HINIC_EQE_ENABLED | + HINIC_EQE_RUNNING); + if ((eqe_state == HINIC_EQE_ENABLED) && + (hwe_cb->hwe_handler)) + hwe_cb->hwe_handler(hwe_cb->handle, + aeqe_curr->data, size); + else + dev_err(&pdev->dev, "Unhandled AEQ Event %d\n", + event); + + hwe_cb->hwe_state &= ~HINIC_EQE_RUNNING; + } + + eq->cons_idx++; + + if (eq->cons_idx == eq->q_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + } +} + +/** + * eq_irq_handler - handler for the EQ event + * @data: the Event Queue that received the event + **/ +static void eq_irq_handler(void *data) +{ + struct hinic_eq *eq = data; + + if (eq->type == HINIC_AEQ) + aeq_irq_handler(eq); + + eq_update_ci(eq); +} + +/** + * eq_irq_work - the work of the EQ that received the event + * @work: the work struct that is associated with the EQ + **/ +static void eq_irq_work(struct work_struct *work) +{ + struct hinic_eq_work *aeq_work = work_to_aeq_work(work); + struct hinic_eq *aeq; + + aeq = aeq_work->data; + eq_irq_handler(aeq); +} + +/** + * aeq_interrupt - aeq interrupt handler + * @irq: irq number + * @data: the Async Event Queue that collected the event + **/ +static irqreturn_t aeq_interrupt(int irq, void *data) +{ + struct hinic_eq_work *aeq_work; + struct hinic_eq *aeq = data; + struct hinic_aeqs *aeqs; + + /* clear resend timer cnt register */ + hinic_msix_attr_cnt_clear(aeq->hwif, aeq->msix_entry.entry); + + aeq_work = &aeq->aeq_work; + aeq_work->data = aeq; + + aeqs = aeq_to_aeqs(aeq); + queue_work(aeqs->workq, &aeq_work->work); + + return IRQ_HANDLED; +} + +void set_ctrl0(struct hinic_eq *eq) +{ + struct msix_entry *msix_entry = &eq->msix_entry; + enum hinic_eq_type type = eq->type; + u32 addr, val, ctrl0; + + if (type == HINIC_AEQ) { + /* RMW Ctrl0 */ + addr = HINIC_CSR_AEQ_CTRL_0_ADDR(eq->q_id); + + val = hinic_hwif_read_reg(eq->hwif, addr); + + val = HINIC_AEQ_CTRL_0_CLEAR(val, INT_IDX) & + HINIC_AEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + HINIC_AEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + HINIC_AEQ_CTRL_0_CLEAR(val, INT_MODE); + + ctrl0 = HINIC_AEQ_CTRL_0_SET(msix_entry->entry, INT_IDX) | + HINIC_AEQ_CTRL_0_SET(DMA_ATTR_AEQ_DEFAULT, DMA_ATTR) | + HINIC_AEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), + PCI_INTF_IDX) | + HINIC_AEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INT_MODE); + + val |= ctrl0; + + hinic_hwif_write_reg(eq->hwif, addr, val); + } +} + +void set_ctrl1(struct hinic_eq *eq) +{ + enum hinic_eq_type type = eq->type; + u32 page_size_val, elem_size; + u32 addr, val, ctrl1; + + if (type == HINIC_AEQ) { + /* RMW Ctrl1 */ + addr = HINIC_CSR_AEQ_CTRL_1_ADDR(eq->q_id); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + elem_size = EQ_SET_HW_ELEM_SIZE_VAL(eq); + + val = hinic_hwif_read_reg(eq->hwif, addr); + + val = HINIC_AEQ_CTRL_1_CLEAR(val, LEN) & + HINIC_AEQ_CTRL_1_CLEAR(val, ELEM_SIZE) & + HINIC_AEQ_CTRL_1_CLEAR(val, PAGE_SIZE); + + ctrl1 = HINIC_AEQ_CTRL_1_SET(eq->q_len, LEN) | + HINIC_AEQ_CTRL_1_SET(elem_size, ELEM_SIZE) | + HINIC_AEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + val |= ctrl1; + + hinic_hwif_write_reg(eq->hwif, addr, val); + } +} + +/** + * set_eq_ctrls - setting eq's ctrl registers + * @eq: the Event Queue for setting + **/ +static void set_eq_ctrls(struct hinic_eq *eq) +{ + set_ctrl0(eq); + set_ctrl1(eq); +} + +/** + * aeq_elements_init - initialize all the elements in the aeq + * @eq: the Async Event Queue + * @init_val: value to initialize the elements with it + **/ +static void aeq_elements_init(struct hinic_eq *eq, u32 
init_val) +{ + struct hinic_aeq_elem *aeqe; + int i; + + for (i = 0; i < eq->q_len; i++) { + aeqe = GET_AEQ_ELEM(eq, i); + aeqe->desc = cpu_to_be32(init_val); + } + + wmb(); /* Write the initilzation values */ +} + +/** + * alloc_eq_pages - allocate the pages for the queue + * @eq: the event queue + * + * Return 0 - Success, Negative - Failure + **/ +static int alloc_eq_pages(struct hinic_eq *eq) +{ + struct hinic_hwif *hwif = eq->hwif; + struct pci_dev *pdev = hwif->pdev; + u32 init_val, addr, val; + size_t addr_size; + int err, pg; + + addr_size = eq->num_pages * sizeof(*eq->dma_addr); + eq->dma_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); + if (!eq->dma_addr) + return -ENOMEM; + + addr_size = eq->num_pages * sizeof(*eq->virt_addr); + eq->virt_addr = devm_kzalloc(&pdev->dev, addr_size, GFP_KERNEL); + if (!eq->virt_addr) { + err = -ENOMEM; + goto err_virt_addr_alloc; + } + + for (pg = 0; pg < eq->num_pages; pg++) { + eq->virt_addr[pg] = dma_zalloc_coherent(&pdev->dev, + eq->page_size, + &eq->dma_addr[pg], + GFP_KERNEL); + if (!eq->virt_addr[pg]) { + err = -ENOMEM; + goto err_dma_alloc; + } + + addr = EQ_HI_PHYS_ADDR_REG(eq, pg); + val = upper_32_bits(eq->dma_addr[pg]); + + hinic_hwif_write_reg(hwif, addr, val); + + addr = EQ_LO_PHYS_ADDR_REG(eq, pg); + val = lower_32_bits(eq->dma_addr[pg]); + + hinic_hwif_write_reg(hwif, addr, val); + } + + init_val = HINIC_EQ_ELEM_DESC_SET(eq->wrapped, WRAPPED); + + if (eq->type == HINIC_AEQ) + aeq_elements_init(eq, init_val); + + return 0; + +err_dma_alloc: + while (--pg >= 0) + dma_free_coherent(&pdev->dev, eq->page_size, + eq->virt_addr[pg], + eq->dma_addr[pg]); + + devm_kfree(&pdev->dev, eq->virt_addr); + +err_virt_addr_alloc: + devm_kfree(&pdev->dev, eq->dma_addr); + return err; +} + +/** + * free_eq_pages - free the pages of the queue + * @eq: the Event Queue + **/ +static void free_eq_pages(struct hinic_eq *eq) +{ + struct hinic_hwif *hwif = eq->hwif; + struct pci_dev *pdev = hwif->pdev; + int pg; + + for (pg = 0; pg < eq->num_pages; pg++) + dma_free_coherent(&pdev->dev, eq->page_size, + eq->virt_addr[pg], + eq->dma_addr[pg]); + + devm_kfree(&pdev->dev, eq->virt_addr); + devm_kfree(&pdev->dev, eq->dma_addr); +} + /** * init_eq - initialize Event Queue * @eq: the event queue @@ -77,8 +453,81 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, enum hinic_eq_type type, int q_id, u32 q_len, u32 page_size, struct msix_entry entry) { - /* should be implemented */ + struct pci_dev *pdev = hwif->pdev; + int err; + + eq->hwif = hwif; + eq->type = type; + eq->q_id = q_id; + eq->q_len = q_len; + eq->page_size = page_size; + + /* Clear PI and CI, also clear the ARM bit */ + hinic_hwif_write_reg(eq->hwif, EQ_CONS_IDX_REG_ADDR(eq), 0); + hinic_hwif_write_reg(eq->hwif, EQ_PROD_IDX_REG_ADDR(eq), 0); + + eq->cons_idx = 0; + eq->wrapped = 0; + + if (type == HINIC_AEQ) { + eq->elem_size = HINIC_AEQE_SIZE; + } else { + dev_err(&pdev->dev, "Invalid EQ type\n"); + return -EINVAL; + } + + eq->num_pages = GET_EQ_NUM_PAGES(eq, page_size); + eq->num_elem_in_pg = GET_EQ_NUM_ELEMS_IN_PG(eq, page_size); + + eq->msix_entry = entry; + + if (eq->num_elem_in_pg & (eq->num_elem_in_pg - 1)) { + dev_err(&pdev->dev, "num elements in eq page != power of 2\n"); + return -EINVAL; + } + + if (eq->num_pages > EQ_MAX_PAGES) { + dev_err(&pdev->dev, "too many pages for eq\n"); + return -EINVAL; + } + + set_eq_ctrls(eq); + eq_update_ci(eq); + + err = alloc_eq_pages(eq); + if (err) { + dev_err(&pdev->dev, "Failed to allocate pages for eq\n"); + return err; + } + + if 
(type == HINIC_AEQ) { + struct hinic_eq_work *aeq_work = &eq->aeq_work; + + INIT_WORK(&aeq_work->work, eq_irq_work); + } + + /* set the attributes of the msix entry */ + hinic_msix_attr_set(eq->hwif, eq->msix_entry.entry, + HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT, + HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT, + HINIC_EQ_MSIX_LLI_TIMER_DEFAULT, + HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT, + HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT); + + if (type == HINIC_AEQ) + err = request_irq(entry.vector, aeq_interrupt, 0, + "hinic_aeq", eq); + + if (err) { + dev_err(&pdev->dev, "Failed to request irq for the EQ\n"); + goto err_req_irq; + } + return 0; + +err_req_irq: + free_eq_pages(eq); + return err; } /** @@ -87,7 +536,17 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, **/ static void remove_eq(struct hinic_eq *eq) { - /* should be implemented */ + struct msix_entry *entry = &eq->msix_entry; + + free_irq(entry->vector, eq); + + if (eq->type == HINIC_AEQ) { + struct hinic_eq_work *aeq_work = &eq->aeq_work; + + cancel_work_sync(&aeq_work->work); + } + + free_eq_pages(eq); } /** diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h index 1580127b26c4..7f50b2f6cc22 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h @@ -24,8 +24,84 @@ #include "hinic_hw_if.h" +#define HINIC_AEQ_CTRL_0_INT_IDX_SHIFT 0 +#define HINIC_AEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_SHIFT 20 +#define HINIC_AEQ_CTRL_0_INT_MODE_SHIFT 31 + +#define HINIC_AEQ_CTRL_0_INT_IDX_MASK 0x3FF +#define HINIC_AEQ_CTRL_0_DMA_ATTR_MASK 0x3F +#define HINIC_AEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3 +#define HINIC_AEQ_CTRL_0_INT_MODE_MASK 0x1 + +#define HINIC_AEQ_CTRL_0_SET(val, member) \ + (((u32)(val) & HINIC_AEQ_CTRL_0_##member##_MASK) << \ + HINIC_AEQ_CTRL_0_##member##_SHIFT) + +#define HINIC_AEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(HINIC_AEQ_CTRL_0_##member##_MASK \ + << HINIC_AEQ_CTRL_0_##member##_SHIFT))) + +#define HINIC_AEQ_CTRL_1_LEN_SHIFT 0 +#define HINIC_AEQ_CTRL_1_ELEM_SIZE_SHIFT 24 +#define HINIC_AEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define HINIC_AEQ_CTRL_1_LEN_MASK 0x1FFFFF +#define HINIC_AEQ_CTRL_1_ELEM_SIZE_MASK 0x3 +#define HINIC_AEQ_CTRL_1_PAGE_SIZE_MASK 0xF + +#define HINIC_AEQ_CTRL_1_SET(val, member) \ + (((u32)(val) & HINIC_AEQ_CTRL_1_##member##_MASK) << \ + HINIC_AEQ_CTRL_1_##member##_SHIFT) + +#define HINIC_AEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(HINIC_AEQ_CTRL_1_##member##_MASK \ + << HINIC_AEQ_CTRL_1_##member##_SHIFT))) + +#define HINIC_EQ_ELEM_DESC_TYPE_SHIFT 0 +#define HINIC_EQ_ELEM_DESC_SRC_SHIFT 7 +#define HINIC_EQ_ELEM_DESC_SIZE_SHIFT 8 +#define HINIC_EQ_ELEM_DESC_WRAPPED_SHIFT 31 + +#define HINIC_EQ_ELEM_DESC_TYPE_MASK 0x7F +#define HINIC_EQ_ELEM_DESC_SRC_MASK 0x1 +#define HINIC_EQ_ELEM_DESC_SIZE_MASK 0xFF +#define HINIC_EQ_ELEM_DESC_WRAPPED_MASK 0x1 + +#define HINIC_EQ_ELEM_DESC_SET(val, member) \ + (((u32)(val) & HINIC_EQ_ELEM_DESC_##member##_MASK) << \ + HINIC_EQ_ELEM_DESC_##member##_SHIFT) + +#define HINIC_EQ_ELEM_DESC_GET(val, member) \ + (((val) >> HINIC_EQ_ELEM_DESC_##member##_SHIFT) & \ + HINIC_EQ_ELEM_DESC_##member##_MASK) + +#define HINIC_EQ_CI_IDX_SHIFT 0 +#define HINIC_EQ_CI_WRAPPED_SHIFT 20 +#define HINIC_EQ_CI_XOR_CHKSUM_SHIFT 24 +#define HINIC_EQ_CI_INT_ARMED_SHIFT 31 + +#define HINIC_EQ_CI_IDX_MASK 0xFFFFF +#define HINIC_EQ_CI_WRAPPED_MASK 0x1 +#define HINIC_EQ_CI_XOR_CHKSUM_MASK 0xF +#define HINIC_EQ_CI_INT_ARMED_MASK 0x1 + +#define HINIC_EQ_CI_SET(val, 
member) \ + (((u32)(val) & HINIC_EQ_CI_##member##_MASK) << \ + HINIC_EQ_CI_##member##_SHIFT) + +#define HINIC_EQ_CI_CLEAR(val, member) \ + ((val) & (~(HINIC_EQ_CI_##member##_MASK \ + << HINIC_EQ_CI_##member##_SHIFT))) + #define HINIC_MAX_AEQS 4 +#define HINIC_AEQE_SIZE 64 + +#define HINIC_AEQE_DESC_SIZE 4 +#define HINIC_AEQE_DATA_SIZE \ + (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE) + #define HINIC_DEFAULT_AEQ_LEN 64 #define HINIC_EQ_PAGE_SIZE SZ_4K @@ -45,6 +121,11 @@ enum hinic_eqe_state { HINIC_EQE_RUNNING = BIT(1), }; +struct hinic_aeq_elem { + u8 data[HINIC_AEQE_DATA_SIZE]; + u32 desc; +}; + struct hinic_eq_work { struct work_struct work; void *data; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c index edf184242172..0cfada793a7a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c @@ -25,6 +25,96 @@ #define PCIE_ATTR_ENTRY 0 +#define VALID_MSIX_IDX(attr, msix_index) ((msix_index) < (attr)->num_irqs) + +/** + * hinic_msix_attr_set - set message attribute for msix entry + * @hwif: the HW interface of a pci function device + * @msix_index: msix_index + * @pending_limit: the maximum pending interrupt events (unit 8) + * @coalesc_timer: coalesc period for interrupt (unit 8 us) + * @lli_timer: replenishing period for low latency credit (unit 8 us) + * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) + * @resend_timer: maximum wait for resending msix (unit coalesc period) + * + * Return 0 - Success, negative - Failure + **/ +int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer, u8 lli_credit_limit, + u8 resend_timer) +{ + u32 msix_ctrl, addr; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + msix_ctrl = HINIC_MSIX_ATTR_SET(pending_limit, PENDING_LIMIT) | + HINIC_MSIX_ATTR_SET(coalesc_timer, COALESC_TIMER) | + HINIC_MSIX_ATTR_SET(lli_timer, LLI_TIMER) | + HINIC_MSIX_ATTR_SET(lli_credit_limit, LLI_CREDIT) | + HINIC_MSIX_ATTR_SET(resend_timer, RESEND_TIMER); + + addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); + return 0; +} + +/** + * hinic_msix_attr_get - get message attribute of msix entry + * @hwif: the HW interface of a pci function device + * @msix_index: msix_index + * @pending_limit: the maximum pending interrupt events (unit 8) + * @coalesc_timer: coalesc period for interrupt (unit 8 us) + * @lli_timer: replenishing period for low latency credit (unit 8 us) + * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) + * @resend_timer: maximum wait for resending msix (unit coalesc period) + * + * Return 0 - Success, negative - Failure + **/ +int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, + u8 *pending_limit, u8 *coalesc_timer, + u8 *lli_timer, u8 *lli_credit_limit, + u8 *resend_timer) +{ + u32 addr, val; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index); + val = hinic_hwif_read_reg(hwif, addr); + + *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT); + *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER); + *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER); + *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT); + *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER); + return 0; +} + +/** + * hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry + * @hwif: the HW interface of a 
pci function device + * @msix_index: msix_index + * + * Return 0 - Success, negative - Failure + **/ +int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index) +{ + u32 msix_ctrl, addr; + + if (!VALID_MSIX_IDX(&hwif->attr, msix_index)) + return -EINVAL; + + msix_ctrl = HINIC_MSIX_CNT_SET(1, RESEND_TIMER); + addr = HINIC_CSR_MSIX_CNT_ADDR(msix_index); + + hinic_hwif_write_reg(hwif, addr, msix_ctrl); + return 0; +} + /** * hwif_ready - test if the HW is ready for use * @hwif: the HW interface of a pci function device diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h index 98623d69f2ba..707a04614d5b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -88,6 +88,34 @@ ((val) & (~(HINIC_PPF_ELECTION_##member##_MASK \ << HINIC_PPF_ELECTION_##member##_SHIFT))) +#define HINIC_MSIX_PENDING_LIMIT_SHIFT 0 +#define HINIC_MSIX_COALESC_TIMER_SHIFT 8 +#define HINIC_MSIX_LLI_TIMER_SHIFT 16 +#define HINIC_MSIX_LLI_CREDIT_SHIFT 24 +#define HINIC_MSIX_RESEND_TIMER_SHIFT 29 + +#define HINIC_MSIX_PENDING_LIMIT_MASK 0xFF +#define HINIC_MSIX_COALESC_TIMER_MASK 0xFF +#define HINIC_MSIX_LLI_TIMER_MASK 0xFF +#define HINIC_MSIX_LLI_CREDIT_MASK 0x1F +#define HINIC_MSIX_RESEND_TIMER_MASK 0x7 + +#define HINIC_MSIX_ATTR_SET(val, member) \ + (((u32)(val) & HINIC_MSIX_##member##_MASK) << \ + HINIC_MSIX_##member##_SHIFT) + +#define HINIC_MSIX_ATTR_GET(val, member) \ + (((val) >> HINIC_MSIX_##member##_SHIFT) & \ + HINIC_MSIX_##member##_MASK) + +#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29 + +#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1 + +#define HINIC_MSIX_CNT_SET(val, member) \ + (((u32)(val) & HINIC_MSIX_CNT_##member##_MASK) << \ + HINIC_MSIX_CNT_##member##_SHIFT) + #define HINIC_HWIF_NUM_AEQS(hwif) ((hwif)->attr.num_aeqs) #define HINIC_HWIF_NUM_CEQS(hwif) ((hwif)->attr.num_ceqs) #define HINIC_HWIF_NUM_IRQS(hwif) ((hwif)->attr.num_irqs) @@ -105,6 +133,12 @@ #define HINIC_PCIE_AT_DISABLE 0 #define HINIC_PCIE_PH_DISABLE 0 +#define HINIC_EQ_MSIX_PENDING_LIMIT_DEFAULT 0 /* Disabled */ +#define HINIC_EQ_MSIX_COALESC_TIMER_DEFAULT 0xFF /* max */ +#define HINIC_EQ_MSIX_LLI_TIMER_DEFAULT 0 /* Disabled */ +#define HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT 0 /* Disabled */ +#define HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT 7 /* max */ + enum hinic_pcie_nosnoop { HINIC_PCIE_SNOOP = 0, HINIC_PCIE_NO_SNOOP = 1, @@ -166,6 +200,18 @@ static inline void hinic_hwif_write_reg(struct hinic_hwif *hwif, u32 reg, writel(cpu_to_be32(val), hwif->cfg_regs_bar + reg); } +int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer); + +int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, + u8 *pending_limit, u8 *coalesc_timer_cfg, + u8 *lli_timer, u8 *lli_credit_limit, + u8 *resend_timer); + +int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index); + int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev); void hinic_free_hwif(struct hinic_hwif *hwif); -- cgit v1.2.3-55-g7522 From 25a3ba610609375751aa991f4a4054aca1b7fc5e Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:54 +0800 Subject: net-next/hinic: Add port management commands Add the port management commands that are sent as management messages. The port management commands are used for netdev operations. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/Makefile | 4 +- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 4 + drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 30 +++ drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 29 +++ drivers/net/ethernet/huawei/hinic/hinic_main.c | 195 +++++++++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_port.c | 224 +++++++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_port.h | 68 +++++++ 7 files changed, 551 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_port.h diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile index beba90a6dc96..dbb1b9dbaa59 100644 --- a/drivers/net/ethernet/huawei/hinic/Makefile +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_HINIC) += hinic.o -hinic-y := hinic_main.o hinic_hw_dev.o hinic_hw_mgmt.o hinic_hw_api_cmd.o \ - hinic_hw_eqs.o hinic_hw_if.o +hinic-y := hinic_main.o hinic_port.o hinic_hw_dev.o hinic_hw_mgmt.o \ + hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index 6c2c896015a5..e54a45cd5bbe 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -18,6 +18,7 @@ #include #include +#include #include "hinic_hw_dev.h" @@ -28,6 +29,9 @@ struct hinic_dev { struct hinic_hwdev *hwdev; u32 msg_enable; + + struct semaphore mgmt_lock; + unsigned long *vlan_bitmap; }; #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index d430e60192af..6bb6c3313fc3 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -199,6 +199,36 @@ static void disable_msix(struct hinic_hwdev *hwdev) pci_disable_msix(pdev); } +/** + * hinic_port_msg_cmd - send port msg to mgmt + * @hwdev: the NIC HW device + * @cmd: the port command + * @buf_in: input buffer + * @in_size: input size + * @buf_out: output buffer + * @out_size: returned output size + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, + void *buf_in, u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "unsupported PCI Function type\n"); + return -EINVAL; + } + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, cmd, + buf_in, in_size, buf_out, out_size, + HINIC_MGMT_MSG_SYNC); +} + /** * init_pfhwdev - Initialize the extended components of PF * @pfhwdev: the HW device for PF diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index feb601388e59..ee9e76ada303 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -30,6 +30,31 @@ struct hinic_cap { u16 num_qps; }; +enum hinic_port_cmd { + HINIC_PORT_CMD_CHANGE_MTU = 2, + + HINIC_PORT_CMD_ADD_VLAN = 3, + HINIC_PORT_CMD_DEL_VLAN = 4, + + HINIC_PORT_CMD_SET_MAC = 9, + HINIC_PORT_CMD_GET_MAC = 10, + HINIC_PORT_CMD_DEL_MAC = 11, + + HINIC_PORT_CMD_SET_RX_MODE = 12, + + HINIC_PORT_CMD_GET_LINK_STATE = 24, + + 
HINIC_PORT_CMD_SET_PORT_STATE = 41, + + HINIC_PORT_CMD_FWCTXT_INIT = 69, + + HINIC_PORT_CMD_SET_FUNC_STATE = 93, + + HINIC_PORT_CMD_GET_GLOBAL_QPN = 102, + + HINIC_PORT_CMD_GET_CAP = 170, +}; + struct hinic_hwdev { struct hinic_hwif *hwif; struct msix_entry *msix_entries; @@ -45,6 +70,10 @@ struct hinic_pfhwdev { struct hinic_pf_to_mgmt pf_to_mgmt; }; +int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, + void *buf_in, u16 in_size, void *buf_out, + u16 *out_size); + struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev); void hinic_free_hwdev(struct hinic_hwdev *hwdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 1d7aed07b25f..772e4e6e68af 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -13,6 +13,7 @@ * */ +#include #include #include #include @@ -20,9 +21,16 @@ #include #include #include +#include +#include +#include +#include +#include +#include #include #include "hinic_hw_dev.h" +#include "hinic_port.h" #include "hinic_dev.h" MODULE_AUTHOR("Huawei Technologies CO., Ltd"); @@ -35,10 +43,163 @@ MODULE_LICENSE("GPL"); NETIF_MSG_IFUP | \ NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) +#define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8) + +static int hinic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + int err; + + netif_info(nic_dev, drv, netdev, "set_mtu = %d\n", new_mtu); + + err = hinic_port_set_mtu(nic_dev, new_mtu); + if (err) + netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n"); + else + netdev->mtu = new_mtu; + + return err; +} + +/** + * change_mac_addr - change the main mac address of network device + * @netdev: network device + * @addr: mac address to set + * + * Return 0 - Success, negative - Failure + **/ +static int change_mac_addr(struct net_device *netdev, const u8 *addr) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + u16 vid = 0; + int err; + + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + netif_info(nic_dev, drv, netdev, "change mac addr = %02x %02x %02x %02x %02x %02x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + down(&nic_dev->mgmt_lock); + + do { + err = hinic_port_del_mac(nic_dev, netdev->dev_addr, vid); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to delete mac\n"); + break; + } + + err = hinic_port_add_mac(nic_dev, addr, vid); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); + break; + } + + vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); + } while (vid != VLAN_N_VID); + + up(&nic_dev->mgmt_lock); + return err; +} + +static int hinic_set_mac_addr(struct net_device *netdev, void *addr) +{ + unsigned char new_mac[ETH_ALEN]; + struct sockaddr *saddr = addr; + int err; + + memcpy(new_mac, saddr->sa_data, ETH_ALEN); + + err = change_mac_addr(netdev, new_mac); + if (!err) + memcpy(netdev->dev_addr, new_mac, ETH_ALEN); + + return err; +} + +static int hinic_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + int ret, err; + + netif_info(nic_dev, drv, netdev, "add vid = %d\n", vid); + + down(&nic_dev->mgmt_lock); + + err = hinic_port_add_vlan(nic_dev, vid); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to add vlan\n"); + goto err_vlan_add; + } + + err = hinic_port_add_mac(nic_dev, netdev->dev_addr, vid); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to 
set mac\n"); + goto err_add_mac; + } + + bitmap_set(nic_dev->vlan_bitmap, vid, 1); + + up(&nic_dev->mgmt_lock); + return 0; + +err_add_mac: + ret = hinic_port_del_vlan(nic_dev, vid); + if (ret) + netif_err(nic_dev, drv, netdev, + "Failed to revert by removing vlan\n"); + +err_vlan_add: + up(&nic_dev->mgmt_lock); + return err; +} + +static int hinic_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + int err; + + netif_info(nic_dev, drv, netdev, "remove vid = %d\n", vid); + + down(&nic_dev->mgmt_lock); + + err = hinic_port_del_vlan(nic_dev, vid); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to delete vlan\n"); + goto err_del_vlan; + } + + bitmap_clear(nic_dev->vlan_bitmap, vid, 1); + + up(&nic_dev->mgmt_lock); + return 0; + +err_del_vlan: + up(&nic_dev->mgmt_lock); + return err; +} + static const struct net_device_ops hinic_netdev_ops = { - /* Operations are empty, should be filled */ + .ndo_change_mtu = hinic_change_mtu, + .ndo_set_mac_address = hinic_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, + /* more operations should be filled */ }; +static void netdev_features_init(struct net_device *netdev) +{ + netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA; + + netdev->vlan_features = netdev->hw_features; + + netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; +} + /** * nic_dev_init - Initialize the NIC device * @pdev: the NIC pci device @@ -79,8 +240,36 @@ static int nic_dev_init(struct pci_dev *pdev) nic_dev->hwdev = hwdev; nic_dev->msg_enable = MSG_ENABLE_DEFAULT; + sema_init(&nic_dev->mgmt_lock, 1); + + nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev, + VLAN_BITMAP_SIZE(nic_dev), + GFP_KERNEL); + if (!nic_dev->vlan_bitmap) { + err = -ENOMEM; + goto err_vlan_bitmap; + } + pci_set_drvdata(pdev, netdev); + err = hinic_port_get_mac(nic_dev, netdev->dev_addr); + if (err) + dev_warn(&pdev->dev, "Failed to get mac address\n"); + + err = hinic_port_add_mac(nic_dev, netdev->dev_addr, 0); + if (err) { + dev_err(&pdev->dev, "Failed to add mac\n"); + goto err_add_mac; + } + + err = hinic_port_set_mtu(nic_dev, netdev->mtu); + if (err) { + dev_err(&pdev->dev, "Failed to set mtu\n"); + goto err_set_mtu; + } + + netdev_features_init(netdev); + netif_carrier_off(netdev); err = register_netdev(netdev); @@ -92,7 +281,11 @@ static int nic_dev_init(struct pci_dev *pdev) return 0; err_reg_netdev: +err_set_mtu: +err_add_mac: pci_set_drvdata(pdev, NULL); + +err_vlan_bitmap: free_netdev(netdev); err_alloc_etherdev: diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c new file mode 100644 index 000000000000..5b249e8121bd --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -0,0 +1,224 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_dev.h" +#include "hinic_port.h" +#include "hinic_dev.h" + +#define HINIC_MIN_MTU_SIZE 256 +#define HINIC_MAX_JUMBO_FRAME_SIZE 15872 + +enum mac_op { + MAC_DEL, + MAC_SET, +}; + +/** + * change_mac - change(add or delete) mac address + * @nic_dev: nic device + * @addr: mac address + * @vlan_id: vlan number to set with the mac + * @op: add or delete the mac + * + * Return 0 - Success, negative - Failure + **/ +static int change_mac(struct hinic_dev *nic_dev, const u8 *addr, + u16 vlan_id, enum mac_op op) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_mac_cmd port_mac_cmd; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + enum hinic_port_cmd cmd; + u16 out_size; + int err; + + if (vlan_id >= VLAN_N_VID) { + netif_err(nic_dev, drv, netdev, "Invalid VLAN number\n"); + return -EINVAL; + } + + if (op == MAC_SET) + cmd = HINIC_PORT_CMD_SET_MAC; + else + cmd = HINIC_PORT_CMD_DEL_MAC; + + port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + port_mac_cmd.vlan_id = vlan_id; + memcpy(port_mac_cmd.mac, addr, ETH_ALEN); + + err = hinic_port_msg_cmd(hwdev, cmd, &port_mac_cmd, + sizeof(port_mac_cmd), + &port_mac_cmd, &out_size); + if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) { + dev_err(&pdev->dev, "Failed to change MAC, ret = %d\n", + port_mac_cmd.status); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_port_add_mac - add mac address + * @nic_dev: nic device + * @addr: mac address + * @vlan_id: vlan number to set with the mac + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_add_mac(struct hinic_dev *nic_dev, + const u8 *addr, u16 vlan_id) +{ + return change_mac(nic_dev, addr, vlan_id, MAC_SET); +} + +/** + * hinic_port_del_mac - remove mac address + * @nic_dev: nic device + * @addr: mac address + * @vlan_id: vlan number that is connected to the mac + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr, + u16 vlan_id) +{ + return change_mac(nic_dev, addr, vlan_id, MAC_DEL); +} + +/** + * hinic_port_get_mac - get the mac address of the nic device + * @nic_dev: nic device + * @addr: returned mac address + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_mac_cmd port_mac_cmd; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + port_mac_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_MAC, + &port_mac_cmd, sizeof(port_mac_cmd), + &port_mac_cmd, &out_size); + if (err || (out_size != sizeof(port_mac_cmd)) || port_mac_cmd.status) { + dev_err(&pdev->dev, "Failed to get mac, ret = %d\n", + port_mac_cmd.status); + return -EFAULT; + } + + memcpy(addr, port_mac_cmd.mac, ETH_ALEN); + return 0; +} + +/** + * hinic_port_set_mtu - set mtu + * @nic_dev: nic device + * @new_mtu: new mtu + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu) +{ + struct net_device *netdev = nic_dev->netdev; + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_mtu_cmd port_mtu_cmd; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + int err, max_frame; + u16 out_size; + 
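/*
 * Illustrative sketch, not part of the patch: the range check that follows
 * bounds the layer-3 MTU from below by HINIC_MIN_MTU_SIZE and the resulting
 * layer-2 frame (MTU + Ethernet header + FCS) from above by
 * HINIC_MAX_JUMBO_FRAME_SIZE.  The same arithmetic as a stand-alone helper,
 * with the two HINIC constants copied from this file and the standard
 * 14-byte Ethernet header / 4-byte FCS assumed:
 */
#include <stdbool.h>

#define SKETCH_MIN_MTU		256	/* HINIC_MIN_MTU_SIZE */
#define SKETCH_MAX_FRAME	15872	/* HINIC_MAX_JUMBO_FRAME_SIZE */
#define SKETCH_ETH_HLEN		14	/* Ethernet header */
#define SKETCH_ETH_FCS_LEN	4	/* frame check sequence */

static bool sketch_mtu_valid(int mtu)
{
	/* reject anything below the device minimum */
	if (mtu < SKETCH_MIN_MTU)
		return false;

	/* the on-wire frame must still fit within the jumbo limit */
	return mtu + SKETCH_ETH_HLEN + SKETCH_ETH_FCS_LEN <= SKETCH_MAX_FRAME;
}
/* With these numbers the largest accepted MTU is 15872 - 18 = 15854. */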
+ if (new_mtu < HINIC_MIN_MTU_SIZE) { + netif_err(nic_dev, drv, netdev, "mtu < MIN MTU size"); + return -EINVAL; + } + + max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + if (max_frame > HINIC_MAX_JUMBO_FRAME_SIZE) { + netif_err(nic_dev, drv, netdev, "mtu > MAX MTU size"); + return -EINVAL; + } + + port_mtu_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + port_mtu_cmd.mtu = new_mtu; + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_CHANGE_MTU, + &port_mtu_cmd, sizeof(port_mtu_cmd), + &port_mtu_cmd, &out_size); + if (err || (out_size != sizeof(port_mtu_cmd)) || port_mtu_cmd.status) { + dev_err(&pdev->dev, "Failed to set mtu, ret = %d\n", + port_mtu_cmd.status); + return -EFAULT; + } + + return 0; +} + +/** + * hinic_port_add_vlan - add vlan to the nic device + * @nic_dev: nic device + * @vlan_id: the vlan number to add + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_vlan_cmd port_vlan_cmd; + + port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + port_vlan_cmd.vlan_id = vlan_id; + + return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_ADD_VLAN, + &port_vlan_cmd, sizeof(port_vlan_cmd), + NULL, NULL); +} + +/** + * hinic_port_del_vlan - delete vlan from the nic device + * @nic_dev: nic device + * @vlan_id: the vlan number to delete + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_vlan_cmd port_vlan_cmd; + + port_vlan_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + port_vlan_cmd.vlan_id = vlan_id; + + return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_DEL_VLAN, + &port_vlan_cmd, sizeof(port_vlan_cmd), + NULL, NULL); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h new file mode 100644 index 000000000000..4cafb9448ea8 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -0,0 +1,68 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_PORT_H +#define HINIC_PORT_H + +#include +#include + +#include "hinic_dev.h" + +struct hinic_port_mac_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 vlan_id; + u16 rsvd1; + unsigned char mac[ETH_ALEN]; +}; + +struct hinic_port_mtu_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd1; + u32 mtu; +}; + +struct hinic_port_vlan_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 vlan_id; +}; + +int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, + u16 vlan_id); + +int hinic_port_del_mac(struct hinic_dev *nic_dev, const u8 *addr, + u16 vlan_id); + +int hinic_port_get_mac(struct hinic_dev *nic_dev, u8 *addr); + +int hinic_port_set_mtu(struct hinic_dev *nic_dev, int new_mtu); + +int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id); + +int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id); + +#endif -- cgit v1.2.3-55-g7522 From c4d06d2d208a6cd86bc76e52473cc853f83cad03 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:55 +0800 Subject: net-next/hinic: Add Rx mode and link event handler Add port management message for setting Rx mode in the card, used for rx_mode netdev operation. The link event handler is used for getting a notification about the link state. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 17 ++ drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 2 + drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 122 ++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 37 +++ drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 17 ++ drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 17 ++ drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 64 ++++- drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 28 +++ drivers/net/ethernet/huawei/hinic/hinic_main.c | 284 ++++++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_port.c | 92 +++++++ drivers/net/ethernet/huawei/hinic/hinic_port.h | 66 +++++ 11 files changed, 745 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index e54a45cd5bbe..5c5b4e974c56 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -19,19 +19,36 @@ #include #include #include +#include +#include #include "hinic_hw_dev.h" #define HINIC_DRV_NAME "hinic" +enum hinic_flags { + HINIC_LINK_UP = BIT(0), + HINIC_INTF_UP = BIT(1), +}; + +struct hinic_rx_mode_work { + struct work_struct work; + u32 rx_mode; +}; + struct hinic_dev { struct net_device *netdev; struct hinic_hwdev *hwdev; u32 msg_enable; + unsigned int flags; + struct semaphore mgmt_lock; unsigned long *vlan_bitmap; + + struct hinic_rx_mode_work rx_mode_work; + struct workqueue_struct *workq; }; #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h index 52eb89c7df9a..1f57301ce527 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h @@ -20,6 +20,8 @@ #define HINIC_CSR_FUNC_ATTR0_ADDR 0x0 #define HINIC_CSR_FUNC_ATTR1_ADDR 0x4 +#define HINIC_CSR_FUNC_ATTR5_ADDR 0x14 + #define HINIC_DMA_ATTR_BASE 0xC80 #define HINIC_ELECTION_BASE 0x4200 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 6bb6c3313fc3..c3122b030740 100644 --- 
a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -229,6 +229,118 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, HINIC_MGMT_MSG_SYNC); } +/** + * hinic_hwdev_cb_register - register callback handler for MGMT events + * @hwdev: the NIC HW device + * @cmd: the mgmt event + * @handle: private data for the handler + * @handler: event handler + **/ +void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev, + enum hinic_mgmt_msg_cmd cmd, void *handle, + void (*handler)(void *handle, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size)) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + struct hinic_nic_cb *nic_cb; + u8 cmd_cb; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "unsupported PCI Function type\n"); + return; + } + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; + nic_cb = &pfhwdev->nic_cb[cmd_cb]; + + nic_cb->handler = handler; + nic_cb->handle = handle; + nic_cb->cb_state = HINIC_CB_ENABLED; +} + +/** + * hinic_hwdev_cb_unregister - unregister callback handler for MGMT events + * @hwdev: the NIC HW device + * @cmd: the mgmt event + **/ +void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev, + enum hinic_mgmt_msg_cmd cmd) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + struct hinic_nic_cb *nic_cb; + u8 cmd_cb; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "unsupported PCI Function type\n"); + return; + } + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; + nic_cb = &pfhwdev->nic_cb[cmd_cb]; + + nic_cb->cb_state &= ~HINIC_CB_ENABLED; + + while (nic_cb->cb_state & HINIC_CB_RUNNING) + schedule(); + + nic_cb->handler = NULL; +} + +/** + * nic_mgmt_msg_handler - nic mgmt event handler + * @handle: private data for the handler + * @buf_in: input buffer + * @in_size: input size + * @buf_out: output buffer + * @out_size: returned output size + **/ +static void nic_mgmt_msg_handler(void *handle, u8 cmd, void *buf_in, + u16 in_size, void *buf_out, u16 *out_size) +{ + struct hinic_pfhwdev *pfhwdev = handle; + enum hinic_cb_state cb_state; + struct hinic_nic_cb *nic_cb; + struct hinic_hwdev *hwdev; + struct hinic_hwif *hwif; + struct pci_dev *pdev; + u8 cmd_cb; + + hwdev = &pfhwdev->hwdev; + hwif = hwdev->hwif; + pdev = hwif->pdev; + + if ((cmd < HINIC_MGMT_MSG_CMD_BASE) || + (cmd >= HINIC_MGMT_MSG_CMD_MAX)) { + dev_err(&pdev->dev, "unknown L2NIC event, cmd = %d\n", cmd); + return; + } + + cmd_cb = cmd - HINIC_MGMT_MSG_CMD_BASE; + + nic_cb = &pfhwdev->nic_cb[cmd_cb]; + + cb_state = cmpxchg(&nic_cb->cb_state, + HINIC_CB_ENABLED, + HINIC_CB_ENABLED | HINIC_CB_RUNNING); + + if ((cb_state == HINIC_CB_ENABLED) && (nic_cb->handler)) + nic_cb->handler(nic_cb->handle, buf_in, + in_size, buf_out, out_size); + else + dev_err(&pdev->dev, "Unhandled NIC Event %d\n", cmd); + + nic_cb->cb_state &= ~HINIC_CB_RUNNING; +} + /** * init_pfhwdev - Initialize the extended components of PF * @pfhwdev: the HW device for PF @@ -248,6 +360,10 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) return err; } + hinic_register_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC, + pfhwdev, nic_mgmt_msg_handler); + + hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE); return 0; } @@ -257,6 +373,12 @@ static int 
init_pfhwdev(struct hinic_pfhwdev *pfhwdev) **/ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev) { + struct hinic_hwdev *hwdev = &pfhwdev->hwdev; + + hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT); + + hinic_unregister_mgmt_msg_cb(&pfhwdev->pf_to_mgmt, HINIC_MOD_L2NIC); + hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index ee9e76ada303..1cd8159766d4 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -18,6 +18,7 @@ #include #include +#include #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" @@ -25,6 +26,9 @@ #define HINIC_MAX_QPS 32 +#define HINIC_MGMT_NUM_MSG_CMD (HINIC_MGMT_MSG_CMD_MAX - \ + HINIC_MGMT_MSG_CMD_BASE) + struct hinic_cap { u16 max_qps; u16 num_qps; @@ -55,6 +59,19 @@ enum hinic_port_cmd { HINIC_PORT_CMD_GET_CAP = 170, }; +enum hinic_mgmt_msg_cmd { + HINIC_MGMT_MSG_CMD_BASE = 160, + + HINIC_MGMT_MSG_CMD_LINK_STATUS = 160, + + HINIC_MGMT_MSG_CMD_MAX, +}; + +enum hinic_cb_state { + HINIC_CB_ENABLED = BIT(0), + HINIC_CB_RUNNING = BIT(1), +}; + struct hinic_hwdev { struct hinic_hwif *hwif; struct msix_entry *msix_entries; @@ -64,12 +81,32 @@ struct hinic_hwdev { struct hinic_cap nic_cap; }; +struct hinic_nic_cb { + void (*handler)(void *handle, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size); + + void *handle; + unsigned long cb_state; +}; + struct hinic_pfhwdev { struct hinic_hwdev hwdev; struct hinic_pf_to_mgmt pf_to_mgmt; + + struct hinic_nic_cb nic_cb[HINIC_MGMT_NUM_MSG_CMD]; }; +void hinic_hwdev_cb_register(struct hinic_hwdev *hwdev, + enum hinic_mgmt_msg_cmd cmd, void *handle, + void (*handler)(void *handle, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size)); + +void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev, + enum hinic_mgmt_msg_cmd cmd); + int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c index 0cfada793a7a..b340695bff8b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c @@ -115,6 +115,23 @@ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index) return 0; } +/** + * hinic_set_pf_action - set action on pf channel + * @hwif: the HW interface of a pci function device + * @action: action on pf channel + * + * Return 0 - Success, negative - Failure + **/ +void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action) +{ + u32 attr5 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR); + + attr5 = HINIC_FA5_CLEAR(attr5, PF_ACTION); + attr5 |= HINIC_FA5_SET(action, PF_ACTION); + + hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5); +} + /** * hwif_ready - test if the HW is ready for use * @hwif: the HW interface of a pci function device diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h index 707a04614d5b..228069895f3b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -73,6 +73,15 @@ #define HINIC_FA1_GET(val, member) \ (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK) +#define HINIC_FA5_PF_ACTION_SHIFT 0 +#define HINIC_FA5_PF_ACTION_MASK 0xFFFF + +#define HINIC_FA5_SET(val, member) \ + (((u32)(val) & 
HINIC_FA5_##member##_MASK) << HINIC_FA5_##member##_SHIFT) + +#define HINIC_FA5_CLEAR(val, member) \ + ((val) & (~(HINIC_FA5_##member##_MASK << HINIC_FA5_##member##_SHIFT))) + #define HINIC_PPF_ELECTION_IDX_SHIFT 0 #define HINIC_PPF_ELECTION_IDX_MASK 0x1F @@ -166,6 +175,12 @@ enum hinic_node_id { HINIC_NODE_ID_MGMT = 21, }; +enum hinic_pf_action { + HINIC_PF_MGMT_INIT = 0x0, + + HINIC_PF_MGMT_ACTIVE = 0x11, +}; + struct hinic_func_attr { u16 func_idx; u8 pf_idx; @@ -212,6 +227,8 @@ int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index, int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index); +void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action); + int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev); void hinic_free_hwif(struct hinic_hwif *hwif); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index 0150b71a5aba..278dc13f3dae 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -72,6 +72,46 @@ enum msg_ack_type { MSG_NO_ACK = 1, }; +/** + * hinic_register_mgmt_msg_cb - register msg handler for a msg from a module + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that this handler will handle its messages + * @handle: private data for the callback + * @callback: the handler that will handle messages + **/ +void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, + void *handle, + void (*callback)(void *handle, + u8 cmd, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size)) +{ + struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod]; + + mgmt_cb->cb = callback; + mgmt_cb->handle = handle; + mgmt_cb->state = HINIC_MGMT_CB_ENABLED; +} + +/** + * hinic_unregister_mgmt_msg_cb - unregister msg handler for a msg from a module + * @pf_to_mgmt: PF to MGMT channel + * @mod: module in the chip that this handler handles its messages + **/ +void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod) +{ + struct hinic_mgmt_cb *mgmt_cb = &pf_to_mgmt->mgmt_cb[mod]; + + mgmt_cb->state &= ~HINIC_MGMT_CB_ENABLED; + + while (mgmt_cb->state & HINIC_MGMT_CB_RUNNING) + schedule(); + + mgmt_cb->cb = NULL; +} + /** * prepare_header - prepare the header of the message * @pf_to_mgmt: PF to MGMT channel @@ -337,9 +377,31 @@ static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt, struct hinic_hwif *hwif = pf_to_mgmt->hwif; struct pci_dev *pdev = hwif->pdev; u8 *buf_out = recv_msg->buf_out; + struct hinic_mgmt_cb *mgmt_cb; + unsigned long cb_state; u16 out_size = 0; - dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n", recv_msg->mod); + if (recv_msg->mod >= HINIC_MOD_MAX) { + dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n", + recv_msg->mod); + return; + } + + mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod]; + + cb_state = cmpxchg(&mgmt_cb->state, + HINIC_MGMT_CB_ENABLED, + HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING); + + if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb)) + mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd, + recv_msg->msg, recv_msg->msg_len, + buf_out, &out_size); + else + dev_err(&pdev->dev, "No MGMT msg handler, mod = %d\n", + recv_msg->mod); + + mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING; if (!recv_msg->async_mgmt_to_pf) /* MGMT sent sync msg, send the response */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h 
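hinic_register_mgmt_msg_cb()/hinic_unregister_mgmt_msg_cb() and the receive dispatch above use the same two-bit convention as the AEQ and L2NIC callback tables earlier in this series: a handler may be invoked only while its ENABLED bit is set, a RUNNING bit is set atomically (via cmpxchg) for the duration of the call, and unregistration clears ENABLED and then waits for RUNNING to drain before the handler pointer is cleared. A minimal stand-alone sketch of that convention, with user-space C11 atomics standing in for the kernel's cmpxchg and purely illustrative names:

#include <stdatomic.h>
#include <sched.h>

#define CB_ENABLED	(1UL << 0)
#define CB_RUNNING	(1UL << 1)

struct sketch_cb {
	void (*fn)(void *arg);
	void *arg;
	atomic_ulong state;
};

/* register: install the handler, then allow invocations */
static void sketch_register(struct sketch_cb *cb, void (*fn)(void *), void *arg)
{
	cb->fn = fn;
	cb->arg = arg;
	atomic_store(&cb->state, CB_ENABLED);
}

/* dispatch: run the handler only if it is enabled and not being torn down */
static void sketch_dispatch(struct sketch_cb *cb)
{
	unsigned long expected = CB_ENABLED;

	/* ENABLED -> ENABLED|RUNNING; fails if disabled or already running */
	if (atomic_compare_exchange_strong(&cb->state, &expected,
					   CB_ENABLED | CB_RUNNING)) {
		if (cb->fn)
			cb->fn(cb->arg);
		atomic_fetch_and(&cb->state, ~CB_RUNNING);
	}
}

/* unregister: block new invocations, then wait out an in-flight one */
static void sketch_unregister(struct sketch_cb *cb)
{
	atomic_fetch_and(&cb->state, ~CB_ENABLED);

	while (atomic_load(&cb->state) & CB_RUNNING)
		sched_yield();	/* the driver calls schedule() here */

	cb->fn = NULL;
}

The point of the extra RUNNING bit is that unregistration can be called while an event is being delivered; clearing ENABLED alone would not make it safe to NULL the pointer.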
index eca7ad865bca..8021406cca7c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -19,6 +19,7 @@ #include #include #include +#include #include "hinic_hw_if.h" #include "hinic_hw_api_cmd.h" @@ -67,6 +68,11 @@ enum hinic_cfg_cmd { HINIC_CFG_NIC_CAP = 0, }; +enum hinic_mgmt_cb_state { + HINIC_MGMT_CB_ENABLED = BIT(0), + HINIC_MGMT_CB_RUNNING = BIT(1), +}; + struct hinic_recv_msg { u8 *msg; u8 *buf_out; @@ -81,6 +87,15 @@ struct hinic_recv_msg { u16 msg_id; }; +struct hinic_mgmt_cb { + void (*cb)(void *handle, u8 cmd, + void *buf_in, u16 in_size, + void *buf_out, u16 *out_size); + + void *handle; + unsigned long state; +}; + struct hinic_pf_to_mgmt { struct hinic_hwif *hwif; @@ -92,8 +107,21 @@ struct hinic_pf_to_mgmt { struct hinic_recv_msg recv_msg_from_mgmt; struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX]; + + struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX]; }; +void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod, + void *handle, + void (*callback)(void *handle, + u8 cmd, void *buf_in, + u16 in_size, void *buf_out, + u16 *out_size)); + +void hinic_unregister_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt, + enum hinic_mod_type mod); + int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, enum hinic_mod_type mod, u8 cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 772e4e6e68af..7aebc6207931 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -24,9 +24,11 @@ #include #include #include +#include #include #include #include +#include #include #include "hinic_hw_dev.h" @@ -39,12 +41,99 @@ MODULE_LICENSE("GPL"); #define PCI_DEVICE_ID_HI1822_PF 0x1822 +#define HINIC_WQ_NAME "hinic_dev" + #define MSG_ENABLE_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ NETIF_MSG_IFUP | \ NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR) #define VLAN_BITMAP_SIZE(nic_dev) (ALIGN(VLAN_N_VID, 8) / 8) +#define work_to_rx_mode_work(work) \ + container_of(work, struct hinic_rx_mode_work, work) + +#define rx_mode_work_to_nic_dev(rx_mode_work) \ + container_of(rx_mode_work, struct hinic_dev, rx_mode_work) + +static int change_mac_addr(struct net_device *netdev, const u8 *addr); + +static int hinic_open(struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + enum hinic_port_link_state link_state; + int err, ret; + + err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to set port state\n"); + return err; + } + + /* Wait up to 3 sec between port enable to link state */ + msleep(3000); + + down(&nic_dev->mgmt_lock); + + err = hinic_port_link_state(nic_dev, &link_state); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to get link state\n"); + goto err_port_link; + } + + if (link_state == HINIC_LINK_STATE_UP) + nic_dev->flags |= HINIC_LINK_UP; + + nic_dev->flags |= HINIC_INTF_UP; + + if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == + (HINIC_LINK_UP | HINIC_INTF_UP)) { + netif_info(nic_dev, drv, netdev, "link + intf UP\n"); + netif_carrier_on(netdev); + netif_tx_wake_all_queues(netdev); + } + + up(&nic_dev->mgmt_lock); + + netif_info(nic_dev, drv, netdev, "HINIC_INTF is UP\n"); + return 0; + +err_port_link: + up(&nic_dev->mgmt_lock); + ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); + if (ret) + netif_warn(nic_dev, drv, 
netdev, + "Failed to revert port state\n"); + return err; +} + +static int hinic_close(struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + unsigned int flags; + int err; + + down(&nic_dev->mgmt_lock); + + flags = nic_dev->flags; + nic_dev->flags &= ~HINIC_INTF_UP; + + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + up(&nic_dev->mgmt_lock); + + err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to set port state\n"); + nic_dev->flags |= (flags & HINIC_INTF_UP); + return err; + } + + netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n"); + return 0; +} + static int hinic_change_mtu(struct net_device *netdev, int new_mtu) { struct hinic_dev *nic_dev = netdev_priv(netdev); @@ -118,6 +207,77 @@ static int hinic_set_mac_addr(struct net_device *netdev, void *addr) return err; } +/** + * add_mac_addr - add mac address to network device + * @netdev: network device + * @addr: mac address to add + * + * Return 0 - Success, negative - Failure + **/ +static int add_mac_addr(struct net_device *netdev, const u8 *addr) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + u16 vid = 0; + int err; + + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + down(&nic_dev->mgmt_lock); + + do { + err = hinic_port_add_mac(nic_dev, addr, vid); + if (err) { + netif_err(nic_dev, drv, netdev, "Failed to add mac\n"); + break; + } + + vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); + } while (vid != VLAN_N_VID); + + up(&nic_dev->mgmt_lock); + return err; +} + +/** + * remove_mac_addr - remove mac address from network device + * @netdev: network device + * @addr: mac address to remove + * + * Return 0 - Success, negative - Failure + **/ +static int remove_mac_addr(struct net_device *netdev, const u8 *addr) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + u16 vid = 0; + int err; + + if (!is_valid_ether_addr(addr)) + return -EADDRNOTAVAIL; + + netif_info(nic_dev, drv, netdev, "remove mac addr = %02x %02x %02x %02x %02x %02x\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + down(&nic_dev->mgmt_lock); + + do { + err = hinic_port_del_mac(nic_dev, addr, vid); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to delete mac\n"); + break; + } + + vid = find_next_bit(nic_dev->vlan_bitmap, VLAN_N_VID, vid + 1); + } while (vid != VLAN_N_VID); + + up(&nic_dev->mgmt_lock); + return err; +} + static int hinic_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto, u16 vid) { @@ -182,12 +342,56 @@ err_del_vlan: return err; } +static void set_rx_mode(struct work_struct *work) +{ + struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work); + struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work); + + netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n"); + + hinic_port_set_rx_mode(nic_dev, rx_mode_work->rx_mode); + + __dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); + __dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); +} + +static void hinic_set_rx_mode(struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_rx_mode_work *rx_mode_work; + u32 rx_mode; + + rx_mode_work = &nic_dev->rx_mode_work; + + rx_mode = HINIC_RX_MODE_UC | + HINIC_RX_MODE_MC | + HINIC_RX_MODE_BC; + + if (netdev->flags & IFF_PROMISC) + rx_mode |= 
HINIC_RX_MODE_PROMISC; + else if (netdev->flags & IFF_ALLMULTI) + rx_mode |= HINIC_RX_MODE_MC_ALL; + + rx_mode_work->rx_mode = rx_mode; + + queue_work(nic_dev->workq, &rx_mode_work->work); +} + +netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + return NETDEV_TX_BUSY; +} + static const struct net_device_ops hinic_netdev_ops = { + .ndo_open = hinic_open, + .ndo_stop = hinic_close, .ndo_change_mtu = hinic_change_mtu, .ndo_set_mac_address = hinic_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = hinic_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, + .ndo_set_rx_mode = hinic_set_rx_mode, + .ndo_start_xmit = hinic_xmit_frame, /* more operations should be filled */ }; @@ -200,6 +404,57 @@ static void netdev_features_init(struct net_device *netdev) netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; } +/** + * link_status_event_handler - link event handler + * @handle: nic device for the handler + * @buf_in: input buffer + * @in_size: input size + * @buf_in: output buffer + * @out_size: returned output size + * + * Return 0 - Success, negative - Failure + **/ +static void link_status_event_handler(void *handle, void *buf_in, u16 in_size, + void *buf_out, u16 *out_size) +{ + struct hinic_port_link_status *link_status, *ret_link_status; + struct hinic_dev *nic_dev = handle; + + link_status = buf_in; + + if (link_status->link == HINIC_LINK_STATE_UP) { + down(&nic_dev->mgmt_lock); + + nic_dev->flags |= HINIC_LINK_UP; + + if ((nic_dev->flags & (HINIC_LINK_UP | HINIC_INTF_UP)) == + (HINIC_LINK_UP | HINIC_INTF_UP)) { + netif_carrier_on(nic_dev->netdev); + netif_tx_wake_all_queues(nic_dev->netdev); + } + + up(&nic_dev->mgmt_lock); + + netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n"); + } else { + down(&nic_dev->mgmt_lock); + + nic_dev->flags &= ~HINIC_LINK_UP; + + netif_carrier_off(nic_dev->netdev); + netif_tx_disable(nic_dev->netdev); + + up(&nic_dev->mgmt_lock); + + netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is DOWN\n"); + } + + ret_link_status = buf_out; + ret_link_status->status = 0; + + *out_size = sizeof(*ret_link_status); +} + /** * nic_dev_init - Initialize the NIC device * @pdev: the NIC pci device @@ -208,6 +463,7 @@ static void netdev_features_init(struct net_device *netdev) **/ static int nic_dev_init(struct pci_dev *pdev) { + struct hinic_rx_mode_work *rx_mode_work; struct hinic_dev *nic_dev; struct net_device *netdev; struct hinic_hwdev *hwdev; @@ -239,6 +495,7 @@ static int nic_dev_init(struct pci_dev *pdev) nic_dev->netdev = netdev; nic_dev->hwdev = hwdev; nic_dev->msg_enable = MSG_ENABLE_DEFAULT; + nic_dev->flags = 0; sema_init(&nic_dev->mgmt_lock, 1); @@ -250,6 +507,12 @@ static int nic_dev_init(struct pci_dev *pdev) goto err_vlan_bitmap; } + nic_dev->workq = create_singlethread_workqueue(HINIC_WQ_NAME); + if (!nic_dev->workq) { + err = -ENOMEM; + goto err_workq; + } + pci_set_drvdata(pdev, netdev); err = hinic_port_get_mac(nic_dev, netdev->dev_addr); @@ -268,10 +531,16 @@ static int nic_dev_init(struct pci_dev *pdev) goto err_set_mtu; } + rx_mode_work = &nic_dev->rx_mode_work; + INIT_WORK(&rx_mode_work->work, set_rx_mode); + netdev_features_init(netdev); netif_carrier_off(netdev); + hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS, + nic_dev, link_status_event_handler); + err = register_netdev(netdev); if (err) { dev_err(&pdev->dev, "Failed to register netdev\n"); @@ -281,10 +550,16 @@ static int nic_dev_init(struct pci_dev *pdev) return 0; 
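/*
 * Illustrative sketch, not part of the patch: hinic_set_rx_mode() above only
 * computes the desired filter flags and defers the actual port command to the
 * driver workqueue, presumably because .ndo_set_rx_mode runs in atomic
 * context while the synchronous management-channel call can sleep.  The flag
 * computation, reduced to a stand-alone helper (bit values and flag names
 * copied from this series; the promisc/allmulti inputs correspond to the
 * standard IFF_PROMISC/IFF_ALLMULTI netdev flags):
 */
#include <stdint.h>

#define SKETCH_RX_MODE_UC	(1u << 0)	/* unicast */
#define SKETCH_RX_MODE_MC	(1u << 1)	/* multicast */
#define SKETCH_RX_MODE_BC	(1u << 2)	/* broadcast */
#define SKETCH_RX_MODE_MC_ALL	(1u << 3)	/* all multicast */
#define SKETCH_RX_MODE_PROMISC	(1u << 4)	/* promiscuous */

static uint32_t sketch_rx_mode(int promisc, int allmulti)
{
	/* unicast, multicast and broadcast are always accepted */
	uint32_t rx_mode = SKETCH_RX_MODE_UC |
			   SKETCH_RX_MODE_MC |
			   SKETCH_RX_MODE_BC;

	if (promisc)
		rx_mode |= SKETCH_RX_MODE_PROMISC;
	else if (allmulti)
		rx_mode |= SKETCH_RX_MODE_MC_ALL;

	return rx_mode;
}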
err_reg_netdev: + hinic_hwdev_cb_unregister(nic_dev->hwdev, + HINIC_MGMT_MSG_CMD_LINK_STATUS); + cancel_work_sync(&rx_mode_work->work); + err_set_mtu: err_add_mac: pci_set_drvdata(pdev, NULL); + destroy_workqueue(nic_dev->workq); +err_workq: err_vlan_bitmap: free_netdev(netdev); @@ -357,11 +632,20 @@ static void hinic_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_rx_mode_work *rx_mode_work; unregister_netdev(netdev); + hinic_hwdev_cb_unregister(nic_dev->hwdev, + HINIC_MGMT_MSG_CMD_LINK_STATUS); + + rx_mode_work = &nic_dev->rx_mode_work; + cancel_work_sync(&rx_mode_work->work); + pci_set_drvdata(pdev, NULL); + destroy_workqueue(nic_dev->workq); + hinic_free_hwdev(nic_dev->hwdev); free_netdev(netdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index 5b249e8121bd..0dafede7169e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -222,3 +222,95 @@ int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id) &port_vlan_cmd, sizeof(port_vlan_cmd), NULL, NULL); } + +/** + * hinic_port_set_rx_mode - set rx mode in the nic device + * @nic_dev: nic device + * @rx_mode: the rx mode to set + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_rx_mode_cmd rx_mode_cmd; + + rx_mode_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwdev->hwif); + rx_mode_cmd.rx_mode = rx_mode; + + return hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_MODE, + &rx_mode_cmd, sizeof(rx_mode_cmd), + NULL, NULL); +} + +/** + * hinic_port_link_state - get the link state + * @nic_dev: nic device + * @link_state: the returned link state + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_link_state(struct hinic_dev *nic_dev, + enum hinic_port_link_state *link_state) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct hinic_port_link_cmd link_cmd; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "unsupported PCI Function type\n"); + return -EINVAL; + } + + link_cmd.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_STATE, + &link_cmd, sizeof(link_cmd), + &link_cmd, &out_size); + if (err || (out_size != sizeof(link_cmd)) || link_cmd.status) { + dev_err(&pdev->dev, "Failed to get link state, ret = %d\n", + link_cmd.status); + return -EINVAL; + } + + *link_state = link_cmd.state; + return 0; +} + +/** + * hinic_port_set_state - set port state + * @nic_dev: nic device + * @state: the state to set + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_port_state_cmd port_state; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "unsupported PCI Function type\n"); + return -EINVAL; + } + + port_state.state = state; + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PORT_STATE, + &port_state, sizeof(port_state), + &port_state, &out_size); + if (err || (out_size != sizeof(port_state)) || port_state.status) { + 
dev_err(&pdev->dev, "Failed to set port state, ret = %d\n", + port_state.status); + return -EFAULT; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index 4cafb9448ea8..3a8da8eadb9b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -18,9 +18,28 @@ #include #include +#include #include "hinic_dev.h" +enum hinic_rx_mode { + HINIC_RX_MODE_UC = BIT(0), + HINIC_RX_MODE_MC = BIT(1), + HINIC_RX_MODE_BC = BIT(2), + HINIC_RX_MODE_MC_ALL = BIT(3), + HINIC_RX_MODE_PROMISC = BIT(4), +}; + +enum hinic_port_link_state { + HINIC_LINK_STATE_DOWN, + HINIC_LINK_STATE_UP, +}; + +enum hinic_port_state { + HINIC_PORT_DISABLE = 0, + HINIC_PORT_ENABLE = 3, +}; + struct hinic_port_mac_cmd { u8 status; u8 version; @@ -51,6 +70,45 @@ struct hinic_port_vlan_cmd { u16 vlan_id; }; +struct hinic_port_rx_mode_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd; + u32 rx_mode; +}; + +struct hinic_port_link_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 state; + u8 rsvd1; +}; + +struct hinic_port_state_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u8 state; + u8 rsvd1[3]; +}; + +struct hinic_port_link_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 rsvd1; + u8 link; + u8 rsvd2; +}; + int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id); @@ -65,4 +123,12 @@ int hinic_port_add_vlan(struct hinic_dev *nic_dev, u16 vlan_id); int hinic_port_del_vlan(struct hinic_dev *nic_dev, u16 vlan_id); +int hinic_port_set_rx_mode(struct hinic_dev *nic_dev, u32 rx_mode); + +int hinic_port_link_state(struct hinic_dev *nic_dev, + enum hinic_port_link_state *link_state); + +int hinic_port_set_state(struct hinic_dev *nic_dev, + enum hinic_port_state state); + #endif -- cgit v1.2.3-55-g7522 From c3e79baf1b03b3ba53f60a8698f2fd9462a906b5 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:56 +0800 Subject: net-next/hinic: Add logical Txq and Rxq Create the logical queues of the nic. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/Makefile | 5 +- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 5 + drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 131 +++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 20 +++ drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 144 +++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 46 ++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 32 +++++ drivers/net/ethernet/huawei/hinic/hinic_main.c | 172 ++++++++++++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_rx.c | 72 ++++++++++ drivers/net/ethernet/huawei/hinic/hinic_rx.h | 46 ++++++ drivers/net/ethernet/huawei/hinic/hinic_tx.c | 75 ++++++++++ drivers/net/ethernet/huawei/hinic/hinic_tx.h | 49 +++++++ 12 files changed, 793 insertions(+), 4 deletions(-) create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_io.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_rx.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_rx.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_tx.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_tx.h diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile index dbb1b9dbaa59..f60c449fbb40 100644 --- a/drivers/net/ethernet/huawei/hinic/Makefile +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_HINIC) += hinic.o -hinic-y := hinic_main.o hinic_port.o hinic_hw_dev.o hinic_hw_mgmt.o \ - hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o +hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \ + hinic_hw_io.o hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o \ + hinic_hw_if.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index 5c5b4e974c56..5b8231dc3ff1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -23,6 +23,8 @@ #include #include "hinic_hw_dev.h" +#include "hinic_tx.h" +#include "hinic_rx.h" #define HINIC_DRV_NAME "hinic" @@ -49,6 +51,9 @@ struct hinic_dev { struct hinic_rx_mode_work rx_mode_work; struct workqueue_struct *workq; + + struct hinic_txq *txqs; + struct hinic_rxq *rxqs; }; #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index c3122b030740..5ae1c3682be0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -25,6 +25,8 @@ #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #include "hinic_hw_mgmt.h" +#include "hinic_hw_qp.h" +#include "hinic_hw_io.h" #include "hinic_hw_dev.h" #define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \ @@ -229,6 +231,99 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, HINIC_MGMT_MSG_SYNC); } +/** + * get_base_qpn - get the first qp number + * @hwdev: the NIC HW device + * @base_qpn: returned qp number + * + * Return 0 - Success, negative - Failure + **/ +static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn) +{ + struct hinic_cmd_base_qpn cmd_base_qpn; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + cmd_base_qpn.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_GLOBAL_QPN, + &cmd_base_qpn, sizeof(cmd_base_qpn), + &cmd_base_qpn, 
&out_size); + if (err || (out_size != sizeof(cmd_base_qpn)) || cmd_base_qpn.status) { + dev_err(&pdev->dev, "Failed to get base qpn, status = %d\n", + cmd_base_qpn.status); + return -EFAULT; + } + + *base_qpn = cmd_base_qpn.qpn; + return 0; +} + +/** + * hinic_hwdev_ifup - Preparing the HW for passing IO + * @hwdev: the NIC HW device + * + * Return 0 - Success, negative - Failure + **/ +int hinic_hwdev_ifup(struct hinic_hwdev *hwdev) +{ + struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; + struct hinic_cap *nic_cap = &hwdev->nic_cap; + struct hinic_hwif *hwif = hwdev->hwif; + int err, num_aeqs, num_ceqs, num_qps; + struct msix_entry *sq_msix_entries; + struct msix_entry *rq_msix_entries; + struct pci_dev *pdev = hwif->pdev; + u16 base_qpn; + + err = get_base_qpn(hwdev, &base_qpn); + if (err) { + dev_err(&pdev->dev, "Failed to get global base qp number\n"); + return err; + } + + num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); + num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); + err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, 0, NULL); + if (err) { + dev_err(&pdev->dev, "Failed to init IO channel\n"); + return err; + } + + num_qps = nic_cap->num_qps; + sq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs]; + rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps]; + + err = hinic_io_create_qps(func_to_io, base_qpn, num_qps, + sq_msix_entries, rq_msix_entries); + if (err) { + dev_err(&pdev->dev, "Failed to create QPs\n"); + goto err_create_qps; + } + + return 0; + +err_create_qps: + hinic_io_free(func_to_io); + return err; +} + +/** + * hinic_hwdev_ifdown - Closing the HW for passing IO + * @hwdev: the NIC HW device + * + **/ +void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev) +{ + struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; + struct hinic_cap *nic_cap = &hwdev->nic_cap; + + hinic_io_destroy_qps(func_to_io, nic_cap->num_qps); + hinic_io_free(func_to_io); +} + /** * hinic_hwdev_cb_register - register callback handler for MGMT events * @hwdev: the NIC HW device @@ -499,3 +594,39 @@ int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev) return nic_cap->num_qps; } + +/** + * hinic_hwdev_get_sq - get SQ + * @hwdev: the NIC HW device + * @i: the position of the SQ + * + * Return: the SQ in the i position + **/ +struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i) +{ + struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; + struct hinic_qp *qp = &func_to_io->qps[i]; + + if (i >= hinic_hwdev_num_qps(hwdev)) + return NULL; + + return &qp->sq; +} + +/** + * hinic_hwdev_get_sq - get RQ + * @hwdev: the NIC HW device + * @i: the position of the RQ + * + * Return: the RQ in the i position + **/ +struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i) +{ + struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; + struct hinic_qp *qp = &func_to_io->qps[i]; + + if (i >= hinic_hwdev_num_qps(hwdev)) + return NULL; + + return &qp->rq; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index 1cd8159766d4..81c2c6e92898 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -23,6 +23,8 @@ #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #include "hinic_hw_mgmt.h" +#include "hinic_hw_qp.h" +#include "hinic_hw_io.h" #define HINIC_MAX_QPS 32 @@ -72,11 +74,21 @@ enum hinic_cb_state { HINIC_CB_RUNNING = BIT(1), }; +struct hinic_cmd_base_qpn { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 qpn; +}; + struct 
hinic_hwdev { struct hinic_hwif *hwif; struct msix_entry *msix_entries; struct hinic_aeqs aeqs; + struct hinic_func_to_io func_to_io; struct hinic_cap nic_cap; }; @@ -111,10 +123,18 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, void *buf_in, u16 in_size, void *buf_out, u16 *out_size); +int hinic_hwdev_ifup(struct hinic_hwdev *hwdev); + +void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev); + struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev); void hinic_free_hwdev(struct hinic_hwdev *hwdev); int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev); +struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i); + +struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c new file mode 100644 index 000000000000..ebe28ee9af9c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c @@ -0,0 +1,144 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_qp.h" +#include "hinic_hw_io.h" + +/** + * init_qp - Initialize a Queue Pair + * @func_to_io: func to io channel that holds the IO components + * @qp: pointer to the qp to initialize + * @q_id: the id of the qp + * @sq_msix_entry: msix entry for sq + * @rq_msix_entry: msix entry for rq + * + * Return 0 - Success, negative - Failure + **/ +static int init_qp(struct hinic_func_to_io *func_to_io, + struct hinic_qp *qp, int q_id, + struct msix_entry *sq_msix_entry, + struct msix_entry *rq_msix_entry) +{ + /* should be implemented */ + return 0; +} + +/** + * destroy_qp - Clean the resources of a Queue Pair + * @func_to_io: func to io channel that holds the IO components + * @qp: pointer to the qp to clean + **/ +static void destroy_qp(struct hinic_func_to_io *func_to_io, + struct hinic_qp *qp) +{ + /* should be implemented */ +} + +/** + * hinic_io_create_qps - Create Queue Pairs + * @func_to_io: func to io channel that holds the IO components + * @base_qpn: base qp number + * @num_qps: number queue pairs to create + * @sq_msix_entry: msix entries for sq + * @rq_msix_entry: msix entries for rq + * + * Return 0 - Success, negative - Failure + **/ +int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, + u16 base_qpn, int num_qps, + struct msix_entry *sq_msix_entries, + struct msix_entry *rq_msix_entries) +{ + struct hinic_hwif *hwif = func_to_io->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t qps_size; + int i, j, err; + + qps_size = num_qps * sizeof(*func_to_io->qps); + func_to_io->qps = devm_kzalloc(&pdev->dev, qps_size, GFP_KERNEL); + if (!func_to_io->qps) + return -ENOMEM; + + for (i = 0; i < num_qps; i++) { + err = init_qp(func_to_io, &func_to_io->qps[i], i, + &sq_msix_entries[i], &rq_msix_entries[i]); + if (err) { + dev_err(&pdev->dev, "Failed to create QP %d\n", i); + goto err_init_qp; + } + } + + return 0; + +err_init_qp: + for (j = 0; j < i; j++) + 
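+ /* roll back only the QPs that were initialized before the failure */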
destroy_qp(func_to_io, &func_to_io->qps[j]); + + devm_kfree(&pdev->dev, func_to_io->qps); + return err; +} + +/** + * hinic_io_destroy_qps - Destroy the IO Queue Pairs + * @func_to_io: func to io channel that holds the IO components + * @num_qps: number queue pairs to destroy + **/ +void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps) +{ + struct hinic_hwif *hwif = func_to_io->hwif; + struct pci_dev *pdev = hwif->pdev; + int i; + + for (i = 0; i < num_qps; i++) + destroy_qp(func_to_io, &func_to_io->qps[i]); + + devm_kfree(&pdev->dev, func_to_io->qps); +} + +/** + * hinic_io_init - Initialize the IO components + * @func_to_io: func to io channel that holds the IO components + * @hwif: HW interface for accessing IO + * @max_qps: maximum QPs in HW + * @num_ceqs: number completion event queues + * @ceq_msix_entries: msix entries for ceqs + * + * Return 0 - Success, negative - Failure + **/ +int hinic_io_init(struct hinic_func_to_io *func_to_io, + struct hinic_hwif *hwif, u16 max_qps, int num_ceqs, + struct msix_entry *ceq_msix_entries) +{ + func_to_io->hwif = hwif; + func_to_io->qps = NULL; + func_to_io->max_qps = max_qps; + + return 0; +} + +/** + * hinic_io_free - Free the IO components + * @func_to_io: func to io channel that holds the IO components + **/ +void hinic_io_free(struct hinic_func_to_io *func_to_io) +{ +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h new file mode 100644 index 000000000000..7cdcffd4cf77 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h @@ -0,0 +1,46 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_IO_H +#define HINIC_HW_IO_H + +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_qp.h" + +struct hinic_func_to_io { + struct hinic_hwif *hwif; + + struct hinic_qp *qps; + u16 max_qps; +}; + +int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, + u16 base_qpn, int num_qps, + struct msix_entry *sq_msix_entries, + struct msix_entry *rq_msix_entries); + +void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, + int num_qps); + +int hinic_io_init(struct hinic_func_to_io *func_to_io, + struct hinic_hwif *hwif, u16 max_qps, int num_ceqs, + struct msix_entry *ceq_msix_entries); + +void hinic_io_free(struct hinic_func_to_io *func_to_io); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h new file mode 100644 index 000000000000..64330fb6f10f --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -0,0 +1,32 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_QP_H +#define HINIC_HW_QP_H + +struct hinic_sq { + /* should be implemented */ +}; + +struct hinic_rq { + /* should be implemented */ +}; + +struct hinic_qp { + struct hinic_sq sq; + struct hinic_rq rq; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 7aebc6207931..22d5b61b0426 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -31,8 +31,11 @@ #include #include +#include "hinic_hw_qp.h" #include "hinic_hw_dev.h" #include "hinic_port.h" +#include "hinic_tx.h" +#include "hinic_rx.h" #include "hinic_dev.h" MODULE_AUTHOR("Huawei Technologies CO., Ltd"); @@ -57,17 +60,164 @@ MODULE_LICENSE("GPL"); static int change_mac_addr(struct net_device *netdev, const u8 *addr); +/** + * create_txqs - Create the Logical Tx Queues of specific NIC device + * @nic_dev: the specific NIC device + * + * Return 0 - Success, negative - Failure + **/ +static int create_txqs(struct hinic_dev *nic_dev) +{ + int err, i, j, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); + struct net_device *netdev = nic_dev->netdev; + size_t txq_size; + + if (nic_dev->txqs) + return -EINVAL; + + txq_size = num_txqs * sizeof(*nic_dev->txqs); + nic_dev->txqs = devm_kzalloc(&netdev->dev, txq_size, GFP_KERNEL); + if (!nic_dev->txqs) + return -ENOMEM; + + for (i = 0; i < num_txqs; i++) { + struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i); + + err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to init Txq\n"); + goto err_init_txq; + } + } + + return 0; + +err_init_txq: + for (j = 0; j < i; j++) + hinic_clean_txq(&nic_dev->txqs[j]); + + devm_kfree(&netdev->dev, nic_dev->txqs); + return err; +} + +/** + * free_txqs - Free the Logical Tx Queues of specific NIC device + * @nic_dev: the specific NIC device + **/ +static void free_txqs(struct hinic_dev *nic_dev) +{ + int i, num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); + struct net_device *netdev = nic_dev->netdev; + + if (!nic_dev->txqs) + return; + + for (i = 0; i < num_txqs; i++) + hinic_clean_txq(&nic_dev->txqs[i]); + + devm_kfree(&netdev->dev, nic_dev->txqs); + nic_dev->txqs = NULL; +} + +/** + * create_txqs - Create the Logical Rx Queues of specific NIC device + * @nic_dev: the specific NIC device + * + * Return 0 - Success, negative - Failure + **/ +static int create_rxqs(struct hinic_dev *nic_dev) +{ + int err, i, j, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); + struct net_device *netdev = nic_dev->netdev; + size_t rxq_size; + + if (nic_dev->rxqs) + return -EINVAL; + + rxq_size = num_rxqs * sizeof(*nic_dev->rxqs); + nic_dev->rxqs = devm_kzalloc(&netdev->dev, rxq_size, GFP_KERNEL); + if (!nic_dev->rxqs) + return -ENOMEM; + + for (i = 0; i < num_rxqs; i++) { + struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i); + + err = hinic_init_rxq(&nic_dev->rxqs[i], rq, netdev); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to init rxq\n"); + goto err_init_rxq; + } + } + + return 0; + +err_init_rxq: + for (j = 0; j < i; j++) + hinic_clean_rxq(&nic_dev->rxqs[j]); + + devm_kfree(&netdev->dev, nic_dev->rxqs); + return err; +} + +/** + * free_txqs - Free the Logical Rx Queues of specific NIC device 
+ * @nic_dev: the specific NIC device + **/ +static void free_rxqs(struct hinic_dev *nic_dev) +{ + int i, num_rxqs = hinic_hwdev_num_qps(nic_dev->hwdev); + struct net_device *netdev = nic_dev->netdev; + + if (!nic_dev->rxqs) + return; + + for (i = 0; i < num_rxqs; i++) + hinic_clean_rxq(&nic_dev->rxqs[i]); + + devm_kfree(&netdev->dev, nic_dev->rxqs); + nic_dev->rxqs = NULL; +} + static int hinic_open(struct net_device *netdev) { struct hinic_dev *nic_dev = netdev_priv(netdev); enum hinic_port_link_state link_state; - int err, ret; + int err, ret, num_qps; + + if (!(nic_dev->flags & HINIC_INTF_UP)) { + err = hinic_hwdev_ifup(nic_dev->hwdev); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed - HW interface up\n"); + return err; + } + } + + err = create_txqs(nic_dev); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to create Tx queues\n"); + goto err_create_txqs; + } + + err = create_rxqs(nic_dev); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to create Rx queues\n"); + goto err_create_rxqs; + } + + num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); + netif_set_real_num_tx_queues(netdev, num_qps); + netif_set_real_num_rx_queues(netdev, num_qps); err = hinic_port_set_state(nic_dev, HINIC_PORT_ENABLE); if (err) { netif_err(nic_dev, drv, netdev, "Failed to set port state\n"); - return err; + goto err_port_state; } /* Wait up to 3 sec between port enable to link state */ @@ -104,6 +254,16 @@ err_port_link: if (ret) netif_warn(nic_dev, drv, netdev, "Failed to revert port state\n"); + +err_port_state: + free_rxqs(nic_dev); + +err_create_rxqs: + free_txqs(nic_dev); + +err_create_txqs: + if (!(nic_dev->flags & HINIC_INTF_UP)) + hinic_hwdev_ifdown(nic_dev->hwdev); return err; } @@ -130,6 +290,12 @@ static int hinic_close(struct net_device *netdev) return err; } + free_rxqs(nic_dev); + free_txqs(nic_dev); + + if (flags & HINIC_INTF_UP) + hinic_hwdev_ifdown(nic_dev->hwdev); + netif_info(nic_dev, drv, netdev, "HINIC_INTF is DOWN\n"); return 0; } @@ -496,6 +662,8 @@ static int nic_dev_init(struct pci_dev *pdev) nic_dev->hwdev = hwdev; nic_dev->msg_enable = MSG_ENABLE_DEFAULT; nic_dev->flags = 0; + nic_dev->txqs = NULL; + nic_dev->rxqs = NULL; sema_init(&nic_dev->mgmt_lock, 1); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c new file mode 100644 index 000000000000..3c79f65d44da --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -0,0 +1,72 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#include +#include + +#include "hinic_hw_qp.h" +#include "hinic_rx.h" + +/** + * hinic_rxq_clean_stats - Clean the statistics of specific queue + * @rxq: Logical Rx Queue + **/ +void hinic_rxq_clean_stats(struct hinic_rxq *rxq) +{ + struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; + + u64_stats_update_begin(&rxq_stats->syncp); + rxq_stats->pkts = 0; + rxq_stats->bytes = 0; + u64_stats_update_end(&rxq_stats->syncp); +} + +/** + * rxq_stats_init - Initialize the statistics of specific queue + * @rxq: Logical Rx Queue + **/ +static void rxq_stats_init(struct hinic_rxq *rxq) +{ + struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; + + u64_stats_init(&rxq_stats->syncp); + hinic_rxq_clean_stats(rxq); +} + +/** + * hinic_init_rxq - Initialize the Rx Queue + * @rxq: Logical Rx Queue + * @rq: Hardware Rx Queue to connect the Logical queue with + * @netdev: network device to connect the Logical queue with + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, + struct net_device *netdev) +{ + rxq->netdev = netdev; + rxq->rq = rq; + + rxq_stats_init(rxq); + return 0; +} + +/** + * hinic_clean_rxq - Clean the Rx Queue + * @rxq: Logical Rx Queue + **/ +void hinic_clean_rxq(struct hinic_rxq *rxq) +{ +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h new file mode 100644 index 000000000000..fbd0246165dc --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h @@ -0,0 +1,46 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_RX_H +#define HINIC_RX_H + +#include +#include +#include + +#include "hinic_hw_qp.h" + +struct hinic_rxq_stats { + u64 pkts; + u64 bytes; + + struct u64_stats_sync syncp; +}; + +struct hinic_rxq { + struct net_device *netdev; + struct hinic_rq *rq; + + struct hinic_rxq_stats rxq_stats; +}; + +void hinic_rxq_clean_stats(struct hinic_rxq *rxq); + +int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, + struct net_device *netdev); + +void hinic_clean_rxq(struct hinic_rxq *rxq); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c new file mode 100644 index 000000000000..9835912039c9 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -0,0 +1,75 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#include +#include + +#include "hinic_hw_qp.h" +#include "hinic_tx.h" + +/** + * hinic_txq_clean_stats - Clean the statistics of specific queue + * @txq: Logical Tx Queue + **/ +void hinic_txq_clean_stats(struct hinic_txq *txq) +{ + struct hinic_txq_stats *txq_stats = &txq->txq_stats; + + u64_stats_update_begin(&txq_stats->syncp); + txq_stats->pkts = 0; + txq_stats->bytes = 0; + txq_stats->tx_busy = 0; + txq_stats->tx_wake = 0; + txq_stats->tx_dropped = 0; + u64_stats_update_end(&txq_stats->syncp); +} + +/** + * txq_stats_init - Initialize the statistics of specific queue + * @txq: Logical Tx Queue + **/ +static void txq_stats_init(struct hinic_txq *txq) +{ + struct hinic_txq_stats *txq_stats = &txq->txq_stats; + + u64_stats_init(&txq_stats->syncp); + hinic_txq_clean_stats(txq); +} + +/** + * hinic_init_txq - Initialize the Tx Queue + * @txq: Logical Tx Queue + * @sq: Hardware Tx Queue to connect the Logical queue with + * @netdev: network device to connect the Logical queue with + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, + struct net_device *netdev) +{ + txq->netdev = netdev; + txq->sq = sq; + + txq_stats_init(txq); + return 0; +} + +/** + * hinic_clean_txq - Clean the Tx Queue + * @txq: Logical Tx Queue + **/ +void hinic_clean_txq(struct hinic_txq *txq) +{ +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h new file mode 100644 index 000000000000..bbdb4b62d940 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h @@ -0,0 +1,49 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_TX_H +#define HINIC_TX_H + +#include +#include +#include + +#include "hinic_hw_qp.h" + +struct hinic_txq_stats { + u64 pkts; + u64 bytes; + u64 tx_busy; + u64 tx_wake; + u64 tx_dropped; + + struct u64_stats_sync syncp; +}; + +struct hinic_txq { + struct net_device *netdev; + struct hinic_sq *sq; + + struct hinic_txq_stats txq_stats; +}; + +void hinic_txq_clean_stats(struct hinic_txq *txq); + +int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, + struct net_device *netdev); + +void hinic_clean_txq(struct hinic_txq *txq); + +#endif -- cgit v1.2.3-55-g7522 From b15a9f37be2bc90f46f7e6b0c615c80e72e96431 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:57 +0800 Subject: net-next/hinic: Add wq Create work queues for being used by the queue pairs. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/Makefile | 4 +- drivers/net/ethernet/huawei/hinic/hinic_common.h | 25 ++ drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 69 ++- drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 6 + drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 14 + drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 516 +++++++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h | 86 ++++ drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h | 253 +++++++++++ 8 files changed, 968 insertions(+), 5 deletions(-) create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_common.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile index f60c449fbb40..0575a3452692 100644 --- a/drivers/net/ethernet/huawei/hinic/Makefile +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_HINIC) += hinic.o hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \ - hinic_hw_io.o hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o \ - hinic_hw_if.o + hinic_hw_io.o hinic_hw_wq.o hinic_hw_mgmt.o hinic_hw_api_cmd.o \ + hinic_hw_eqs.o hinic_hw_if.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/hinic_common.h new file mode 100644 index 000000000000..6a83c157b978 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_common.h @@ -0,0 +1,25 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_COMMON_H +#define HINIC_COMMON_H + +struct hinic_sge { + u32 hi_addr; + u32 lo_addr; + u32 len; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c index ebe28ee9af9c..1bf944e5aa28 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c @@ -20,6 +20,8 @@ #include #include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" #include "hinic_hw_qp.h" #include "hinic_hw_io.h" @@ -38,8 +40,33 @@ static int init_qp(struct hinic_func_to_io *func_to_io, struct msix_entry *sq_msix_entry, struct msix_entry *rq_msix_entry) { - /* should be implemented */ + struct hinic_hwif *hwif = func_to_io->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + qp->q_id = q_id; + + err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id], + HINIC_SQ_WQEBB_SIZE, HINIC_SQ_PAGE_SIZE, + HINIC_SQ_DEPTH, HINIC_SQ_WQE_MAX_SIZE); + if (err) { + dev_err(&pdev->dev, "Failed to allocate WQ for SQ\n"); + return err; + } + + err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id], + HINIC_RQ_WQEBB_SIZE, HINIC_RQ_PAGE_SIZE, + HINIC_RQ_DEPTH, HINIC_RQ_WQE_SIZE); + if (err) { + dev_err(&pdev->dev, "Failed to allocate WQ for RQ\n"); + goto err_rq_alloc; + } + return 0; + +err_rq_alloc: + hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); + return err; } /** @@ -50,7 +77,10 @@ static int init_qp(struct hinic_func_to_io *func_to_io, static void destroy_qp(struct hinic_func_to_io *func_to_io, struct hinic_qp *qp) { - /* should be implemented */ + int q_id = qp->q_id; + + hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); + hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); } /** @@ -70,7 +100,7 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, { struct hinic_hwif *hwif = func_to_io->hwif; struct pci_dev *pdev = hwif->pdev; - size_t qps_size; + size_t qps_size, wq_size; int i, j, err; qps_size = num_qps * sizeof(*func_to_io->qps); @@ -78,6 +108,20 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, if (!func_to_io->qps) return -ENOMEM; + wq_size = num_qps * sizeof(*func_to_io->sq_wq); + func_to_io->sq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); + if (!func_to_io->sq_wq) { + err = -ENOMEM; + goto err_sq_wq; + } + + wq_size = num_qps * sizeof(*func_to_io->rq_wq); + func_to_io->rq_wq = devm_kzalloc(&pdev->dev, wq_size, GFP_KERNEL); + if (!func_to_io->rq_wq) { + err = -ENOMEM; + goto err_rq_wq; + } + for (i = 0; i < num_qps; i++) { err = init_qp(func_to_io, &func_to_io->qps[i], i, &sq_msix_entries[i], &rq_msix_entries[i]); @@ -93,6 +137,12 @@ err_init_qp: for (j = 0; j < i; j++) destroy_qp(func_to_io, &func_to_io->qps[j]); + devm_kfree(&pdev->dev, func_to_io->rq_wq); + +err_rq_wq: + devm_kfree(&pdev->dev, func_to_io->sq_wq); + +err_sq_wq: devm_kfree(&pdev->dev, func_to_io->qps); return err; } @@ -111,6 +161,9 @@ void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps) for (i = 0; i < num_qps; i++) destroy_qp(func_to_io, &func_to_io->qps[i]); + devm_kfree(&pdev->dev, func_to_io->rq_wq); + devm_kfree(&pdev->dev, func_to_io->sq_wq); + devm_kfree(&pdev->dev, func_to_io->qps); } @@ -128,10 +181,19 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io, struct hinic_hwif *hwif, u16 max_qps, int num_ceqs, struct msix_entry *ceq_msix_entries) { + struct pci_dev *pdev = hwif->pdev; + int err; + func_to_io->hwif = hwif; func_to_io->qps = NULL; func_to_io->max_qps = max_qps; + 
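+ /*
+  * Each queue pair consumes two work queues (one for its SQ and one
+  * for its RQ), so the WQ set is sized for 2 * max_qps blocks here;
+  * the individual WQs are carved out of this set later, in init_qp().
+  */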
err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif); + if (err) { + dev_err(&pdev->dev, "Failed to allocate WQS for IO\n"); + return err; + } + return 0; } @@ -141,4 +203,5 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io, **/ void hinic_io_free(struct hinic_func_to_io *func_to_io) { + hinic_wqs_free(&func_to_io->wqs); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h index 7cdcffd4cf77..6cacb8eddd2e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h @@ -20,11 +20,17 @@ #include #include "hinic_hw_if.h" +#include "hinic_hw_wq.h" #include "hinic_hw_qp.h" struct hinic_func_to_io { struct hinic_hwif *hwif; + struct hinic_wqs wqs; + + struct hinic_wq *sq_wq; + struct hinic_wq *rq_wq; + struct hinic_qp *qps; u16 max_qps; }; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h index 64330fb6f10f..4031728a2281 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -16,6 +16,18 @@ #ifndef HINIC_HW_QP_H #define HINIC_HW_QP_H +#include +#include + +#define HINIC_SQ_WQEBB_SIZE 64 +#define HINIC_RQ_WQEBB_SIZE 32 + +#define HINIC_SQ_PAGE_SIZE SZ_4K +#define HINIC_RQ_PAGE_SIZE SZ_4K + +#define HINIC_SQ_DEPTH SZ_4K +#define HINIC_RQ_DEPTH SZ_4K + struct hinic_sq { /* should be implemented */ }; @@ -27,6 +39,8 @@ struct hinic_rq { struct hinic_qp { struct hinic_sq sq; struct hinic_rq rq; + + u16 q_id; }; #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c new file mode 100644 index 000000000000..fc72b76e744b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -0,0 +1,516 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_wq.h" + +#define WQS_BLOCKS_PER_PAGE 4 + +#define WQ_BLOCK_SIZE 4096 +#define WQS_PAGE_SIZE (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE) + +#define WQS_MAX_NUM_BLOCKS 128 +#define WQS_FREE_BLOCKS_SIZE(wqs) (WQS_MAX_NUM_BLOCKS * \ + sizeof((wqs)->free_blocks[0])) + +#define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size) + +#define WQ_PAGE_ADDR_SIZE sizeof(u64) +#define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) + +#define WQ_BASE_VADDR(wqs, wq) \ + ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_PADDR(wqs, wq) \ + ((wqs)->page_paddr[(wq)->page_idx] \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +#define WQ_BASE_ADDR(wqs, wq) \ + ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \ + + (wq)->block_idx * WQ_BLOCK_SIZE) + +/** + * queue_alloc_page - allocate page for Queue + * @hwif: HW interface for allocating DMA + * @vaddr: virtual address will be returned in this address + * @paddr: physical address will be returned in this address + * @shadow_vaddr: VM area will be return here for holding WQ page addresses + * @page_sz: page size of each WQ page + * + * Return 0 - Success, negative - Failure + **/ +static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr, + void ***shadow_vaddr, size_t page_sz) +{ + struct pci_dev *pdev = hwif->pdev; + dma_addr_t dma_addr; + + *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr, + GFP_KERNEL); + if (!*vaddr) { + dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n"); + return -ENOMEM; + } + + *paddr = (u64)dma_addr; + + /* use vzalloc for big mem */ + *shadow_vaddr = vzalloc(page_sz); + if (!*shadow_vaddr) + goto err_shadow_vaddr; + + return 0; + +err_shadow_vaddr: + dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr); + return -ENOMEM; +} + +/** + * wqs_allocate_page - allocate page for WQ set + * @wqs: Work Queue Set + * @page_idx: the page index of the page will be allocated + * + * Return 0 - Success, negative - Failure + **/ +static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx) +{ + return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx], + &wqs->page_paddr[page_idx], + &wqs->shadow_page_vaddr[page_idx], + WQS_PAGE_SIZE); +} + +/** + * wqs_free_page - free page of WQ set + * @wqs: Work Queue Set + * @page_idx: the page index of the page will be freed + **/ +static void wqs_free_page(struct hinic_wqs *wqs, int page_idx) +{ + struct hinic_hwif *hwif = wqs->hwif; + struct pci_dev *pdev = hwif->pdev; + + dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE, + wqs->page_vaddr[page_idx], + (dma_addr_t)wqs->page_paddr[page_idx]); + vfree(wqs->shadow_page_vaddr[page_idx]); +} + +static int alloc_page_arrays(struct hinic_wqs *wqs) +{ + struct hinic_hwif *hwif = wqs->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t size; + + size = wqs->num_pages * sizeof(*wqs->page_paddr); + wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!wqs->page_paddr) + return -ENOMEM; + + size = wqs->num_pages * sizeof(*wqs->page_vaddr); + wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!wqs->page_vaddr) + goto err_page_vaddr; + + size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr); + wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!wqs->shadow_page_vaddr) + goto err_page_shadow_vaddr; + + return 0; + +err_page_shadow_vaddr: + devm_kfree(&pdev->dev, 
wqs->page_vaddr); + +err_page_vaddr: + devm_kfree(&pdev->dev, wqs->page_paddr); + return -ENOMEM; +} + +static void free_page_arrays(struct hinic_wqs *wqs) +{ + struct hinic_hwif *hwif = wqs->hwif; + struct pci_dev *pdev = hwif->pdev; + + devm_kfree(&pdev->dev, wqs->shadow_page_vaddr); + devm_kfree(&pdev->dev, wqs->page_vaddr); + devm_kfree(&pdev->dev, wqs->page_paddr); +} + +static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx, + int *block_idx) +{ + int pos; + + down(&wqs->alloc_blocks_lock); + + wqs->num_free_blks--; + + if (wqs->num_free_blks < 0) { + wqs->num_free_blks++; + up(&wqs->alloc_blocks_lock); + return -ENOMEM; + } + + pos = wqs->alloc_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + *page_idx = wqs->free_blocks[pos].page_idx; + *block_idx = wqs->free_blocks[pos].block_idx; + + wqs->free_blocks[pos].page_idx = -1; + wqs->free_blocks[pos].block_idx = -1; + + up(&wqs->alloc_blocks_lock); + return 0; +} + +static void wqs_return_block(struct hinic_wqs *wqs, int page_idx, + int block_idx) +{ + int pos; + + down(&wqs->alloc_blocks_lock); + + pos = wqs->return_blk_pos++; + pos &= WQS_MAX_NUM_BLOCKS - 1; + + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = block_idx; + + wqs->num_free_blks++; + + up(&wqs->alloc_blocks_lock); +} + +static void init_wqs_blocks_arr(struct hinic_wqs *wqs) +{ + int page_idx, blk_idx, pos = 0; + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) { + wqs->free_blocks[pos].page_idx = page_idx; + wqs->free_blocks[pos].block_idx = blk_idx; + pos++; + } + } + + wqs->alloc_blk_pos = 0; + wqs->return_blk_pos = pos; + wqs->num_free_blks = pos; + + sema_init(&wqs->alloc_blocks_lock, 1); +} + +/** + * hinic_wqs_alloc - allocate Work Queues set + * @wqs: Work Queue Set + * @max_wqs: maximum wqs to allocate + * @hwif: HW interface for use for the allocation + * + * Return 0 - Success, negative - Failure + **/ +int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs, + struct hinic_hwif *hwif) +{ + struct pci_dev *pdev = hwif->pdev; + int err, i, page_idx; + + max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE); + if (max_wqs > WQS_MAX_NUM_BLOCKS) { + dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs); + return -EINVAL; + } + + wqs->hwif = hwif; + wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE; + + if (alloc_page_arrays(wqs)) { + dev_err(&pdev->dev, + "Failed to allocate mem for page addresses\n"); + return -ENOMEM; + } + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) { + err = wqs_allocate_page(wqs, page_idx); + if (err) { + dev_err(&pdev->dev, "Failed wq page allocation\n"); + goto err_wq_allocate_page; + } + } + + wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs), + GFP_KERNEL); + if (!wqs->free_blocks) { + err = -ENOMEM; + goto err_alloc_blocks; + } + + init_wqs_blocks_arr(wqs); + return 0; + +err_alloc_blocks: +err_wq_allocate_page: + for (i = 0; i < page_idx; i++) + wqs_free_page(wqs, i); + + free_page_arrays(wqs); + return err; +} + +/** + * hinic_wqs_free - free Work Queues set + * @wqs: Work Queue Set + **/ +void hinic_wqs_free(struct hinic_wqs *wqs) +{ + struct hinic_hwif *hwif = wqs->hwif; + struct pci_dev *pdev = hwif->pdev; + int page_idx; + + devm_kfree(&pdev->dev, wqs->free_blocks); + + for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) + wqs_free_page(wqs, page_idx); + + free_page_arrays(wqs); +} + +/** + * alloc_wqes_shadow - allocate WQE shadows for WQ + * @wq: WQ to allocate shadows for + * + * Return 0 
- Success, negative - Failure + **/ +static int alloc_wqes_shadow(struct hinic_wq *wq) +{ + struct hinic_hwif *hwif = wq->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t size; + + size = wq->num_q_pages * wq->max_wqe_size; + wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!wq->shadow_wqe) + return -ENOMEM; + + size = wq->num_q_pages * sizeof(wq->prod_idx); + wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); + if (!wq->shadow_idx) + goto err_shadow_idx; + + return 0; + +err_shadow_idx: + devm_kfree(&pdev->dev, wq->shadow_wqe); + return -ENOMEM; +} + +/** + * free_wqes_shadow - free WQE shadows of WQ + * @wq: WQ to free shadows from + **/ +static void free_wqes_shadow(struct hinic_wq *wq) +{ + struct hinic_hwif *hwif = wq->hwif; + struct pci_dev *pdev = hwif->pdev; + + devm_kfree(&pdev->dev, wq->shadow_idx); + devm_kfree(&pdev->dev, wq->shadow_wqe); +} + +/** + * free_wq_pages - free pages of WQ + * @hwif: HW interface for releasing dma addresses + * @wq: WQ to free pages from + * @num_q_pages: number pages to free + **/ +static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, + int num_q_pages) +{ + struct pci_dev *pdev = hwif->pdev; + int i; + + for (i = 0; i < num_q_pages; i++) { + void **vaddr = &wq->shadow_block_vaddr[i]; + u64 *paddr = &wq->block_vaddr[i]; + dma_addr_t dma_addr; + + dma_addr = (dma_addr_t)be64_to_cpu(*paddr); + dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr, + dma_addr); + } + + free_wqes_shadow(wq); +} + +/** + * alloc_wq_pages - alloc pages for WQ + * @hwif: HW interface for allocating dma addresses + * @wq: WQ to allocate pages for + * @max_pages: maximum pages allowed + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, + int max_pages) +{ + struct pci_dev *pdev = hwif->pdev; + int i, err, num_q_pages; + + num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size; + if (num_q_pages > max_pages) { + dev_err(&pdev->dev, "Number wq pages exceeds the limit\n"); + return -EINVAL; + } + + if (num_q_pages & (num_q_pages - 1)) { + dev_err(&pdev->dev, "Number wq pages must be power of 2\n"); + return -EINVAL; + } + + wq->num_q_pages = num_q_pages; + + err = alloc_wqes_shadow(wq); + if (err) { + dev_err(&pdev->dev, "Failed to allocate wqe shadow\n"); + return err; + } + + for (i = 0; i < num_q_pages; i++) { + void **vaddr = &wq->shadow_block_vaddr[i]; + u64 *paddr = &wq->block_vaddr[i]; + dma_addr_t dma_addr; + + *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size, + &dma_addr, GFP_KERNEL); + if (!*vaddr) { + dev_err(&pdev->dev, "Failed to allocate wq page\n"); + goto err_alloc_wq_pages; + } + + /* HW uses Big Endian Format */ + *paddr = cpu_to_be64(dma_addr); + } + + return 0; + +err_alloc_wq_pages: + free_wq_pages(wq, hwif, i); + return -ENOMEM; +} + +/** + * hinic_wq_allocate - Allocate the WQ resources from the WQS + * @wqs: WQ set from which to allocate the WQ resources + * @wq: WQ to allocate resources for it from the WQ set + * @wqebb_size: Work Queue Block Byte Size + * @wq_page_size: the page size in the Work Queue + * @q_depth: number of wqebbs in WQ + * @max_wqe_size: maximum WQE size that will be used in the WQ + * + * Return 0 - Success, negative - Failure + **/ +int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, + u16 wqebb_size, u16 wq_page_size, u16 q_depth, + u16 max_wqe_size) +{ + struct hinic_hwif *hwif = wqs->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 num_wqebbs_per_page; + int err; + + 
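+ /*
+  * Sanity-check the geometry: the WQEBB size and page size must be
+  * non-zero, and both q_depth and the number of WQEBBs per page must
+  * be powers of two so index wrapping can use a mask
+  * (wq->mask = q_depth - 1) instead of a division.
+  */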
if (wqebb_size == 0) { + dev_err(&pdev->dev, "wqebb_size must be > 0\n"); + return -EINVAL; + } + + if (wq_page_size == 0) { + dev_err(&pdev->dev, "wq_page_size must be > 0\n"); + return -EINVAL; + } + + if (q_depth & (q_depth - 1)) { + dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); + return -EINVAL; + } + + num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { + dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); + return -EINVAL; + } + + wq->hwif = hwif; + + err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx); + if (err) { + dev_err(&pdev->dev, "Failed to get free wqs next block\n"); + return err; + } + + wq->wqebb_size = wqebb_size; + wq->wq_page_size = wq_page_size; + wq->q_depth = q_depth; + wq->max_wqe_size = max_wqe_size; + wq->num_wqebbs_per_page = num_wqebbs_per_page; + + wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); + wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); + wq->block_paddr = WQ_BASE_PADDR(wqs, wq); + + err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES); + if (err) { + dev_err(&pdev->dev, "Failed to allocate wq pages\n"); + goto err_alloc_wq_pages; + } + + atomic_set(&wq->cons_idx, 0); + atomic_set(&wq->prod_idx, 0); + atomic_set(&wq->delta, q_depth); + wq->mask = q_depth - 1; + + return 0; + +err_alloc_wq_pages: + wqs_return_block(wqs, wq->page_idx, wq->block_idx); + return err; +} + +/** + * hinic_wq_free - Free the WQ resources to the WQS + * @wqs: WQ set to free the WQ resources to it + * @wq: WQ to free its resources to the WQ set resources + **/ +void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq) +{ + free_wq_pages(wq, wqs->hwif, wq->num_q_pages); + + wqs_return_block(wqs, wq->page_idx, wq->block_idx); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h new file mode 100644 index 000000000000..7c114daf13ef --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h @@ -0,0 +1,86 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_HW_WQ_H +#define HINIC_HW_WQ_H + +#include +#include +#include + +#include "hinic_hw_if.h" + +struct hinic_free_block { + int page_idx; + int block_idx; +}; + +struct hinic_wq { + struct hinic_hwif *hwif; + + int page_idx; + int block_idx; + + u16 wqebb_size; + u16 wq_page_size; + u16 q_depth; + u16 max_wqe_size; + u16 num_wqebbs_per_page; + + /* The addresses are 64 bit in the HW */ + u64 block_paddr; + void **shadow_block_vaddr; + u64 *block_vaddr; + + int num_q_pages; + u8 *shadow_wqe; + u16 *shadow_idx; + + atomic_t cons_idx; + atomic_t prod_idx; + atomic_t delta; + u16 mask; +}; + +struct hinic_wqs { + struct hinic_hwif *hwif; + int num_pages; + + /* The addresses are 64 bit in the HW */ + u64 *page_paddr; + u64 **page_vaddr; + void ***shadow_page_vaddr; + + struct hinic_free_block *free_blocks; + int alloc_blk_pos; + int return_blk_pos; + int num_free_blks; + + /* Lock for getting a free block from the WQ set */ + struct semaphore alloc_blocks_lock; +}; + +int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, + struct hinic_hwif *hwif); + +void hinic_wqs_free(struct hinic_wqs *wqs); + +int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, + u16 wqebb_size, u16 wq_page_size, u16 q_depth, + u16 max_wqe_size); + +void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h new file mode 100644 index 000000000000..d727c4dc9d6c --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h @@ -0,0 +1,253 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#ifndef HINIC_HW_WQE_H +#define HINIC_HW_WQE_H + +#include "hinic_common.h" + +#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16 +#define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22 +#define HINIC_SQ_CTRL_LEN_SHIFT 29 + +#define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF +#define HINIC_SQ_CTRL_TASKSECT_LEN_MASK 0x1F +#define HINIC_SQ_CTRL_DATA_FORMAT_MASK 0x1 +#define HINIC_SQ_CTRL_LEN_MASK 0x3 + +#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT 13 + +#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK 0x3FFF + +#define HINIC_SQ_CTRL_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \ + << HINIC_SQ_CTRL_##member##_SHIFT) + +#define HINIC_SQ_CTRL_GET(val, member) \ + (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \ + & HINIC_SQ_CTRL_##member##_MASK) + +#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT 0 +#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT 8 +#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT 10 +#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_SHIFT 12 +#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_SHIFT 13 +/* 1 bit reserved */ +#define HINIC_SQ_TASK_INFO0_TSO_FLAG_SHIFT 15 +#define HINIC_SQ_TASK_INFO0_VLAN_TAG_SHIFT 16 + +#define HINIC_SQ_TASK_INFO0_L2HDR_LEN_MASK 0xFF +#define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_MASK 0x3 +#define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_MASK 0x3 +#define HINIC_SQ_TASK_INFO0_VLAN_OFFLOAD_MASK 0x1 +#define HINIC_SQ_TASK_INFO0_PARSE_FLAG_MASK 0x1 +/* 1 bit reserved */ +#define HINIC_SQ_TASK_INFO0_TSO_FLAG_MASK 0x1 +#define HINIC_SQ_TASK_INFO0_VLAN_TAG_MASK 0xFFFF + +#define HINIC_SQ_TASK_INFO0_SET(val, member) \ + (((u32)(val) & HINIC_SQ_TASK_INFO0_##member##_MASK) << \ + HINIC_SQ_TASK_INFO0_##member##_SHIFT) + +/* 8 bits reserved */ +#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT 8 +#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_SHIFT 16 +#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_SHIFT 24 + +/* 8 bits reserved */ +#define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK 0xFF +#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_MASK 0xFF +#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_MASK 0xFF + +#define HINIC_SQ_TASK_INFO1_SET(val, member) \ + (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \ + HINIC_SQ_TASK_INFO1_##member##_SHIFT) + +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_SHIFT 0 +#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_SHIFT 12 +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 19 +/* 1 bit reserved */ +#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT 22 +/* 8 bits reserved */ + +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_MASK 0xFFF +#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_MASK 0x7F +#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK 0x3 +/* 1 bit reserved */ +#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK 0x3 +/* 8 bits reserved */ + +#define HINIC_SQ_TASK_INFO2_SET(val, member) \ + (((u32)(val) & HINIC_SQ_TASK_INFO2_##member##_MASK) << \ + HINIC_SQ_TASK_INFO2_##member##_SHIFT) + +/* 31 bits reserved */ +#define HINIC_SQ_TASK_INFO4_L2TYPE_SHIFT 31 + +/* 31 bits reserved */ +#define HINIC_SQ_TASK_INFO4_L2TYPE_MASK 0x1 + +#define HINIC_SQ_TASK_INFO4_SET(val, member) \ + (((u32)(val) & HINIC_SQ_TASK_INFO4_##member##_MASK) << \ + HINIC_SQ_TASK_INFO4_##member##_SHIFT) + +#define HINIC_RQ_CQE_STATUS_RXDONE_SHIFT 31 + +#define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1 + +#define HINIC_RQ_CQE_STATUS_GET(val, member) \ + (((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \ + HINIC_RQ_CQE_STATUS_##member##_MASK) + +#define HINIC_RQ_CQE_STATUS_CLEAR(val, member) \ + ((val) & (~(HINIC_RQ_CQE_STATUS_##member##_MASK << \ + HINIC_RQ_CQE_STATUS_##member##_SHIFT))) + +#define 
HINIC_RQ_CQE_SGE_LEN_SHIFT 16 + +#define HINIC_RQ_CQE_SGE_LEN_MASK 0xFFFF + +#define HINIC_RQ_CQE_SGE_GET(val, member) \ + (((val) >> HINIC_RQ_CQE_SGE_##member##_SHIFT) & \ + HINIC_RQ_CQE_SGE_##member##_MASK) + +#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 +#define HINIC_RQ_CTRL_COMPLETE_FORMAT_SHIFT 15 +#define HINIC_RQ_CTRL_COMPLETE_LEN_SHIFT 27 +#define HINIC_RQ_CTRL_LEN_SHIFT 29 + +#define HINIC_RQ_CTRL_BUFDESC_SECT_LEN_MASK 0xFF +#define HINIC_RQ_CTRL_COMPLETE_FORMAT_MASK 0x1 +#define HINIC_RQ_CTRL_COMPLETE_LEN_MASK 0x3 +#define HINIC_RQ_CTRL_LEN_MASK 0x3 + +#define HINIC_RQ_CTRL_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTRL_##member##_MASK) << \ + HINIC_RQ_CTRL_##member##_SHIFT) + +#define HINIC_SQ_WQE_SIZE(nr_sges) \ + (sizeof(struct hinic_sq_ctrl) + \ + sizeof(struct hinic_sq_task) + \ + (nr_sges) * sizeof(struct hinic_sq_bufdesc)) + +#define HINIC_MAX_SQ_BUFDESCS 17 + +#define HINIC_SQ_WQE_MAX_SIZE 320 +#define HINIC_RQ_WQE_SIZE 32 + +enum hinic_l4offload_type { + HINIC_L4_OFF_DISABLE = 0, + HINIC_TCP_OFFLOAD_ENABLE = 1, + HINIC_SCTP_OFFLOAD_ENABLE = 2, + HINIC_UDP_OFFLOAD_ENABLE = 3, +}; + +enum hinic_vlan_offload { + HINIC_VLAN_OFF_DISABLE = 0, + HINIC_VLAN_OFF_ENABLE = 1, +}; + +enum hinic_pkt_parsed { + HINIC_PKT_NOT_PARSED = 0, + HINIC_PKT_PARSED = 1, +}; + +enum hinic_outer_l3type { + HINIC_OUTER_L3TYPE_UNKNOWN = 0, + HINIC_OUTER_L3TYPE_IPV6 = 1, + HINIC_OUTER_L3TYPE_IPV4_NO_CHKSUM = 2, + HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3, +}; + +enum hinic_media_type { + HINIC_MEDIA_UNKNOWN = 0, +}; + +enum hinic_l2type { + HINIC_L2TYPE_ETH = 0, +}; + +enum hinc_tunnel_l4type { + HINIC_TUNNEL_L4TYPE_UNKNOWN = 0, +}; + +struct hinic_sq_ctrl { + u32 ctrl_info; + u32 queue_info; +}; + +struct hinic_sq_task { + u32 pkt_info0; + u32 pkt_info1; + u32 pkt_info2; + u32 ufo_v6_identify; + u32 pkt_info4; + u32 zero_pad; +}; + +struct hinic_sq_bufdesc { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_sq_wqe { + struct hinic_sq_ctrl ctrl; + struct hinic_sq_task task; + struct hinic_sq_bufdesc buf_descs[HINIC_MAX_SQ_BUFDESCS]; +}; + +struct hinic_rq_cqe { + u32 status; + u32 len; + + u32 rsvd2; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; + u32 rsvd6; + u32 rsvd7; +}; + +struct hinic_rq_ctrl { + u32 ctrl_info; +}; + +struct hinic_rq_cqe_sect { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_rq_bufdesc { + u32 hi_addr; + u32 lo_addr; +}; + +struct hinic_rq_wqe { + struct hinic_rq_ctrl ctrl; + u32 rsvd; + struct hinic_rq_cqe_sect cqe_sect; + struct hinic_rq_bufdesc buf_desc; +}; + +struct hinic_hw_wqe { + /* HW Format */ + union { + struct hinic_sq_wqe sq_wqe; + struct hinic_rq_wqe rq_wqe; + }; +}; + +#endif -- cgit v1.2.3-55-g7522 From f91090f7da3a215e3cf8f678ab71ad65d1d627a1 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:58 +0800 Subject: net-next/hinic: Add qp resources Create the resources for queue pair operations: doorbell area, consumer index address and producer index address. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/Makefile | 4 +- drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 1 + drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 164 ++++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 27 +++ drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 266 ++++++++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 50 ++++- 6 files changed, 507 insertions(+), 5 deletions(-) create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile index 0575a3452692..84815f7c28dc 100644 --- a/drivers/net/ethernet/huawei/hinic/Makefile +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_HINIC) += hinic.o hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \ - hinic_hw_io.o hinic_hw_wq.o hinic_hw_mgmt.o hinic_hw_api_cmd.o \ - hinic_hw_eqs.o hinic_hw_if.o + hinic_hw_io.o hinic_hw_qp.o hinic_hw_wq.o hinic_hw_mgmt.o \ + hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h index 228069895f3b..8f5919540c19 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -137,6 +137,7 @@ #define HINIC_IS_PPF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PPF) #define HINIC_PCI_CFG_REGS_BAR 0 +#define HINIC_PCI_DB_BAR 4 #define HINIC_PCIE_ST_DISABLE 0 #define HINIC_PCIE_AT_DISABLE 0 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c index 1bf944e5aa28..ad12cc77dc5c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c @@ -13,11 +13,16 @@ * */ +#include #include #include #include #include #include +#include +#include +#include +#include #include "hinic_hw_if.h" #include "hinic_hw_wqe.h" @@ -25,6 +30,76 @@ #include "hinic_hw_qp.h" #include "hinic_hw_io.h" +#define CI_Q_ADDR_SIZE sizeof(u32) + +#define CI_ADDR(base_addr, q_id) ((base_addr) + \ + (q_id) * CI_Q_ADDR_SIZE) + +#define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE) + +#define DB_IDX(db, db_base) \ + (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE) + +static void init_db_area_idx(struct hinic_free_db_area *free_db_area) +{ + int i; + + for (i = 0; i < HINIC_DB_MAX_AREAS; i++) + free_db_area->db_idx[i] = i; + + free_db_area->alloc_pos = 0; + free_db_area->return_pos = HINIC_DB_MAX_AREAS; + + free_db_area->num_free = HINIC_DB_MAX_AREAS; + + sema_init(&free_db_area->idx_lock, 1); +} + +static void __iomem *get_db_area(struct hinic_func_to_io *func_to_io) +{ + struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area; + int pos, idx; + + down(&free_db_area->idx_lock); + + free_db_area->num_free--; + + if (free_db_area->num_free < 0) { + free_db_area->num_free++; + up(&free_db_area->idx_lock); + return ERR_PTR(-ENOMEM); + } + + pos = free_db_area->alloc_pos++; + pos &= HINIC_DB_MAX_AREAS - 1; + + idx = free_db_area->db_idx[pos]; + + free_db_area->db_idx[pos] = -1; + + up(&free_db_area->idx_lock); + + return func_to_io->db_base + idx * HINIC_DB_PAGE_SIZE; +} + +static void return_db_area(struct hinic_func_to_io *func_to_io, + void __iomem *db_base) +{ + struct hinic_free_db_area *free_db_area = &func_to_io->free_db_area; + int pos, idx = DB_IDX(db_base, func_to_io->db_base); + + down(&free_db_area->idx_lock); + + pos = free_db_area->return_pos++; + pos 
&= HINIC_DB_MAX_AREAS - 1; + + free_db_area->db_idx[pos] = idx; + + free_db_area->num_free++; + + up(&free_db_area->idx_lock); +} + /** * init_qp - Initialize a Queue Pair * @func_to_io: func to io channel that holds the IO components @@ -42,6 +117,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io, { struct hinic_hwif *hwif = func_to_io->hwif; struct pci_dev *pdev = hwif->pdev; + void __iomem *db_base; int err; qp->q_id = q_id; @@ -62,8 +138,42 @@ static int init_qp(struct hinic_func_to_io *func_to_io, goto err_rq_alloc; } + db_base = get_db_area(func_to_io); + if (IS_ERR(db_base)) { + dev_err(&pdev->dev, "Failed to get DB area for SQ\n"); + err = PTR_ERR(db_base); + goto err_get_db; + } + + func_to_io->sq_db[q_id] = db_base; + + err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id], + sq_msix_entry, + CI_ADDR(func_to_io->ci_addr_base, q_id), + CI_ADDR(func_to_io->ci_dma_base, q_id), db_base); + if (err) { + dev_err(&pdev->dev, "Failed to init SQ\n"); + goto err_sq_init; + } + + err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id], + rq_msix_entry); + if (err) { + dev_err(&pdev->dev, "Failed to init RQ\n"); + goto err_rq_init; + } + return 0; +err_rq_init: + hinic_clean_sq(&qp->sq); + +err_sq_init: + return_db_area(func_to_io, db_base); + +err_get_db: + hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); + err_rq_alloc: hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); return err; @@ -79,6 +189,11 @@ static void destroy_qp(struct hinic_func_to_io *func_to_io, { int q_id = qp->q_id; + hinic_clean_rq(&qp->rq); + hinic_clean_sq(&qp->sq); + + return_db_area(func_to_io, func_to_io->sq_db[q_id]); + hinic_wq_free(&func_to_io->wqs, &func_to_io->rq_wq[q_id]); hinic_wq_free(&func_to_io->wqs, &func_to_io->sq_wq[q_id]); } @@ -100,7 +215,8 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, { struct hinic_hwif *hwif = func_to_io->hwif; struct pci_dev *pdev = hwif->pdev; - size_t qps_size, wq_size; + size_t qps_size, wq_size, db_size; + void *ci_addr_base; int i, j, err; qps_size = num_qps * sizeof(*func_to_io->qps); @@ -122,6 +238,24 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, goto err_rq_wq; } + db_size = num_qps * sizeof(*func_to_io->sq_db); + func_to_io->sq_db = devm_kzalloc(&pdev->dev, db_size, GFP_KERNEL); + if (!func_to_io->sq_db) { + err = -ENOMEM; + goto err_sq_db; + } + + ci_addr_base = dma_zalloc_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), + &func_to_io->ci_dma_base, + GFP_KERNEL); + if (!ci_addr_base) { + dev_err(&pdev->dev, "Failed to allocate CI area\n"); + err = -ENOMEM; + goto err_ci_base; + } + + func_to_io->ci_addr_base = ci_addr_base; + for (i = 0; i < num_qps; i++) { err = init_qp(func_to_io, &func_to_io->qps[i], i, &sq_msix_entries[i], &rq_msix_entries[i]); @@ -137,6 +271,13 @@ err_init_qp: for (j = 0; j < i; j++) destroy_qp(func_to_io, &func_to_io->qps[j]); + dma_free_coherent(&pdev->dev, CI_TABLE_SIZE(num_qps), + func_to_io->ci_addr_base, func_to_io->ci_dma_base); + +err_ci_base: + devm_kfree(&pdev->dev, func_to_io->sq_db); + +err_sq_db: devm_kfree(&pdev->dev, func_to_io->rq_wq); err_rq_wq: @@ -156,11 +297,19 @@ void hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps) { struct hinic_hwif *hwif = func_to_io->hwif; struct pci_dev *pdev = hwif->pdev; + size_t ci_table_size; int i; + ci_table_size = CI_TABLE_SIZE(num_qps); + for (i = 0; i < num_qps; i++) destroy_qp(func_to_io, &func_to_io->qps[i]); + dma_free_coherent(&pdev->dev, ci_table_size, func_to_io->ci_addr_base, + 
func_to_io->ci_dma_base); + + devm_kfree(&pdev->dev, func_to_io->sq_db); + devm_kfree(&pdev->dev, func_to_io->rq_wq); devm_kfree(&pdev->dev, func_to_io->sq_wq); @@ -194,7 +343,19 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io, return err; } + func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR); + if (!func_to_io->db_base) { + dev_err(&pdev->dev, "Failed to remap IO DB area\n"); + err = -ENOMEM; + goto err_db_ioremap; + } + + init_db_area_idx(&func_to_io->free_db_area); return 0; + +err_db_ioremap: + hinic_wqs_free(&func_to_io->wqs); + return err; } /** @@ -203,5 +364,6 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io, **/ void hinic_io_free(struct hinic_func_to_io *func_to_io) { + iounmap(func_to_io->db_base); hinic_wqs_free(&func_to_io->wqs); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h index 6cacb8eddd2e..2d85a38a1df6 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h @@ -18,11 +18,30 @@ #include #include +#include +#include #include "hinic_hw_if.h" #include "hinic_hw_wq.h" #include "hinic_hw_qp.h" +#define HINIC_DB_PAGE_SIZE SZ_4K +#define HINIC_DB_SIZE SZ_4M + +#define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE) + +struct hinic_free_db_area { + int db_idx[HINIC_DB_MAX_AREAS]; + + int alloc_pos; + int return_pos; + + int num_free; + + /* Lock for getting db area */ + struct semaphore idx_lock; +}; + struct hinic_func_to_io { struct hinic_hwif *hwif; @@ -33,6 +52,14 @@ struct hinic_func_to_io { struct hinic_qp *qps; u16 max_qps; + + void __iomem **sq_db; + void __iomem *db_base; + + void *ci_addr_base; + dma_addr_t ci_dma_base; + + struct hinic_free_db_area free_db_area; }; int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c new file mode 100644 index 000000000000..2b77b592e90d --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -0,0 +1,266 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
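A note on the doorbell bookkeeping added above: struct hinic_free_db_area is a fixed ring of page indices carved out of the doorbell BAR, handed out at alloc_pos and put back at return_pos, both wrapping with a power-of-two mask under a semaphore. A minimal sketch of that ring follows; the names and the size are illustrative stand-ins, not part of the driver.

/* Minimal sketch of the index ring behind get_db_area()/return_db_area()
 * above.  MAX_AREAS stands in for HINIC_DB_MAX_AREAS and must be a power
 * of two; the driver additionally serializes the ring with a semaphore
 * (idx_lock) because callers may sleep.
 */
#define MAX_AREAS	8

struct free_area_ring {
	int idx[MAX_AREAS];	/* free doorbell page indices */
	int alloc_pos;		/* next slot to hand an index out from */
	int return_pos;		/* next slot to put a returned index into */
	int num_free;
};

static void ring_init(struct free_area_ring *r)
{
	int i;

	for (i = 0; i < MAX_AREAS; i++)
		r->idx[i] = i;

	r->alloc_pos = 0;
	r->return_pos = MAX_AREAS;
	r->num_free = MAX_AREAS;
}

/* Returns a free page index, or -1 when the pool is exhausted.  The
 * doorbell address is then db_base + index * page size, matching DB_IDX()
 * in the patch above.
 */
static int ring_get(struct free_area_ring *r)
{
	int pos;

	r->num_free--;
	if (r->num_free < 0) {
		r->num_free++;
		return -1;
	}

	pos = r->alloc_pos++ & (MAX_AREAS - 1);
	return r->idx[pos];
}

static void ring_return(struct free_area_ring *r, int idx)
{
	int pos = r->return_pos++ & (MAX_AREAS - 1);

	r->idx[pos] = idx;
	r->num_free++;
}

get_db_area() and return_db_area() above follow exactly this shape, with HINIC_DB_MAX_AREAS entries and the returned index scaled by HINIC_DB_PAGE_SIZE to form the doorbell offset.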
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_wq.h" +#include "hinic_hw_qp.h" + +#define SQ_DB_OFF SZ_2K + +/** + * alloc_sq_skb_arr - allocate sq array for saved skb + * @sq: HW Send Queue + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_sq_skb_arr(struct hinic_sq *sq) +{ + struct hinic_wq *wq = sq->wq; + size_t skb_arr_size; + + skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb); + sq->saved_skb = vzalloc(skb_arr_size); + if (!sq->saved_skb) + return -ENOMEM; + + return 0; +} + +/** + * free_sq_skb_arr - free sq array for saved skb + * @sq: HW Send Queue + **/ +static void free_sq_skb_arr(struct hinic_sq *sq) +{ + vfree(sq->saved_skb); +} + +/** + * alloc_rq_skb_arr - allocate rq array for saved skb + * @rq: HW Receive Queue + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_rq_skb_arr(struct hinic_rq *rq) +{ + struct hinic_wq *wq = rq->wq; + size_t skb_arr_size; + + skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb); + rq->saved_skb = vzalloc(skb_arr_size); + if (!rq->saved_skb) + return -ENOMEM; + + return 0; +} + +/** + * free_rq_skb_arr - free rq array for saved skb + * @rq: HW Receive Queue + **/ +static void free_rq_skb_arr(struct hinic_rq *rq) +{ + vfree(rq->saved_skb); +} + +/** + * hinic_init_sq - Initialize HW Send Queue + * @sq: HW Send Queue + * @hwif: HW Interface for accessing HW + * @wq: Work Queue for the data of the SQ + * @entry: msix entry for sq + * @ci_addr: address for reading the current HW consumer index + * @ci_dma_addr: dma address for reading the current HW consumer index + * @db_base: doorbell base address + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, + struct hinic_wq *wq, struct msix_entry *entry, + void *ci_addr, dma_addr_t ci_dma_addr, + void __iomem *db_base) +{ + sq->hwif = hwif; + + sq->wq = wq; + + sq->irq = entry->vector; + sq->msix_entry = entry->entry; + + sq->hw_ci_addr = ci_addr; + sq->hw_ci_dma_addr = ci_dma_addr; + + sq->db_base = db_base + SQ_DB_OFF; + + return alloc_sq_skb_arr(sq); +} + +/** + * hinic_clean_sq - Clean HW Send Queue's Resources + * @sq: Send Queue + **/ +void hinic_clean_sq(struct hinic_sq *sq) +{ + free_sq_skb_arr(sq); +} + +/** + * alloc_rq_cqe - allocate rq completion queue elements + * @rq: HW Receive Queue + * + * Return 0 - Success, negative - Failure + **/ +static int alloc_rq_cqe(struct hinic_rq *rq) +{ + struct hinic_hwif *hwif = rq->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t cqe_dma_size, cqe_size; + struct hinic_wq *wq = rq->wq; + int j, i; + + cqe_size = wq->q_depth * sizeof(*rq->cqe); + rq->cqe = vzalloc(cqe_size); + if (!rq->cqe) + return -ENOMEM; + + cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma); + rq->cqe_dma = vzalloc(cqe_dma_size); + if (!rq->cqe_dma) + goto err_cqe_dma_arr_alloc; + + for (i = 0; i < wq->q_depth; i++) { + rq->cqe[i] = dma_zalloc_coherent(&pdev->dev, + sizeof(*rq->cqe[i]), + &rq->cqe_dma[i], GFP_KERNEL); + if (!rq->cqe[i]) + goto err_cqe_alloc; + } + + return 0; + +err_cqe_alloc: + for (j = 0; j < i; j++) + dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j], + rq->cqe_dma[j]); + + vfree(rq->cqe_dma); + +err_cqe_dma_arr_alloc: + vfree(rq->cqe); + return -ENOMEM; +} + +/** + * free_rq_cqe - free rq completion queue elements + * @rq: HW Receive Queue + **/ +static void free_rq_cqe(struct hinic_rq *rq) +{ + struct hinic_hwif *hwif = rq->hwif; + struct pci_dev *pdev = 
hwif->pdev; + struct hinic_wq *wq = rq->wq; + int i; + + for (i = 0; i < wq->q_depth; i++) + dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i], + rq->cqe_dma[i]); + + vfree(rq->cqe_dma); + vfree(rq->cqe); +} + +/** + * hinic_init_rq - Initialize HW Receive Queue + * @rq: HW Receive Queue + * @hwif: HW Interface for accessing HW + * @wq: Work Queue for the data of the RQ + * @entry: msix entry for rq + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, + struct hinic_wq *wq, struct msix_entry *entry) +{ + struct pci_dev *pdev = hwif->pdev; + size_t pi_size; + int err; + + rq->hwif = hwif; + + rq->wq = wq; + + rq->irq = entry->vector; + rq->msix_entry = entry->entry; + + rq->buf_sz = HINIC_RX_BUF_SZ; + + err = alloc_rq_skb_arr(rq); + if (err) { + dev_err(&pdev->dev, "Failed to allocate rq priv data\n"); + return err; + } + + err = alloc_rq_cqe(rq); + if (err) { + dev_err(&pdev->dev, "Failed to allocate rq cqe\n"); + goto err_alloc_rq_cqe; + } + + /* HW requirements: Must be at least 32 bit */ + pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); + rq->pi_virt_addr = dma_zalloc_coherent(&pdev->dev, pi_size, + &rq->pi_dma_addr, GFP_KERNEL); + if (!rq->pi_virt_addr) { + dev_err(&pdev->dev, "Failed to allocate PI address\n"); + err = -ENOMEM; + goto err_pi_virt; + } + + return 0; + +err_pi_virt: + free_rq_cqe(rq); + +err_alloc_rq_cqe: + free_rq_skb_arr(rq); + return err; +} + +/** + * hinic_clean_rq - Clean HW Receive Queue's Resources + * @rq: HW Receive Queue + **/ +void hinic_clean_rq(struct hinic_rq *rq) +{ + struct hinic_hwif *hwif = rq->hwif; + struct pci_dev *pdev = hwif->pdev; + size_t pi_size; + + pi_size = ALIGN(sizeof(*rq->pi_virt_addr), sizeof(u32)); + dma_free_coherent(&pdev->dev, pi_size, rq->pi_virt_addr, + rq->pi_dma_addr); + + free_rq_cqe(rq); + free_rq_skb_arr(rq); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h index 4031728a2281..c5ec30dcda96 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -18,6 +18,12 @@ #include #include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" #define HINIC_SQ_WQEBB_SIZE 64 #define HINIC_RQ_WQEBB_SIZE 32 @@ -28,12 +34,41 @@ #define HINIC_SQ_DEPTH SZ_4K #define HINIC_RQ_DEPTH SZ_4K +#define HINIC_RX_BUF_SZ 2048 + struct hinic_sq { - /* should be implemented */ + struct hinic_hwif *hwif; + + struct hinic_wq *wq; + + u32 irq; + u16 msix_entry; + + void *hw_ci_addr; + dma_addr_t hw_ci_dma_addr; + + void __iomem *db_base; + + struct sk_buff **saved_skb; }; struct hinic_rq { - /* should be implemented */ + struct hinic_hwif *hwif; + + struct hinic_wq *wq; + + u32 irq; + u16 msix_entry; + + size_t buf_sz; + + struct sk_buff **saved_skb; + + struct hinic_rq_cqe **cqe; + dma_addr_t *cqe_dma; + + u16 *pi_virt_addr; + dma_addr_t pi_dma_addr; }; struct hinic_qp { @@ -43,4 +78,15 @@ struct hinic_qp { u16 q_id; }; +int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, + struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr, + dma_addr_t ci_dma_addr, void __iomem *db_base); + +void hinic_clean_sq(struct hinic_sq *sq); + +int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, + struct hinic_wq *wq, struct msix_entry *entry); + +void hinic_clean_rq(struct hinic_rq *rq); + #endif -- cgit v1.2.3-55-g7522 From 53e7d6feb949b4df542897ab13a33fe484a45c72 Mon Sep 17 00:00:00 2001 
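Before the next patch, a short recap of the consumer-index plumbing that the patch above introduces: hinic_io_create_qps() allocates one DMA-coherent table with a u32 slot per QP, and init_qp() hands each SQ the virtual and DMA address of its own slot via CI_ADDR(). A sketch of that addressing, assuming <linux/types.h> and using illustrative names (the driver expresses the same thing with the CI_ADDR()/CI_TABLE_SIZE() macros in hinic_hw_io.c):

#include <linux/types.h>

#define CI_SLOT_SIZE	sizeof(u32)

/* Return the virtual and DMA address of queue q_id's CI slot inside a
 * table that starts at (ci_addr_base, ci_dma_base).
 */
static void ci_slot_for_queue(void *ci_addr_base, dma_addr_t ci_dma_base,
			      u16 q_id, void **ci_addr, dma_addr_t *ci_dma)
{
	*ci_addr = ci_addr_base + q_id * CI_SLOT_SIZE;
	*ci_dma = ci_dma_base + q_id * CI_SLOT_SIZE;
}

As the hinic_init_sq() kerneldoc above puts it, this slot is "the address for reading the current HW consumer index", which is why each SQ is given both the CPU-visible and the DMA view of the same location.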
From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:55:59 +0800 Subject: net-next/hinic: Set qp context Update the nic about the resources of the queue pairs. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/Makefile | 5 +- drivers/net/ethernet/huawei/hinic/hinic_common.c | 55 ++++++ drivers/net/ethernet/huawei/hinic/hinic_common.h | 4 + drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 87 +++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 84 ++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 4 + drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 151 +++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 5 + drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 160 +++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 11 ++ .../net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h | 214 +++++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h | 9 + 12 files changed, 787 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_common.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h create mode 100644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile index 84815f7c28dc..289ce88bb2d0 100644 --- a/drivers/net/ethernet/huawei/hinic/Makefile +++ b/drivers/net/ethernet/huawei/hinic/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_HINIC) += hinic.o hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \ - hinic_hw_io.o hinic_hw_qp.o hinic_hw_wq.o hinic_hw_mgmt.o \ - hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o + hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \ + hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \ + hinic_common.o diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.c b/drivers/net/ethernet/huawei/hinic/hinic_common.c new file mode 100644 index 000000000000..1915ad63deec --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_common.c @@ -0,0 +1,55 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + */ + +#include +#include + +#include "hinic_common.h" + +/** + * hinic_cpu_to_be32 - convert data to big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + **/ +void hinic_cpu_to_be32(void *data, int len) +{ + u32 *mem = data; + int i; + + len = len / sizeof(u32); + + for (i = 0; i < len; i++) { + *mem = cpu_to_be32(*mem); + mem++; + } +} + +/** + * hinic_be32_to_cpu - convert data from big endian 32 bit format + * @data: the data to convert + * @len: length of data to convert + **/ +void hinic_be32_to_cpu(void *data, int len) +{ + u32 *mem = data; + int i; + + len = len / sizeof(u32); + + for (i = 0; i < len; i++) { + *mem = be32_to_cpu(*mem); + mem++; + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/hinic_common.h index 6a83c157b978..0f2f4ff70c97 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_common.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_common.h @@ -22,4 +22,8 @@ struct hinic_sge { u32 len; }; +void hinic_cpu_to_be32(void *data, int len); + +void hinic_be32_to_cpu(void *data, int len); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c new file mode 100644 index 000000000000..2fd3924f4eaf --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -0,0 +1,87 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
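The two helpers above treat their argument as an array of 32-bit words and swap it in place, so a hardware context can be built in CPU byte order and converted as a single block right before it is handed over. A hypothetical usage sketch follows; the struct and field names are illustrative, not part of the driver.

#include <linux/types.h>

#include "hinic_common.h"

/* Illustrative context: any structure made purely of u32 words can be
 * byte-swapped in place as one block.
 */
struct example_hw_ctxt {
	u32 field_a;
	u32 field_b;
	u32 field_c;
};

static void example_fill_ctxt(struct example_hw_ctxt *ctxt)
{
	/* Build the structure in CPU byte order... */
	ctxt->field_a = 0x1234;
	ctxt->field_b = 0x5678;
	ctxt->field_c = 0x9abc;

	/* ...then swap the whole block once, just before the HW sees it. */
	hinic_cpu_to_be32(ctxt, sizeof(*ctxt));
}

hinic_sq_prepare_ctxt() and hinic_rq_prepare_ctxt() later in this patch end with exactly this kind of hinic_cpu_to_be32() call on the finished context structure.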
+ * + */ + +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_cmdq.h" + +/** + * hinic_alloc_cmdq_buf - alloc buffer for sending command + * @cmdqs: the cmdqs + * @cmdq_buf: the buffer returned in this struct + * + * Return 0 - Success, negative - Failure + **/ +int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, + struct hinic_cmdq_buf *cmdq_buf) +{ + /* should be implemented */ + return -ENOMEM; +} + +/** + * hinic_free_cmdq_buf - free buffer + * @cmdqs: the cmdqs + * @cmdq_buf: the buffer to free that is in this struct + **/ +void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, + struct hinic_cmdq_buf *cmdq_buf) +{ + /* should be implemented */ +} + +/** + * hinic_cmdq_direct_resp - send command with direct data as resp + * @cmdqs: the cmdqs + * @mod: module on the card that will handle the command + * @cmd: the command + * @buf_in: the buffer for the command + * @resp: the response to return + * + * Return 0 - Success, negative - Failure + **/ +int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmdq_buf *buf_in, u64 *resp) +{ + /* should be implemented */ + return -EINVAL; +} + +/** + * hinic_init_cmdqs - init all cmdqs + * @cmdqs: cmdqs to init + * @hwif: HW interface for accessing cmdqs + * @db_area: doorbell areas for all the cmdqs + * + * Return 0 - Success, negative - Failure + **/ +int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, + void __iomem **db_area) +{ + /* should be implemented */ + return -EINVAL; +} + +/** + * hinic_free_cmdqs - free all cmdqs + * @cmdqs: cmdqs to free + **/ +void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs) +{ + /* should be implemented */ +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h new file mode 100644 index 000000000000..c9e97cad9b29 --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h @@ -0,0 +1,84 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
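The cmdq entry points above are still stubs (a later patch in this series fills them in), but they already fix the calling convention: allocate a buffer from the cmdq pool, build the command payload in it, send it with hinic_cmdq_direct_resp() and check both the return code and out_param, then free the buffer. write_sq_ctxts()/write_rq_ctxts() further down follow this shape; here is a condensed sketch in which the command value and the payload are illustrative placeholders.

#include "hinic_hw_cmdq.h"

static int example_send_cmd(struct hinic_cmdqs *cmdqs, struct pci_dev *pdev)
{
	struct hinic_cmdq_buf cmdq_buf;
	u64 out_param;
	int err;

	/* The buffer comes from the cmdq PCI pool, so it is DMA-able */
	err = hinic_alloc_cmdq_buf(cmdqs, &cmdq_buf);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");
		return err;
	}

	/* ... build the command payload in cmdq_buf.buf and set
	 * cmdq_buf.size accordingly ...
	 */

	err = hinic_cmdq_direct_resp(cmdqs, HINIC_MOD_L2NIC, 0 /* cmd */,
				     &cmdq_buf, &out_param);
	if (err || out_param != 0)
		err = -EFAULT;

	hinic_free_cmdq_buf(cmdqs, &cmdq_buf);
	return err;
}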
+ * + */ + +#ifndef HINIC_CMDQ_H +#define HINIC_CMDQ_H + +#include +#include +#include +#include + +#include "hinic_hw_if.h" +#include "hinic_hw_wq.h" + +#define HINIC_CMDQ_BUF_SIZE 2048 + +enum hinic_cmdq_type { + HINIC_CMDQ_SYNC, + + HINIC_MAX_CMDQ_TYPES, +}; + +struct hinic_cmdq_buf { + void *buf; + dma_addr_t dma_addr; + size_t size; +}; + +struct hinic_cmdq { + struct hinic_wq *wq; + + enum hinic_cmdq_type cmdq_type; + int wrapped; + + /* Lock for keeping the doorbell order */ + spinlock_t cmdq_lock; + + struct completion **done; + int **errcode; + + /* doorbell area */ + void __iomem *db_base; +}; + +struct hinic_cmdqs { + struct hinic_hwif *hwif; + + struct pci_pool *cmdq_buf_pool; + + struct hinic_wq *saved_wqs; + + struct hinic_cmdq_pages cmdq_pages; + + struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES]; +}; + +int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, + struct hinic_cmdq_buf *cmdq_buf); + +void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, + struct hinic_cmdq_buf *cmdq_buf); + +int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmdq_buf *buf_in, u64 *out_param); + +int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, + void __iomem **db_area); + +void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs); + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 5ae1c3682be0..f29fea1dc9d2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -25,6 +25,7 @@ #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #include "hinic_hw_mgmt.h" +#include "hinic_hw_qp_ctxt.h" #include "hinic_hw_qp.h" #include "hinic_hw_io.h" #include "hinic_hw_dev.h" @@ -76,6 +77,9 @@ static int get_capability(struct hinic_hwdev *hwdev, /* Each QP has its own (SQ + RQ) interrupts */ nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2; + if (nic_cap->num_qps > HINIC_Q_CTXT_MAX) + nic_cap->num_qps = HINIC_Q_CTXT_MAX; + /* num_qps must be power of 2 */ nic_cap->num_qps = BIT(fls(nic_cap->num_qps) - 1); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c index ad12cc77dc5c..bb4b93fe622a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c @@ -27,6 +27,8 @@ #include "hinic_hw_if.h" #include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" +#include "hinic_hw_cmdq.h" +#include "hinic_hw_qp_ctxt.h" #include "hinic_hw_qp.h" #include "hinic_hw_io.h" @@ -40,6 +42,10 @@ #define DB_IDX(db, db_base) \ (((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE) +enum io_cmd { + IO_CMD_MODIFY_QUEUE_CTXT = 0, +}; + static void init_db_area_idx(struct hinic_free_db_area *free_db_area) { int i; @@ -100,6 +106,109 @@ static void return_db_area(struct hinic_func_to_io *func_to_io, up(&free_db_area->idx_lock); } +static int write_sq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, + u16 num_sqs) +{ + struct hinic_hwif *hwif = func_to_io->hwif; + struct hinic_sq_ctxt_block *sq_ctxt_block; + struct pci_dev *pdev = hwif->pdev; + struct hinic_cmdq_buf cmdq_buf; + struct hinic_sq_ctxt *sq_ctxt; + struct hinic_qp *qp; + u64 out_param; + int err, i; + + err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); + if (err) { + dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); + return err; + } + + sq_ctxt_block = cmdq_buf.buf; + sq_ctxt = sq_ctxt_block->sq_ctxt; + + 
hinic_qp_prepare_header(&sq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_SQ, + num_sqs, func_to_io->max_qps); + for (i = 0; i < num_sqs; i++) { + qp = &func_to_io->qps[i]; + + hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq, + base_qpn + qp->q_id); + } + + cmdq_buf.size = HINIC_SQ_CTXT_SIZE(num_sqs); + + err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, + IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, + &out_param); + if ((err) || (out_param != 0)) { + dev_err(&pdev->dev, "Failed to set SQ ctxts\n"); + err = -EFAULT; + } + + hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); + return err; +} + +static int write_rq_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, + u16 num_rqs) +{ + struct hinic_hwif *hwif = func_to_io->hwif; + struct hinic_rq_ctxt_block *rq_ctxt_block; + struct pci_dev *pdev = hwif->pdev; + struct hinic_cmdq_buf cmdq_buf; + struct hinic_rq_ctxt *rq_ctxt; + struct hinic_qp *qp; + u64 out_param; + int err, i; + + err = hinic_alloc_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); + if (err) { + dev_err(&pdev->dev, "Failed to allocate cmdq buf\n"); + return err; + } + + rq_ctxt_block = cmdq_buf.buf; + rq_ctxt = rq_ctxt_block->rq_ctxt; + + hinic_qp_prepare_header(&rq_ctxt_block->hdr, HINIC_QP_CTXT_TYPE_RQ, + num_rqs, func_to_io->max_qps); + for (i = 0; i < num_rqs; i++) { + qp = &func_to_io->qps[i]; + + hinic_rq_prepare_ctxt(&rq_ctxt[i], &qp->rq, + base_qpn + qp->q_id); + } + + cmdq_buf.size = HINIC_RQ_CTXT_SIZE(num_rqs); + + err = hinic_cmdq_direct_resp(&func_to_io->cmdqs, HINIC_MOD_L2NIC, + IO_CMD_MODIFY_QUEUE_CTXT, &cmdq_buf, + &out_param); + if ((err) || (out_param != 0)) { + dev_err(&pdev->dev, "Failed to set RQ ctxts\n"); + err = -EFAULT; + } + + hinic_free_cmdq_buf(&func_to_io->cmdqs, &cmdq_buf); + return err; +} + +/** + * write_qp_ctxts - write the qp ctxt to HW + * @func_to_io: func to io channel that holds the IO components + * @base_qpn: first qp number + * @num_qps: number of qps to write + * + * Return 0 - Success, negative - Failure + **/ +static int write_qp_ctxts(struct hinic_func_to_io *func_to_io, u16 base_qpn, + u16 num_qps) +{ + return (write_sq_ctxts(func_to_io, base_qpn, num_qps) || + write_rq_ctxts(func_to_io, base_qpn, num_qps)); +} + /** * init_qp - Initialize a Queue Pair * @func_to_io: func to io channel that holds the IO components @@ -265,8 +374,15 @@ int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, } } + err = write_qp_ctxts(func_to_io, base_qpn, num_qps); + if (err) { + dev_err(&pdev->dev, "Failed to init QP ctxts\n"); + goto err_write_qp_ctxts; + } + return 0; +err_write_qp_ctxts: err_init_qp: for (j = 0; j < i; j++) destroy_qp(func_to_io, &func_to_io->qps[j]); @@ -331,6 +447,8 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io, struct msix_entry *ceq_msix_entries) { struct pci_dev *pdev = hwif->pdev; + enum hinic_cmdq_type cmdq, type; + void __iomem *db_area; int err; func_to_io->hwif = hwif; @@ -351,8 +469,34 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io, } init_db_area_idx(&func_to_io->free_db_area); + + for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) { + db_area = get_db_area(func_to_io); + if (IS_ERR(db_area)) { + dev_err(&pdev->dev, "Failed to get cmdq db area\n"); + err = PTR_ERR(db_area); + goto err_db_area; + } + + func_to_io->cmdq_db_area[cmdq] = db_area; + } + + err = hinic_init_cmdqs(&func_to_io->cmdqs, hwif, + func_to_io->cmdq_db_area); + if (err) { + dev_err(&pdev->dev, "Failed to initialize cmdqs\n"); + goto err_init_cmdqs; + } + return 0; +err_init_cmdqs: +err_db_area: + for (type = 
HINIC_CMDQ_SYNC; type < cmdq; type++) + return_db_area(func_to_io, func_to_io->cmdq_db_area[type]); + + iounmap(func_to_io->db_base); + err_db_ioremap: hinic_wqs_free(&func_to_io->wqs); return err; @@ -364,6 +508,13 @@ err_db_ioremap: **/ void hinic_io_free(struct hinic_func_to_io *func_to_io) { + enum hinic_cmdq_type cmdq; + + hinic_free_cmdqs(&func_to_io->cmdqs); + + for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) + return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]); + iounmap(func_to_io->db_base); hinic_wqs_free(&func_to_io->wqs); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h index 2d85a38a1df6..60d77b343fa7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h @@ -23,6 +23,7 @@ #include "hinic_hw_if.h" #include "hinic_hw_wq.h" +#include "hinic_hw_cmdq.h" #include "hinic_hw_qp.h" #define HINIC_DB_PAGE_SIZE SZ_4K @@ -60,6 +61,10 @@ struct hinic_func_to_io { dma_addr_t ci_dma_base; struct hinic_free_db_area free_db_area; + + void __iomem *cmdq_db_area[HINIC_MAX_CMDQ_TYPES]; + + struct hinic_cmdqs cmdqs; }; int hinic_io_create_qps(struct hinic_func_to_io *func_to_io, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c index 2b77b592e90d..13e0ff3533a4 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -21,13 +21,173 @@ #include #include #include +#include +#include +#include "hinic_common.h" #include "hinic_hw_if.h" #include "hinic_hw_wq.h" +#include "hinic_hw_qp_ctxt.h" #include "hinic_hw_qp.h" #define SQ_DB_OFF SZ_2K +/* The number of cache line to prefetch Until threshold state */ +#define WQ_PREFETCH_MAX 2 +/* The number of cache line to prefetch After threshold state */ +#define WQ_PREFETCH_MIN 1 +/* Threshold state */ +#define WQ_PREFETCH_THRESHOLD 256 + +/* sizes of the SQ/RQ ctxt */ +#define Q_CTXT_SIZE 48 +#define CTXT_RSVD 240 + +#define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ + (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE) + +#define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \ + (((max_rqs) + (max_sqs)) * CTXT_RSVD + \ + (max_sqs + (q_id)) * Q_CTXT_SIZE) + +#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4) + +void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, + enum hinic_qp_ctxt_type ctxt_type, + u16 num_queues, u16 max_queues) +{ + u16 max_sqs = max_queues; + u16 max_rqs = max_queues; + + qp_ctxt_hdr->num_queues = num_queues; + qp_ctxt_hdr->queue_type = ctxt_type; + + if (ctxt_type == HINIC_QP_CTXT_TYPE_SQ) + qp_ctxt_hdr->addr_offset = SQ_CTXT_OFFSET(max_sqs, max_rqs, 0); + else + qp_ctxt_hdr->addr_offset = RQ_CTXT_OFFSET(max_sqs, max_rqs, 0); + + qp_ctxt_hdr->addr_offset = SIZE_16BYTES(qp_ctxt_hdr->addr_offset); + + hinic_cpu_to_be32(qp_ctxt_hdr, sizeof(*qp_ctxt_hdr)); +} + +void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt, + struct hinic_sq *sq, u16 global_qid) +{ + u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo; + u64 wq_page_addr, wq_page_pfn, wq_block_pfn; + u16 pi_start, ci_start; + struct hinic_wq *wq; + + wq = sq->wq; + ci_start = atomic_read(&wq->cons_idx); + pi_start = atomic_read(&wq->prod_idx); + + /* Read the first page paddr from the WQ page paddr ptrs */ + wq_page_addr = be64_to_cpu(*wq->block_vaddr); + + wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = 
lower_32_bits(wq_page_pfn); + + wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + sq_ctxt->ceq_attr = HINIC_SQ_CTXT_CEQ_ATTR_SET(global_qid, + GLOBAL_SQ_ID) | + HINIC_SQ_CTXT_CEQ_ATTR_SET(0, EN); + + sq_ctxt->ci_wrapped = HINIC_SQ_CTXT_CI_SET(ci_start, IDX) | + HINIC_SQ_CTXT_CI_SET(1, WRAPPED); + + sq_ctxt->wq_hi_pfn_pi = + HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) | + HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI); + + sq_ctxt->wq_lo_pfn = wq_page_pfn_lo; + + sq_ctxt->pref_cache = + HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + HINIC_SQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + sq_ctxt->pref_wrapped = 1; + + sq_ctxt->pref_wq_hi_pfn_ci = + HINIC_SQ_CTXT_PREF_SET(ci_start, CI) | + HINIC_SQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN); + + sq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo; + + sq_ctxt->wq_block_hi_pfn = + HINIC_SQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN); + + sq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo; + + hinic_cpu_to_be32(sq_ctxt, sizeof(*sq_ctxt)); +} + +void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, + struct hinic_rq *rq, u16 global_qid) +{ + u32 wq_page_pfn_hi, wq_page_pfn_lo, wq_block_pfn_hi, wq_block_pfn_lo; + u64 wq_page_addr, wq_page_pfn, wq_block_pfn; + u16 pi_start, ci_start; + struct hinic_wq *wq; + + wq = rq->wq; + ci_start = atomic_read(&wq->cons_idx); + pi_start = atomic_read(&wq->prod_idx); + + /* Read the first page paddr from the WQ page paddr ptrs */ + wq_page_addr = be64_to_cpu(*wq->block_vaddr); + + wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr); + wq_page_pfn_hi = upper_32_bits(wq_page_pfn); + wq_page_pfn_lo = lower_32_bits(wq_page_pfn); + + wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr); + wq_block_pfn_hi = upper_32_bits(wq_block_pfn); + wq_block_pfn_lo = lower_32_bits(wq_block_pfn); + + rq_ctxt->ceq_attr = HINIC_RQ_CTXT_CEQ_ATTR_SET(0, EN) | + HINIC_RQ_CTXT_CEQ_ATTR_SET(1, WRAPPED); + + rq_ctxt->pi_intr_attr = HINIC_RQ_CTXT_PI_SET(pi_start, IDX) | + HINIC_RQ_CTXT_PI_SET(rq->msix_entry, INTR); + + rq_ctxt->wq_hi_pfn_ci = HINIC_RQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, + HI_PFN) | + HINIC_RQ_CTXT_WQ_PAGE_SET(ci_start, CI); + + rq_ctxt->wq_lo_pfn = wq_page_pfn_lo; + + rq_ctxt->pref_cache = + HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MIN, CACHE_MIN) | + HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_MAX, CACHE_MAX) | + HINIC_RQ_CTXT_PREF_SET(WQ_PREFETCH_THRESHOLD, CACHE_THRESHOLD); + + rq_ctxt->pref_wrapped = 1; + + rq_ctxt->pref_wq_hi_pfn_ci = + HINIC_RQ_CTXT_PREF_SET(wq_page_pfn_hi, WQ_HI_PFN) | + HINIC_RQ_CTXT_PREF_SET(ci_start, CI); + + rq_ctxt->pref_wq_lo_pfn = wq_page_pfn_lo; + + rq_ctxt->pi_paddr_hi = upper_32_bits(rq->pi_dma_addr); + rq_ctxt->pi_paddr_lo = lower_32_bits(rq->pi_dma_addr); + + rq_ctxt->wq_block_hi_pfn = + HINIC_RQ_CTXT_WQ_BLOCK_SET(wq_block_pfn_hi, HI_PFN); + + rq_ctxt->wq_block_lo_pfn = wq_block_pfn_lo; + + hinic_cpu_to_be32(rq_ctxt, sizeof(*rq_ctxt)); +} + /** * alloc_sq_skb_arr - allocate sq array for saved skb * @sq: HW Send Queue diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h index c5ec30dcda96..56d1f8b9ca65 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -24,6 +24,7 @@ #include "hinic_hw_if.h" #include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" +#include "hinic_hw_qp_ctxt.h" #define HINIC_SQ_WQEBB_SIZE 64 #define HINIC_RQ_WQEBB_SIZE 32 
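The two prepare_ctxt helpers above share one recipe: shift a DMA address down to a page frame number (HINIC_WQ_PAGE_PFN() drops the low 12 bits and HINIC_WQ_BLOCK_PFN() the low 9, per hinic_hw_qp_ctxt.h below), split it with upper_32_bits()/lower_32_bits(), and OR the pieces into context words with the field SET macros. A worked sketch of one such word, using the macros from the header below; the function name is illustrative.

#include <linux/kernel.h>	/* upper_32_bits() */

#include "hinic_hw_qp_ctxt.h"

static u32 example_wq_hi_pfn_pi(u64 wq_page_addr, u16 pi_start)
{
	u64 wq_page_pfn = HINIC_WQ_PAGE_PFN(wq_page_addr);	/* addr >> 12 */
	u32 wq_page_pfn_hi = upper_32_bits(wq_page_pfn);

	/* HINIC_SQ_CTXT_WQ_PAGE_SET(v, HI_PFN) expands to (v & 0xFFFFF) << 0,
	 * and ...SET(pi_start, PI) to (pi_start & 0xFFF) << 20, so both
	 * fields share a single 32-bit context word.
	 */
	return HINIC_SQ_CTXT_WQ_PAGE_SET(wq_page_pfn_hi, HI_PFN) |
	       HINIC_SQ_CTXT_WQ_PAGE_SET(pi_start, PI);
}

With HI_PFN at shift 0 / mask 0xFFFFF and PI at shift 20 / mask 0xFFF, the 20-bit high half of the PFN and the 12-bit producer index pack exactly into one 32-bit word, which is what sq_ctxt->wq_hi_pfn_pi holds above.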
@@ -78,6 +79,16 @@ struct hinic_qp { u16 q_id; }; +void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, + enum hinic_qp_ctxt_type ctxt_type, + u16 num_queues, u16 max_queues); + +void hinic_sq_prepare_ctxt(struct hinic_sq_ctxt *sq_ctxt, + struct hinic_sq *sq, u16 global_qid); + +void hinic_rq_prepare_ctxt(struct hinic_rq_ctxt *rq_ctxt, + struct hinic_rq *rq, u16 global_qid); + int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif, struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr, dma_addr_t ci_dma_addr, void __iomem *db_base); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h new file mode 100644 index 000000000000..376abf00762b --- /dev/null +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h @@ -0,0 +1,214 @@ +/* + * Huawei HiNIC PCI Express Linux driver + * Copyright(c) 2017 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef HINIC_HW_QP_CTXT_H +#define HINIC_HW_QP_CTXT_H + +#include + +#include "hinic_hw_cmdq.h" + +#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_SHIFT 13 +#define HINIC_SQ_CTXT_CEQ_ATTR_EN_SHIFT 23 + +#define HINIC_SQ_CTXT_CEQ_ATTR_GLOBAL_SQ_ID_MASK 0x3FF +#define HINIC_SQ_CTXT_CEQ_ATTR_EN_MASK 0x1 + +#define HINIC_SQ_CTXT_CEQ_ATTR_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTXT_CEQ_ATTR_##member##_MASK) \ + << HINIC_SQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define HINIC_SQ_CTXT_CI_IDX_SHIFT 11 +#define HINIC_SQ_CTXT_CI_WRAPPED_SHIFT 23 + +#define HINIC_SQ_CTXT_CI_IDX_MASK 0xFFF +#define HINIC_SQ_CTXT_CI_WRAPPED_MASK 0x1 + +#define HINIC_SQ_CTXT_CI_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTXT_CI_##member##_MASK) \ + << HINIC_SQ_CTXT_CI_##member##_SHIFT) + +#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define HINIC_SQ_CTXT_WQ_PAGE_PI_SHIFT 20 + +#define HINIC_SQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF +#define HINIC_SQ_CTXT_WQ_PAGE_PI_MASK 0xFFF + +#define HINIC_SQ_CTXT_WQ_PAGE_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTXT_WQ_PAGE_##member##_MASK) \ + << HINIC_SQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define HINIC_SQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define HINIC_SQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define HINIC_SQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF +#define HINIC_SQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF +#define HINIC_SQ_CTXT_PREF_CACHE_MIN_MASK 0x7F + +#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0 +#define HINIC_SQ_CTXT_PREF_CI_SHIFT 20 + +#define HINIC_SQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF +#define HINIC_SQ_CTXT_PREF_CI_MASK 0xFFF + +#define HINIC_SQ_CTXT_PREF_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTXT_PREF_##member##_MASK) \ + << HINIC_SQ_CTXT_PREF_##member##_SHIFT) + +#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0 + +#define HINIC_SQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF + +#define HINIC_SQ_CTXT_WQ_BLOCK_SET(val, member) \ + (((u32)(val) & HINIC_SQ_CTXT_WQ_BLOCK_##member##_MASK) \ + << HINIC_SQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define HINIC_RQ_CTXT_CEQ_ATTR_EN_SHIFT 0 +#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_SHIFT 1 + +#define HINIC_RQ_CTXT_CEQ_ATTR_EN_MASK 0x1 
+#define HINIC_RQ_CTXT_CEQ_ATTR_WRAPPED_MASK 0x1 + +#define HINIC_RQ_CTXT_CEQ_ATTR_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTXT_CEQ_ATTR_##member##_MASK) \ + << HINIC_RQ_CTXT_CEQ_ATTR_##member##_SHIFT) + +#define HINIC_RQ_CTXT_PI_IDX_SHIFT 0 +#define HINIC_RQ_CTXT_PI_INTR_SHIFT 22 + +#define HINIC_RQ_CTXT_PI_IDX_MASK 0xFFF +#define HINIC_RQ_CTXT_PI_INTR_MASK 0x3FF + +#define HINIC_RQ_CTXT_PI_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTXT_PI_##member##_MASK) << \ + HINIC_RQ_CTXT_PI_##member##_SHIFT) + +#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_SHIFT 0 +#define HINIC_RQ_CTXT_WQ_PAGE_CI_SHIFT 20 + +#define HINIC_RQ_CTXT_WQ_PAGE_HI_PFN_MASK 0xFFFFF +#define HINIC_RQ_CTXT_WQ_PAGE_CI_MASK 0xFFF + +#define HINIC_RQ_CTXT_WQ_PAGE_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTXT_WQ_PAGE_##member##_MASK) << \ + HINIC_RQ_CTXT_WQ_PAGE_##member##_SHIFT) + +#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_SHIFT 0 +#define HINIC_RQ_CTXT_PREF_CACHE_MAX_SHIFT 14 +#define HINIC_RQ_CTXT_PREF_CACHE_MIN_SHIFT 25 + +#define HINIC_RQ_CTXT_PREF_CACHE_THRESHOLD_MASK 0x3FFF +#define HINIC_RQ_CTXT_PREF_CACHE_MAX_MASK 0x7FF +#define HINIC_RQ_CTXT_PREF_CACHE_MIN_MASK 0x7F + +#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_SHIFT 0 +#define HINIC_RQ_CTXT_PREF_CI_SHIFT 20 + +#define HINIC_RQ_CTXT_PREF_WQ_HI_PFN_MASK 0xFFFFF +#define HINIC_RQ_CTXT_PREF_CI_MASK 0xFFF + +#define HINIC_RQ_CTXT_PREF_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTXT_PREF_##member##_MASK) << \ + HINIC_RQ_CTXT_PREF_##member##_SHIFT) + +#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_SHIFT 0 + +#define HINIC_RQ_CTXT_WQ_BLOCK_HI_PFN_MASK 0x7FFFFF + +#define HINIC_RQ_CTXT_WQ_BLOCK_SET(val, member) \ + (((u32)(val) & HINIC_RQ_CTXT_WQ_BLOCK_##member##_MASK) << \ + HINIC_RQ_CTXT_WQ_BLOCK_##member##_SHIFT) + +#define HINIC_SQ_CTXT_SIZE(num_sqs) (sizeof(struct hinic_qp_ctxt_header) \ + + (num_sqs) * sizeof(struct hinic_sq_ctxt)) + +#define HINIC_RQ_CTXT_SIZE(num_rqs) (sizeof(struct hinic_qp_ctxt_header) \ + + (num_rqs) * sizeof(struct hinic_rq_ctxt)) + +#define HINIC_WQ_PAGE_PFN_SHIFT 12 +#define HINIC_WQ_BLOCK_PFN_SHIFT 9 + +#define HINIC_WQ_PAGE_PFN(page_addr) ((page_addr) >> HINIC_WQ_PAGE_PFN_SHIFT) +#define HINIC_WQ_BLOCK_PFN(page_addr) ((page_addr) >> \ + HINIC_WQ_BLOCK_PFN_SHIFT) + +#define HINIC_Q_CTXT_MAX \ + ((HINIC_CMDQ_BUF_SIZE - sizeof(struct hinic_qp_ctxt_header)) \ + / sizeof(struct hinic_sq_ctxt)) + +enum hinic_qp_ctxt_type { + HINIC_QP_CTXT_TYPE_SQ, + HINIC_QP_CTXT_TYPE_RQ +}; + +struct hinic_qp_ctxt_header { + u16 num_queues; + u16 queue_type; + u32 addr_offset; +}; + +struct hinic_sq_ctxt { + u32 ceq_attr; + + u32 ci_wrapped; + + u32 wq_hi_pfn_pi; + u32 wq_lo_pfn; + + u32 pref_cache; + u32 pref_wrapped; + u32 pref_wq_hi_pfn_ci; + u32 pref_wq_lo_pfn; + + u32 rsvd0; + u32 rsvd1; + + u32 wq_block_hi_pfn; + u32 wq_block_lo_pfn; +}; + +struct hinic_rq_ctxt { + u32 ceq_attr; + + u32 pi_intr_attr; + + u32 wq_hi_pfn_ci; + u32 wq_lo_pfn; + + u32 pref_cache; + u32 pref_wrapped; + + u32 pref_wq_hi_pfn_ci; + u32 pref_wq_lo_pfn; + + u32 pi_paddr_hi; + u32 pi_paddr_lo; + + u32 wq_block_hi_pfn; + u32 wq_block_lo_pfn; +}; + +struct hinic_sq_ctxt_block { + struct hinic_qp_ctxt_header hdr; + struct hinic_sq_ctxt sq_ctxt[HINIC_Q_CTXT_MAX]; +}; + +struct hinic_rq_ctxt_block { + struct hinic_qp_ctxt_header hdr; + struct hinic_rq_ctxt rq_ctxt[HINIC_Q_CTXT_MAX]; +}; + +#endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h index 7c114daf13ef..8ce259ace24a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h +++ 
b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h @@ -72,6 +72,15 @@ struct hinic_wqs { struct semaphore alloc_blocks_lock; }; +struct hinic_cmdq_pages { + /* The addresses are 64 bit in the HW */ + u64 page_paddr; + u64 *page_vaddr; + void **shadow_page_vaddr; + + struct hinic_hwif *hwif; +}; + int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, struct hinic_hwif *hwif); -- cgit v1.2.3-55-g7522 From d0b9805e8222e86378c04d6bab366181b707631e Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:56:00 +0800 Subject: net-next/hinic: Initialize cmdq Create the work queues for cmdq and update the nic about the work queue contexts. cmdq commands are used for updating the nic about the qp contexts. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 282 +++++++++++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 53 ++++ drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h | 2 + drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 5 + drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 156 ++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h | 8 + 6 files changed, 500 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 2fd3924f4eaf..0dccbe678c31 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -13,11 +13,49 @@ * */ +#include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "hinic_hw_if.h" +#include "hinic_hw_mgmt.h" +#include "hinic_hw_wq.h" #include "hinic_hw_cmdq.h" +#include "hinic_hw_io.h" +#include "hinic_hw_dev.h" + +#define CMDQ_DB_OFF SZ_2K + +#define CMDQ_WQEBB_SIZE 64 +#define CMDQ_DEPTH SZ_4K + +#define CMDQ_WQ_PAGE_SIZE SZ_4K + +#define WQE_LCMD_SIZE 64 +#define WQE_SCMD_SIZE 64 + +#define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size))) + +#define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ + struct hinic_cmdqs, cmdq[0]) + +#define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \ + struct hinic_func_to_io, \ + cmdqs) + +enum cmdq_wqe_type { + WQE_LCMD_TYPE = 0, + WQE_SCMD_TYPE = 1, +}; /** * hinic_alloc_cmdq_buf - alloc buffer for sending command @@ -29,8 +67,17 @@ int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, struct hinic_cmdq_buf *cmdq_buf) { - /* should be implemented */ - return -ENOMEM; + struct hinic_hwif *hwif = cmdqs->hwif; + struct pci_dev *pdev = hwif->pdev; + + cmdq_buf->buf = pci_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL, + &cmdq_buf->dma_addr); + if (!cmdq_buf->buf) { + dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n"); + return -ENOMEM; + } + + return 0; } /** @@ -41,7 +88,7 @@ int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs, void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, struct hinic_cmdq_buf *cmdq_buf) { - /* should be implemented */ + pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr); } /** @@ -62,6 +109,169 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, return -EINVAL; } +/** + * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq + * @cmdq_ctxt: cmdq ctxt to initialize + * @cmdq: the cmdq + * @cmdq_pages: the memory of the queue + **/ +static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt, + struct hinic_cmdq *cmdq, + struct hinic_cmdq_pages *cmdq_pages) +{ + struct hinic_cmdq_ctxt_info *ctxt_info = 
&cmdq_ctxt->ctxt_info; + u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn; + struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq); + struct hinic_wq *wq = cmdq->wq; + + /* The data in the HW is in Big Endian Format */ + wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr); + + pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size); + + ctxt_info->curr_wqe_page_pfn = + HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) | + HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) | + HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) | + HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) | + HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED); + + /* block PFN - Read Modify Write */ + cmdq_first_block_paddr = cmdq_pages->page_paddr; + + pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size); + + ctxt_info->wq_block_pfn = + HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) | + HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI); + + cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif); + cmdq_ctxt->cmdq_type = cmdq->cmdq_type; +} + +/** + * init_cmdq - initialize cmdq + * @cmdq: the cmdq + * @wq: the wq attaced to the cmdq + * @q_type: the cmdq type of the cmdq + * @db_area: doorbell area for the cmdq + * + * Return 0 - Success, negative - Failure + **/ +static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq, + enum hinic_cmdq_type q_type, void __iomem *db_area) +{ + int err; + + cmdq->wq = wq; + cmdq->cmdq_type = q_type; + cmdq->wrapped = 1; + + spin_lock_init(&cmdq->cmdq_lock); + + cmdq->done = vzalloc(wq->q_depth * sizeof(*cmdq->done)); + if (!cmdq->done) + return -ENOMEM; + + cmdq->errcode = vzalloc(wq->q_depth * sizeof(*cmdq->errcode)); + if (!cmdq->errcode) { + err = -ENOMEM; + goto err_errcode; + } + + cmdq->db_base = db_area + CMDQ_DB_OFF; + return 0; + +err_errcode: + vfree(cmdq->done); + return err; +} + +/** + * free_cmdq - Free cmdq + * @cmdq: the cmdq to free + **/ +static void free_cmdq(struct hinic_cmdq *cmdq) +{ + vfree(cmdq->errcode); + vfree(cmdq->done); +} + +/** + * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq + * @hwdev: the NIC HW device + * @cmdqs: cmdqs to write the ctxts for + * &db_area: db_area for all the cmdqs + * + * Return 0 - Success, negative - Failure + **/ +static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev, + struct hinic_cmdqs *cmdqs, void __iomem **db_area) +{ + struct hinic_hwif *hwif = hwdev->hwif; + enum hinic_cmdq_type type, cmdq_type; + struct hinic_cmdq_ctxt *cmdq_ctxts; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + size_t cmdq_ctxts_size; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI function type\n"); + return -EINVAL; + } + + cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts); + cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL); + if (!cmdq_ctxts) + return -ENOMEM; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + err = init_cmdq(&cmdqs->cmdq[cmdq_type], + &cmdqs->saved_wqs[cmdq_type], cmdq_type, + db_area[cmdq_type]); + if (err) { + dev_err(&pdev->dev, "Failed to initialize cmdq\n"); + goto err_init_cmdq; + } + + cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type], + &cmdqs->cmdq[cmdq_type], + &cmdqs->cmdq_pages); + } + + /* Write the CMDQ ctxts */ + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) { + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + 
HINIC_COMM_CMD_CMDQ_CTXT_SET, + &cmdq_ctxts[cmdq_type], + sizeof(cmdq_ctxts[cmdq_type]), + NULL, NULL, HINIC_MGMT_MSG_SYNC); + if (err) { + dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n", + cmdq_type); + goto err_write_cmdq_ctxt; + } + } + + devm_kfree(&pdev->dev, cmdq_ctxts); + return 0; + +err_write_cmdq_ctxt: + cmdq_type = HINIC_MAX_CMDQ_TYPES; + +err_init_cmdq: + for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++) + free_cmdq(&cmdqs->cmdq[type]); + + devm_kfree(&pdev->dev, cmdq_ctxts); + return err; +} + /** * hinic_init_cmdqs - init all cmdqs * @cmdqs: cmdqs to init @@ -73,8 +283,55 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, void __iomem **db_area) { - /* should be implemented */ - return -EINVAL; + struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); + struct pci_dev *pdev = hwif->pdev; + struct hinic_hwdev *hwdev; + size_t saved_wqs_size; + u16 max_wqe_size; + int err; + + cmdqs->hwif = hwif; + cmdqs->cmdq_buf_pool = pci_pool_create("hinic_cmdq", pdev, + HINIC_CMDQ_BUF_SIZE, + HINIC_CMDQ_BUF_SIZE, 0); + if (!cmdqs->cmdq_buf_pool) + return -ENOMEM; + + saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq); + cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL); + if (!cmdqs->saved_wqs) { + err = -ENOMEM; + goto err_saved_wqs; + } + + max_wqe_size = WQE_LCMD_SIZE; + err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif, + HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE, + CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size); + if (err) { + dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n"); + goto err_cmdq_wqs; + } + + hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io); + err = init_cmdqs_ctxt(hwdev, cmdqs, db_area); + if (err) { + dev_err(&pdev->dev, "Failed to write cmdq ctxt\n"); + goto err_cmdq_ctxt; + } + + return 0; + +err_cmdq_ctxt: + hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + HINIC_MAX_CMDQ_TYPES); + +err_cmdq_wqs: + devm_kfree(&pdev->dev, cmdqs->saved_wqs); + +err_saved_wqs: + pci_pool_destroy(cmdqs->cmdq_buf_pool); + return err; } /** @@ -83,5 +340,18 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, **/ void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs) { - /* should be implemented */ + struct hinic_hwif *hwif = cmdqs->hwif; + struct pci_dev *pdev = hwif->pdev; + enum hinic_cmdq_type cmdq_type; + + cmdq_type = HINIC_CMDQ_SYNC; + for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) + free_cmdq(&cmdqs->cmdq[cmdq_type]); + + hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, + HINIC_MAX_CMDQ_TYPES); + + devm_kfree(&pdev->dev, cmdqs->saved_wqs); + + pci_pool_destroy(cmdqs->cmdq_buf_pool); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h index c9e97cad9b29..5ec59f1b4b0a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h @@ -24,6 +24,40 @@ #include "hinic_hw_if.h" #include "hinic_hw_wq.h" +#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_SHIFT 0 +#define HINIC_CMDQ_CTXT_EQ_ID_SHIFT 56 +#define HINIC_CMDQ_CTXT_CEQ_ARM_SHIFT 61 +#define HINIC_CMDQ_CTXT_CEQ_EN_SHIFT 62 +#define HINIC_CMDQ_CTXT_WRAPPED_SHIFT 63 + +#define HINIC_CMDQ_CTXT_CURR_WQE_PAGE_PFN_MASK 0xFFFFFFFFFFFFF +#define HINIC_CMDQ_CTXT_EQ_ID_MASK 0x1F +#define HINIC_CMDQ_CTXT_CEQ_ARM_MASK 0x1 +#define HINIC_CMDQ_CTXT_CEQ_EN_MASK 0x1 +#define HINIC_CMDQ_CTXT_WRAPPED_MASK 0x1 + +#define 
HINIC_CMDQ_CTXT_PAGE_INFO_SET(val, member) \ + (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ + << HINIC_CMDQ_CTXT_##member##_SHIFT) + +#define HINIC_CMDQ_CTXT_PAGE_INFO_CLEAR(val, member) \ + ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ + << HINIC_CMDQ_CTXT_##member##_SHIFT))) + +#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_SHIFT 0 +#define HINIC_CMDQ_CTXT_CI_SHIFT 52 + +#define HINIC_CMDQ_CTXT_WQ_BLOCK_PFN_MASK 0xFFFFFFFFFFFFF +#define HINIC_CMDQ_CTXT_CI_MASK 0xFFF + +#define HINIC_CMDQ_CTXT_BLOCK_INFO_SET(val, member) \ + (((u64)(val) & HINIC_CMDQ_CTXT_##member##_MASK) \ + << HINIC_CMDQ_CTXT_##member##_SHIFT) + +#define HINIC_CMDQ_CTXT_BLOCK_INFO_CLEAR(val, member) \ + ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ + << HINIC_CMDQ_CTXT_##member##_SHIFT))) + #define HINIC_CMDQ_BUF_SIZE 2048 enum hinic_cmdq_type { @@ -38,6 +72,25 @@ struct hinic_cmdq_buf { size_t size; }; +struct hinic_cmdq_ctxt_info { + u64 curr_wqe_page_pfn; + u64 wq_block_pfn; +}; + +struct hinic_cmdq_ctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 cmdq_type; + u8 rsvd1[1]; + + u8 rsvd2[4]; + + struct hinic_cmdq_ctxt_info ctxt_info; +}; + struct hinic_cmdq { struct hinic_wq *wq; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h index 7f50b2f6cc22..ca584c05d30e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h @@ -106,6 +106,8 @@ #define HINIC_EQ_PAGE_SIZE SZ_4K +#define HINIC_CEQ_ID_CMDQ 0 + enum hinic_eq_type { HINIC_AEQ, }; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index 8021406cca7c..90116c2db819 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -68,6 +68,11 @@ enum hinic_cfg_cmd { HINIC_CFG_NIC_CAP = 0, }; +enum hinic_comm_cmd { + HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10, + HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11, +}; + enum hinic_mgmt_cb_state { HINIC_MGMT_CB_ENABLED = BIT(0), HINIC_MGMT_CB_RUNNING = BIT(1), diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index fc72b76e744b..638aab790bd6 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -27,6 +27,7 @@ #include "hinic_hw_if.h" #include "hinic_hw_wq.h" +#include "hinic_hw_cmdq.h" #define WQS_BLOCKS_PER_PAGE 4 @@ -42,6 +43,11 @@ #define WQ_PAGE_ADDR_SIZE sizeof(u64) #define WQ_MAX_PAGES (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) +#define CMDQ_BLOCK_SIZE 512 +#define CMDQ_PAGE_SIZE 4096 + +#define CMDQ_WQ_MAX_PAGES (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE) + #define WQ_BASE_VADDR(wqs, wq) \ ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \ + (wq)->block_idx * WQ_BLOCK_SIZE) @@ -54,6 +60,18 @@ ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \ + (wq)->block_idx * WQ_BLOCK_SIZE) +#define CMDQ_BASE_VADDR(cmdq_pages, wq) \ + ((void *)((cmdq_pages)->page_vaddr) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_PADDR(cmdq_pages, wq) \ + ((cmdq_pages)->page_paddr \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + +#define CMDQ_BASE_ADDR(cmdq_pages, wq) \ + ((void *)((cmdq_pages)->shadow_page_vaddr) \ + + (wq)->block_idx * CMDQ_BLOCK_SIZE) + /** * queue_alloc_page - allocate page for Queue * @hwif: HW interface for allocating DMA @@ -122,6 +140,37 @@ static void wqs_free_page(struct hinic_wqs *wqs, int page_idx) vfree(wqs->shadow_page_vaddr[page_idx]); } +/** + * 
cmdq_allocate_page - allocate page for cmdq + * @cmdq_pages: the pages of the cmdq queue struct to hold the page + * + * Return 0 - Success, negative - Failure + **/ +static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages) +{ + return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr, + &cmdq_pages->page_paddr, + &cmdq_pages->shadow_page_vaddr, + CMDQ_PAGE_SIZE); +} + +/** + * cmdq_free_page - free page from cmdq + * @cmdq_pages: the pages of the cmdq queue struct that hold the page + * + * Return 0 - Success, negative - Failure + **/ +static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages) +{ + struct hinic_hwif *hwif = cmdq_pages->hwif; + struct pci_dev *pdev = hwif->pdev; + + dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE, + cmdq_pages->page_vaddr, + (dma_addr_t)cmdq_pages->page_paddr); + vfree(cmdq_pages->shadow_page_vaddr); +} + static int alloc_page_arrays(struct hinic_wqs *wqs) { struct hinic_hwif *hwif = wqs->hwif; @@ -514,3 +563,110 @@ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq) wqs_return_block(wqs, wq->page_idx, wq->block_idx); } + +/** + * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs + * @cmdq_pages: will hold the pages of the cmdq + * @wq: returned wqs + * @hwif: HW interface + * @cmdq_blocks: number of cmdq blocks/wq to allocate + * @wqebb_size: Work Queue Block Byte Size + * @wq_page_size: the page size in the Work Queue + * @q_depth: number of wqebbs in WQ + * @max_wqe_size: maximum WQE size that will be used in the WQ + * + * Return 0 - Success, negative - Failure + **/ +int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, struct hinic_hwif *hwif, + int cmdq_blocks, u16 wqebb_size, u16 wq_page_size, + u16 q_depth, u16 max_wqe_size) +{ + struct pci_dev *pdev = hwif->pdev; + u16 num_wqebbs_per_page; + int i, j, err = -ENOMEM; + + if (wqebb_size == 0) { + dev_err(&pdev->dev, "wqebb_size must be > 0\n"); + return -EINVAL; + } + + if (wq_page_size == 0) { + dev_err(&pdev->dev, "wq_page_size must be > 0\n"); + return -EINVAL; + } + + if (q_depth & (q_depth - 1)) { + dev_err(&pdev->dev, "WQ q_depth must be power of 2\n"); + return -EINVAL; + } + + num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; + + if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { + dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); + return -EINVAL; + } + + cmdq_pages->hwif = hwif; + + err = cmdq_allocate_page(cmdq_pages); + if (err) { + dev_err(&pdev->dev, "Failed to allocate CMDQ page\n"); + return err; + } + + for (i = 0; i < cmdq_blocks; i++) { + wq[i].hwif = hwif; + wq[i].page_idx = 0; + wq[i].block_idx = i; + + wq[i].wqebb_size = wqebb_size; + wq[i].wq_page_size = wq_page_size; + wq[i].q_depth = q_depth; + wq[i].max_wqe_size = max_wqe_size; + wq[i].num_wqebbs_per_page = num_wqebbs_per_page; + + wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); + wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); + wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); + + err = alloc_wq_pages(&wq[i], cmdq_pages->hwif, + CMDQ_WQ_MAX_PAGES); + if (err) { + dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n"); + goto err_cmdq_block; + } + + atomic_set(&wq[i].cons_idx, 0); + atomic_set(&wq[i].prod_idx, 0); + atomic_set(&wq[i].delta, q_depth); + wq[i].mask = q_depth - 1; + } + + return 0; + +err_cmdq_block: + for (j = 0; j < i; j++) + free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages); + + cmdq_free_page(cmdq_pages); + return err; +} + +/** + * hinic_wqs_cmdq_free - Free wqs from 
cmdqs + * @cmdq_pages: hold the pages of the cmdq + * @wq: wqs to free + * @cmdq_blocks: number of wqs to free + **/ +void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, int cmdq_blocks) +{ + int i; + + for (i = 0; i < cmdq_blocks; i++) + free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages); + + cmdq_free_page(cmdq_pages); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h index 8ce259ace24a..a3c4469d0efe 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h @@ -81,6 +81,14 @@ struct hinic_cmdq_pages { struct hinic_hwif *hwif; }; +int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, struct hinic_hwif *hwif, + int cmdq_blocks, u16 wqebb_size, u16 wq_page_size, + u16 q_depth, u16 max_wqe_size); + +void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, + struct hinic_wq *wq, int cmdq_blocks); + int hinic_wqs_alloc(struct hinic_wqs *wqs, int num_wqs, struct hinic_hwif *hwif); -- cgit v1.2.3-55-g7522 From fc9319e4025d49875fdb97c06618de2c0088ac31 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:56:01 +0800 Subject: net-next/hinic: Add ceqs Initialize the completion event queues and handle ceq events by calling the registered handlers. Used for cmdq command completion. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 16 ++ drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 29 +++ drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 7 +- drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 290 +++++++++++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h | 75 ++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 15 +- drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 3 + 7 files changed, 427 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 0dccbe678c31..ec24b95747c1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -27,6 +27,7 @@ #include #include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" #include "hinic_hw_mgmt.h" #include "hinic_hw_wq.h" #include "hinic_hw_cmdq.h" @@ -109,6 +110,16 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, return -EINVAL; } +/** + * cmdq_ceq_handler - cmdq completion event handler + * @handle: private data for the handler(cmdqs) + * @ceqe_data: ceq element data + **/ +static void cmdq_ceq_handler(void *handle, u32 ceqe_data) +{ + /* should be implemented */ +} + /** * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq * @cmdq_ctxt: cmdq ctxt to initialize @@ -320,6 +331,8 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, goto err_cmdq_ctxt; } + hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs, + cmdq_ceq_handler); return 0; err_cmdq_ctxt: @@ -340,10 +353,13 @@ err_saved_wqs: **/ void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs) { + struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs); struct hinic_hwif *hwif = cmdqs->hwif; struct pci_dev *pdev = hwif->pdev; enum hinic_cmdq_type cmdq_type; + hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ); + cmdq_type = HINIC_CMDQ_SYNC; for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) free_cmdq(&cmdqs->cmdq[cmdq_type]); diff --git 
a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h index 1f57301ce527..10b8c7b650dc 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h @@ -81,27 +81,44 @@ /* EQ registers */ #define HINIC_AEQ_MTT_OFF_BASE_ADDR 0x200 +#define HINIC_CEQ_MTT_OFF_BASE_ADDR 0x400 #define HINIC_EQ_MTT_OFF_STRIDE 0x40 #define HINIC_CSR_AEQ_MTT_OFF(id) \ (HINIC_AEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) +#define HINIC_CSR_CEQ_MTT_OFF(id) \ + (HINIC_CEQ_MTT_OFF_BASE_ADDR + (id) * HINIC_EQ_MTT_OFF_STRIDE) + #define HINIC_CSR_EQ_PAGE_OFF_STRIDE 8 #define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) +#define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE) + #define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ (HINIC_CSR_AEQ_MTT_OFF(q_id) + \ (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) +#define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \ + (HINIC_CSR_CEQ_MTT_OFF(q_id) + \ + (pg_num) * HINIC_CSR_EQ_PAGE_OFF_STRIDE + 4) + #define HINIC_AEQ_CTRL_0_ADDR_BASE 0xE00 #define HINIC_AEQ_CTRL_1_ADDR_BASE 0xE04 #define HINIC_AEQ_CONS_IDX_ADDR_BASE 0xE08 #define HINIC_AEQ_PROD_IDX_ADDR_BASE 0xE0C +#define HINIC_CEQ_CTRL_0_ADDR_BASE 0x1000 +#define HINIC_CEQ_CTRL_1_ADDR_BASE 0x1004 +#define HINIC_CEQ_CONS_IDX_ADDR_BASE 0x1008 +#define HINIC_CEQ_PROD_IDX_ADDR_BASE 0x100C + #define HINIC_EQ_OFF_STRIDE 0x80 #define HINIC_CSR_AEQ_CTRL_0_ADDR(idx) \ @@ -116,4 +133,16 @@ #define HINIC_CSR_AEQ_PROD_IDX_ADDR(idx) \ (HINIC_AEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) +#define HINIC_CSR_CEQ_CTRL_0_ADDR(idx) \ + (HINIC_CEQ_CTRL_0_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_CTRL_1_ADDR(idx) \ + (HINIC_CEQ_CTRL_1_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_CONS_IDX_ADDR(idx) \ + (HINIC_CEQ_CONS_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + +#define HINIC_CSR_CEQ_PROD_IDX_ADDR(idx) \ + (HINIC_CEQ_PROD_IDX_ADDR_BASE + (idx) * HINIC_EQ_OFF_STRIDE) + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index f29fea1dc9d2..2f698f1a89a3 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -277,6 +277,7 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev) struct hinic_cap *nic_cap = &hwdev->nic_cap; struct hinic_hwif *hwif = hwdev->hwif; int err, num_aeqs, num_ceqs, num_qps; + struct msix_entry *ceq_msix_entries; struct msix_entry *sq_msix_entries; struct msix_entry *rq_msix_entries; struct pci_dev *pdev = hwif->pdev; @@ -290,7 +291,11 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev) num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); num_ceqs = HINIC_HWIF_NUM_CEQS(hwif); - err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, 0, NULL); + + ceq_msix_entries = &hwdev->msix_entries[num_aeqs]; + + err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs, + ceq_msix_entries); if (err) { dev_err(&pdev->dev, "Failed to init IO channel\n"); return err; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index 6b03bc6ae940..cd09e6ef3aea 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -37,14 +37,21 @@ #define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / 
(eq)->elem_size) -#define EQ_CONS_IDX_REG_ADDR(eq) HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) -#define EQ_PROD_IDX_REG_ADDR(eq) HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) +#define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ + HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \ + HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id)) -#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) \ - HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) +#define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \ + HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \ + HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id)) -#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) \ - HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) +#define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ + HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ + HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num)) + +#define EQ_LO_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \ + HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \ + HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num)) #define GET_EQ_ELEMENT(eq, idx) \ ((eq)->virt_addr[(idx) / (eq)->num_elem_in_pg] + \ @@ -53,8 +60,13 @@ #define GET_AEQ_ELEM(eq, idx) ((struct hinic_aeq_elem *) \ GET_EQ_ELEMENT(eq, idx)) +#define GET_CEQ_ELEM(eq, idx) ((u32 *) \ + GET_EQ_ELEMENT(eq, idx)) + #define GET_CURR_AEQ_ELEM(eq) GET_AEQ_ELEM(eq, (eq)->cons_idx) +#define GET_CURR_CEQ_ELEM(eq) GET_CEQ_ELEM(eq, (eq)->cons_idx) + #define PAGE_IN_4K(page_size) ((page_size) >> 12) #define EQ_SET_HW_PAGE_SIZE_VAL(eq) (ilog2(PAGE_IN_4K((eq)->page_size))) @@ -63,13 +75,29 @@ #define EQ_MAX_PAGES 8 +#define CEQE_TYPE_SHIFT 23 +#define CEQE_TYPE_MASK 0x7 + +#define CEQE_TYPE(ceqe) (((ceqe) >> CEQE_TYPE_SHIFT) & \ + CEQE_TYPE_MASK) + +#define CEQE_DATA_MASK 0x3FFFFFF +#define CEQE_DATA(ceqe) ((ceqe) & CEQE_DATA_MASK) + #define aeq_to_aeqs(eq) \ container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0]) +#define ceq_to_ceqs(eq) \ + container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0]) + #define work_to_aeq_work(work) \ container_of(work, struct hinic_eq_work, work) #define DMA_ATTR_AEQ_DEFAULT 0 +#define DMA_ATTR_CEQ_DEFAULT 0 + +/* No coalescence */ +#define THRESH_CEQ_DEFAULT 0 enum eq_int_mode { EQ_INT_MODE_ARMED, @@ -118,6 +146,42 @@ void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, hwe_cb->hwe_handler = NULL; } +/** + * hinic_ceq_register_cb - register CEQ callback for specific event + * @ceqs: pointer to Completion eqs part of the chip + * @event: ceq event to register callback for it + * @handle: private data will be used by the callback + * @handler: callback function + **/ +void hinic_ceq_register_cb(struct hinic_ceqs *ceqs, + enum hinic_ceq_type event, void *handle, + void (*handler)(void *handle, u32 ceqe_data)) +{ + struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; + + ceq_cb->handler = handler; + ceq_cb->handle = handle; + ceq_cb->ceqe_state = HINIC_EQE_ENABLED; +} + +/** + * hinic_ceq_unregister_cb - unregister the CEQ callback for specific event + * @ceqs: pointer to Completion eqs part of the chip + * @event: ceq event to unregister callback for it + **/ +void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs, + enum hinic_ceq_type event) +{ + struct hinic_ceq_cb *ceq_cb = &ceqs->ceq_cb[event]; + + ceq_cb->ceqe_state &= ~HINIC_EQE_ENABLED; + + while (ceq_cb->ceqe_state & HINIC_EQE_RUNNING) + schedule(); + + ceq_cb->handler = NULL; +} + static u8 eq_cons_idx_checksum_set(u32 val) { u8 checksum = 0; @@ -215,6 +279,70 @@ static void aeq_irq_handler(struct hinic_eq *eq) } } +/** + * ceq_event_handler - 
handler for the ceq events + * @ceqs: ceqs part of the chip + * @ceqe: ceq element that describes the event + **/ +static void ceq_event_handler(struct hinic_ceqs *ceqs, u32 ceqe) +{ + struct hinic_hwif *hwif = ceqs->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_ceq_cb *ceq_cb; + enum hinic_ceq_type event; + unsigned long eqe_state; + + event = CEQE_TYPE(ceqe); + if (event >= HINIC_MAX_CEQ_EVENTS) { + dev_err(&pdev->dev, "Unknown CEQ event, event = %d\n", event); + return; + } + + ceq_cb = &ceqs->ceq_cb[event]; + + eqe_state = cmpxchg(&ceq_cb->ceqe_state, + HINIC_EQE_ENABLED, + HINIC_EQE_ENABLED | HINIC_EQE_RUNNING); + + if ((eqe_state == HINIC_EQE_ENABLED) && (ceq_cb->handler)) + ceq_cb->handler(ceq_cb->handle, CEQE_DATA(ceqe)); + else + dev_err(&pdev->dev, "Unhandled CEQ Event %d\n", event); + + ceq_cb->ceqe_state &= ~HINIC_EQE_RUNNING; +} + +/** + * ceq_irq_handler - handler for the CEQ event + * @eq: the Completion Event Queue that received the event + **/ +static void ceq_irq_handler(struct hinic_eq *eq) +{ + struct hinic_ceqs *ceqs = ceq_to_ceqs(eq); + u32 ceqe; + int i; + + for (i = 0; i < eq->q_len; i++) { + ceqe = *(GET_CURR_CEQ_ELEM(eq)); + + /* Data in HW is in Big endian Format */ + ceqe = be32_to_cpu(ceqe); + + /* HW toggles the wrapped bit, when it adds eq element event */ + if (HINIC_EQ_ELEM_DESC_GET(ceqe, WRAPPED) == eq->wrapped) + break; + + ceq_event_handler(ceqs, ceqe); + + eq->cons_idx++; + + if (eq->cons_idx == eq->q_len) { + eq->cons_idx = 0; + eq->wrapped = !eq->wrapped; + } + } +} + /** * eq_irq_handler - handler for the EQ event * @data: the Event Queue that received the event @@ -225,6 +353,8 @@ static void eq_irq_handler(void *data) if (eq->type == HINIC_AEQ) aeq_irq_handler(eq); + else if (eq->type == HINIC_CEQ) + ceq_irq_handler(eq); eq_update_ci(eq); } @@ -242,6 +372,17 @@ static void eq_irq_work(struct work_struct *work) eq_irq_handler(aeq); } +/** + * ceq_tasklet - the tasklet of the EQ that received the event + * @ceq_data: the eq + **/ +static void ceq_tasklet(unsigned long ceq_data) +{ + struct hinic_eq *ceq = (struct hinic_eq *)ceq_data; + + eq_irq_handler(ceq); +} + /** * aeq_interrupt - aeq interrupt handler * @irq: irq number @@ -265,6 +406,23 @@ static irqreturn_t aeq_interrupt(int irq, void *data) return IRQ_HANDLED; } +/** + * ceq_interrupt - ceq interrupt handler + * @irq: irq number + * @data: the Completion Event Queue that collected the event + **/ +static irqreturn_t ceq_interrupt(int irq, void *data) +{ + struct hinic_eq *ceq = data; + + /* clear resend timer cnt register */ + hinic_msix_attr_cnt_clear(ceq->hwif, ceq->msix_entry.entry); + + tasklet_schedule(&ceq->ceq_tasklet); + + return IRQ_HANDLED; +} + void set_ctrl0(struct hinic_eq *eq) { struct msix_entry *msix_entry = &eq->msix_entry; @@ -290,6 +448,28 @@ void set_ctrl0(struct hinic_eq *eq) val |= ctrl0; + hinic_hwif_write_reg(eq->hwif, addr, val); + } else if (type == HINIC_CEQ) { + /* RMW Ctrl0 */ + addr = HINIC_CSR_CEQ_CTRL_0_ADDR(eq->q_id); + + val = hinic_hwif_read_reg(eq->hwif, addr); + + val = HINIC_CEQ_CTRL_0_CLEAR(val, INTR_IDX) & + HINIC_CEQ_CTRL_0_CLEAR(val, DMA_ATTR) & + HINIC_CEQ_CTRL_0_CLEAR(val, KICK_THRESH) & + HINIC_CEQ_CTRL_0_CLEAR(val, PCI_INTF_IDX) & + HINIC_CEQ_CTRL_0_CLEAR(val, INTR_MODE); + + ctrl0 = HINIC_CEQ_CTRL_0_SET(msix_entry->entry, INTR_IDX) | + HINIC_CEQ_CTRL_0_SET(DMA_ATTR_CEQ_DEFAULT, DMA_ATTR) | + HINIC_CEQ_CTRL_0_SET(THRESH_CEQ_DEFAULT, KICK_THRESH) | + HINIC_CEQ_CTRL_0_SET(HINIC_HWIF_PCI_INTF(eq->hwif), + PCI_INTF_IDX) | + 
HINIC_CEQ_CTRL_0_SET(EQ_INT_MODE_ARMED, INTR_MODE); + + val |= ctrl0; + hinic_hwif_write_reg(eq->hwif, addr, val); } } @@ -319,6 +499,23 @@ void set_ctrl1(struct hinic_eq *eq) val |= ctrl1; + hinic_hwif_write_reg(eq->hwif, addr, val); + } else if (type == HINIC_CEQ) { + /* RMW Ctrl1 */ + addr = HINIC_CSR_CEQ_CTRL_1_ADDR(eq->q_id); + + page_size_val = EQ_SET_HW_PAGE_SIZE_VAL(eq); + + val = hinic_hwif_read_reg(eq->hwif, addr); + + val = HINIC_CEQ_CTRL_1_CLEAR(val, LEN) & + HINIC_CEQ_CTRL_1_CLEAR(val, PAGE_SIZE); + + ctrl1 = HINIC_CEQ_CTRL_1_SET(eq->q_len, LEN) | + HINIC_CEQ_CTRL_1_SET(page_size_val, PAGE_SIZE); + + val |= ctrl1; + hinic_hwif_write_reg(eq->hwif, addr, val); } } @@ -351,6 +548,24 @@ static void aeq_elements_init(struct hinic_eq *eq, u32 init_val) wmb(); /* Write the initilzation values */ } +/** + * ceq_elements_init - Initialize all the elements in the ceq + * @eq: the event queue + * @init_val: value to init with it the elements + **/ +static void ceq_elements_init(struct hinic_eq *eq, u32 init_val) +{ + u32 *ceqe; + int i; + + for (i = 0; i < eq->q_len; i++) { + ceqe = GET_CEQ_ELEM(eq, i); + *(ceqe) = cpu_to_be32(init_val); + } + + wmb(); /* Write the initilzation values */ +} + /** * alloc_eq_pages - allocate the pages for the queue * @eq: the event queue @@ -402,6 +617,8 @@ static int alloc_eq_pages(struct hinic_eq *eq) if (eq->type == HINIC_AEQ) aeq_elements_init(eq, init_val); + else if (eq->type == HINIC_CEQ) + ceq_elements_init(eq, init_val); return 0; @@ -471,6 +688,8 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, if (type == HINIC_AEQ) { eq->elem_size = HINIC_AEQE_SIZE; + } else if (type == HINIC_CEQ) { + eq->elem_size = HINIC_CEQE_SIZE; } else { dev_err(&pdev->dev, "Invalid EQ type\n"); return -EINVAL; @@ -504,6 +723,9 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, struct hinic_eq_work *aeq_work = &eq->aeq_work; INIT_WORK(&aeq_work->work, eq_irq_work); + } else if (type == HINIC_CEQ) { + tasklet_init(&eq->ceq_tasklet, ceq_tasklet, + (unsigned long)eq); } /* set the attributes of the msix entry */ @@ -517,6 +739,9 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif, if (type == HINIC_AEQ) err = request_irq(entry.vector, aeq_interrupt, 0, "hinic_aeq", eq); + else if (type == HINIC_CEQ) + err = request_irq(entry.vector, ceq_interrupt, 0, + "hinic_ceq", eq); if (err) { dev_err(&pdev->dev, "Failed to request irq for the EQ\n"); @@ -544,6 +769,8 @@ static void remove_eq(struct hinic_eq *eq) struct hinic_eq_work *aeq_work = &eq->aeq_work; cancel_work_sync(&aeq_work->work); + } else if (eq->type == HINIC_CEQ) { + tasklet_kill(&eq->ceq_tasklet); } free_eq_pages(eq); @@ -606,3 +833,54 @@ void hinic_aeqs_free(struct hinic_aeqs *aeqs) destroy_workqueue(aeqs->workq); } + +/** + * hinic_ceqs_init - init all the ceqs + * @ceqs: ceqs part of the chip + * @hwif: the hardware interface of a pci function device + * @num_ceqs: number of CEQs + * @q_len: number of EQ elements + * @page_size: the page size of the event queue + * @msix_entries: msix entries associated with the event queues + * + * Return 0 - Success, Negative - Failure + **/ +int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, + int num_ceqs, u32 q_len, u32 page_size, + struct msix_entry *msix_entries) +{ + struct pci_dev *pdev = hwif->pdev; + int i, q_id, err; + + ceqs->hwif = hwif; + ceqs->num_ceqs = num_ceqs; + + for (q_id = 0; q_id < num_ceqs; q_id++) { + err = init_eq(&ceqs->ceq[q_id], hwif, HINIC_CEQ, q_id, q_len, + page_size, 
msix_entries[q_id]); + if (err) { + dev_err(&pdev->dev, "Failed to init ceq %d\n", q_id); + goto err_init_ceq; + } + } + + return 0; + +err_init_ceq: + for (i = 0; i < q_id; i++) + remove_eq(&ceqs->ceq[i]); + + return err; +} + +/** + * hinic_ceqs_free - free all the ceqs + * @ceqs: ceqs part of the chip + **/ +void hinic_ceqs_free(struct hinic_ceqs *ceqs) +{ + int q_id; + + for (q_id = 0; q_id < ceqs->num_ceqs; q_id++) + remove_eq(&ceqs->ceq[q_id]); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h index ca584c05d30e..ecb9c2bc6dc8 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h @@ -21,6 +21,7 @@ #include #include #include +#include #include "hinic_hw_if.h" @@ -58,6 +59,40 @@ ((val) & (~(HINIC_AEQ_CTRL_1_##member##_MASK \ << HINIC_AEQ_CTRL_1_##member##_SHIFT))) +#define HINIC_CEQ_CTRL_0_INTR_IDX_SHIFT 0 +#define HINIC_CEQ_CTRL_0_DMA_ATTR_SHIFT 12 +#define HINIC_CEQ_CTRL_0_KICK_THRESH_SHIFT 20 +#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_SHIFT 24 +#define HINIC_CEQ_CTRL_0_INTR_MODE_SHIFT 31 + +#define HINIC_CEQ_CTRL_0_INTR_IDX_MASK 0x3FF +#define HINIC_CEQ_CTRL_0_DMA_ATTR_MASK 0x3F +#define HINIC_CEQ_CTRL_0_KICK_THRESH_MASK 0xF +#define HINIC_CEQ_CTRL_0_PCI_INTF_IDX_MASK 0x3 +#define HINIC_CEQ_CTRL_0_INTR_MODE_MASK 0x1 + +#define HINIC_CEQ_CTRL_0_SET(val, member) \ + (((u32)(val) & HINIC_CEQ_CTRL_0_##member##_MASK) << \ + HINIC_CEQ_CTRL_0_##member##_SHIFT) + +#define HINIC_CEQ_CTRL_0_CLEAR(val, member) \ + ((val) & (~(HINIC_CEQ_CTRL_0_##member##_MASK \ + << HINIC_CEQ_CTRL_0_##member##_SHIFT))) + +#define HINIC_CEQ_CTRL_1_LEN_SHIFT 0 +#define HINIC_CEQ_CTRL_1_PAGE_SIZE_SHIFT 28 + +#define HINIC_CEQ_CTRL_1_LEN_MASK 0x1FFFFF +#define HINIC_CEQ_CTRL_1_PAGE_SIZE_MASK 0xF + +#define HINIC_CEQ_CTRL_1_SET(val, member) \ + (((u32)(val) & HINIC_CEQ_CTRL_1_##member##_MASK) << \ + HINIC_CEQ_CTRL_1_##member##_SHIFT) + +#define HINIC_CEQ_CTRL_1_CLEAR(val, member) \ + ((val) & (~(HINIC_CEQ_CTRL_1_##member##_MASK \ + << HINIC_CEQ_CTRL_1_##member##_SHIFT))) + #define HINIC_EQ_ELEM_DESC_TYPE_SHIFT 0 #define HINIC_EQ_ELEM_DESC_SRC_SHIFT 7 #define HINIC_EQ_ELEM_DESC_SIZE_SHIFT 8 @@ -95,14 +130,17 @@ << HINIC_EQ_CI_##member##_SHIFT))) #define HINIC_MAX_AEQS 4 +#define HINIC_MAX_CEQS 32 #define HINIC_AEQE_SIZE 64 +#define HINIC_CEQE_SIZE 4 #define HINIC_AEQE_DESC_SIZE 4 #define HINIC_AEQE_DATA_SIZE \ (HINIC_AEQE_SIZE - HINIC_AEQE_DESC_SIZE) #define HINIC_DEFAULT_AEQ_LEN 64 +#define HINIC_DEFAULT_CEQ_LEN 1024 #define HINIC_EQ_PAGE_SIZE SZ_4K @@ -110,6 +148,7 @@ enum hinic_eq_type { HINIC_AEQ, + HINIC_CEQ, }; enum hinic_aeq_type { @@ -118,6 +157,12 @@ enum hinic_aeq_type { HINIC_MAX_AEQ_EVENTS, }; +enum hinic_ceq_type { + HINIC_CEQ_CMDQ = 3, + + HINIC_MAX_CEQ_EVENTS, +}; + enum hinic_eqe_state { HINIC_EQE_ENABLED = BIT(0), HINIC_EQE_RUNNING = BIT(1), @@ -154,6 +199,8 @@ struct hinic_eq { void **virt_addr; struct hinic_eq_work aeq_work; + + struct tasklet_struct ceq_tasklet; }; struct hinic_hw_event_cb { @@ -173,6 +220,21 @@ struct hinic_aeqs { struct workqueue_struct *workq; }; +struct hinic_ceq_cb { + void (*handler)(void *handle, u32 ceqe_data); + void *handle; + enum hinic_eqe_state ceqe_state; +}; + +struct hinic_ceqs { + struct hinic_hwif *hwif; + + struct hinic_eq ceq[HINIC_MAX_CEQS]; + int num_ceqs; + + struct hinic_ceq_cb ceq_cb[HINIC_MAX_CEQ_EVENTS]; +}; + void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, enum hinic_aeq_type event, void *handle, void (*hwe_handler)(void 
*handle, void *data, @@ -181,10 +243,23 @@ void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs, void hinic_aeq_unregister_hw_cb(struct hinic_aeqs *aeqs, enum hinic_aeq_type event); +void hinic_ceq_register_cb(struct hinic_ceqs *ceqs, + enum hinic_ceq_type event, void *handle, + void (*ceq_cb)(void *handle, u32 ceqe_data)); + +void hinic_ceq_unregister_cb(struct hinic_ceqs *ceqs, + enum hinic_ceq_type event); + int hinic_aeqs_init(struct hinic_aeqs *aeqs, struct hinic_hwif *hwif, int num_aeqs, u32 q_len, u32 page_size, struct msix_entry *msix_entries); void hinic_aeqs_free(struct hinic_aeqs *aeqs); +int hinic_ceqs_init(struct hinic_ceqs *ceqs, struct hinic_hwif *hwif, + int num_ceqs, u32 q_len, u32 page_size, + struct msix_entry *msix_entries); + +void hinic_ceqs_free(struct hinic_ceqs *ceqs); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c index bb4b93fe622a..8e5897669a3a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c @@ -25,6 +25,7 @@ #include #include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" #include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" #include "hinic_hw_cmdq.h" @@ -455,10 +456,18 @@ int hinic_io_init(struct hinic_func_to_io *func_to_io, func_to_io->qps = NULL; func_to_io->max_qps = max_qps; + err = hinic_ceqs_init(&func_to_io->ceqs, hwif, num_ceqs, + HINIC_DEFAULT_CEQ_LEN, HINIC_EQ_PAGE_SIZE, + ceq_msix_entries); + if (err) { + dev_err(&pdev->dev, "Failed to init CEQs\n"); + return err; + } + err = hinic_wqs_alloc(&func_to_io->wqs, 2 * max_qps, hwif); if (err) { dev_err(&pdev->dev, "Failed to allocate WQS for IO\n"); - return err; + goto err_wqs_alloc; } func_to_io->db_base = pci_ioremap_bar(pdev, HINIC_PCI_DB_BAR); @@ -499,6 +508,9 @@ err_db_area: err_db_ioremap: hinic_wqs_free(&func_to_io->wqs); + +err_wqs_alloc: + hinic_ceqs_free(&func_to_io->ceqs); return err; } @@ -517,4 +529,5 @@ void hinic_io_free(struct hinic_func_to_io *func_to_io) iounmap(func_to_io->db_base); hinic_wqs_free(&func_to_io->wqs); + hinic_ceqs_free(&func_to_io->ceqs); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h index 60d77b343fa7..cfc21baf0977 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h @@ -22,6 +22,7 @@ #include #include "hinic_hw_if.h" +#include "hinic_hw_eqs.h" #include "hinic_hw_wq.h" #include "hinic_hw_cmdq.h" #include "hinic_hw_qp.h" @@ -46,6 +47,8 @@ struct hinic_free_db_area { struct hinic_func_to_io { struct hinic_hwif *hwif; + struct hinic_ceqs ceqs; + struct hinic_wqs wqs; struct hinic_wq *sq_wq; -- cgit v1.2.3-55-g7522 From 76baca2e92f4ed478ef14d68da118484f134632d Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:56:02 +0800 Subject: net-next/hinic: Add cmdq commands Add cmdq commands for setting queue pair contexts in the nic. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/hinic_common.c | 25 ++ drivers/net/ethernet/huawei/hinic/hinic_common.h | 9 + drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 282 +++++++++++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 38 +++ drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 10 + drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 194 +++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h | 12 + drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h | 115 +++++++++ 8 files changed, 683 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.c b/drivers/net/ethernet/huawei/hinic/hinic_common.c index 1915ad63deec..02c74fd8380e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_common.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_common.c @@ -13,6 +13,7 @@ * */ +#include #include #include @@ -53,3 +54,27 @@ void hinic_be32_to_cpu(void *data, int len) mem++; } } + +/** + * hinic_set_sge - set dma area in scatter gather entry + * @sge: scatter gather entry + * @addr: dma address + * @len: length of relevant data in the dma address + **/ +void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len) +{ + sge->hi_addr = upper_32_bits(addr); + sge->lo_addr = lower_32_bits(addr); + sge->len = len; +} + +/** + * hinic_sge_to_dma - get dma address from scatter gather entry + * @sge: scatter gather entry + * + * Return dma address of sg entry + **/ +dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge) +{ + return (dma_addr_t)((((u64)sge->hi_addr) << 32) | sge->lo_addr); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_common.h b/drivers/net/ethernet/huawei/hinic/hinic_common.h index 0f2f4ff70c97..2c06b76e94a1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_common.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_common.h @@ -16,6 +16,11 @@ #ifndef HINIC_COMMON_H #define HINIC_COMMON_H +#include + +#define UPPER_8_BITS(data) (((data) >> 8) & 0xFF) +#define LOWER_8_BITS(data) ((data) & 0xFF) + struct hinic_sge { u32 hi_addr; u32 lo_addr; @@ -26,4 +31,8 @@ void hinic_cpu_to_be32(void *data, int len); void hinic_be32_to_cpu(void *data, int len); +void hinic_set_sge(struct hinic_sge *sge, dma_addr_t addr, int len); + +dma_addr_t hinic_sge_to_dma(struct hinic_sge *sge); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index ec24b95747c1..07ce78745098 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -24,19 +24,34 @@ #include #include #include +#include +#include +#include #include +#include +#include "hinic_common.h" #include "hinic_hw_if.h" #include "hinic_hw_eqs.h" #include "hinic_hw_mgmt.h" +#include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" #include "hinic_hw_cmdq.h" #include "hinic_hw_io.h" #include "hinic_hw_dev.h" +#define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) + +#define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi)) + +#define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) + +#define FIRST_DATA_TO_WRITE_LAST sizeof(u64) + #define CMDQ_DB_OFF SZ_2K #define CMDQ_WQEBB_SIZE 64 +#define CMDQ_WQE_SIZE 64 #define CMDQ_DEPTH SZ_4K #define CMDQ_WQ_PAGE_SIZE SZ_4K @@ -44,6 +59,10 @@ #define WQE_LCMD_SIZE 64 #define WQE_SCMD_SIZE 64 +#define COMPLETE_LEN 3 + +#define CMDQ_TIMEOUT 1000 + #define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size))) #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ @@ -58,6 +77,40 @@ enum 
cmdq_wqe_type { WQE_SCMD_TYPE = 1, }; +enum completion_format { + COMPLETE_DIRECT = 0, + COMPLETE_SGE = 1, +}; + +enum data_format { + DATA_SGE = 0, + DATA_DIRECT = 1, +}; + +enum bufdesc_len { + BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */ + BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */ +}; + +enum ctrl_sect_len { + CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */ + CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */ +}; + +enum cmdq_scmd_type { + CMDQ_SET_ARM_CMD = 2, +}; + +enum cmdq_cmd_type { + CMDQ_CMD_SYNC_DIRECT_RESP = 0, + CMDQ_CMD_SYNC_SGE_RESP = 1, +}; + +enum completion_request { + NO_CEQ = 0, + CEQ_SET = 1, +}; + /** * hinic_alloc_cmdq_buf - alloc buffer for sending command * @cmdqs: the cmdqs @@ -92,6 +145,221 @@ void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr); } +static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion, + struct hinic_cmdq_buf *buf_out) +{ + struct hinic_sge_resp *sge_resp = &completion->sge_resp; + + hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size); +} + +static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped, + enum hinic_cmd_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx, + enum completion_format complete_format, + enum data_format data_format, + enum bufdesc_len buf_len) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + enum ctrl_sect_len ctrl_len; + struct hinic_ctrl *ctrl; + u32 saved_data; + + if (data_format == DATA_SGE) { + wqe_lcmd = &wqe->wqe_lcmd; + + wqe_lcmd->status.status_info = 0; + ctrl = &wqe_lcmd->ctrl; + ctrl_len = CTRL_SECT_LEN; + } else { + wqe_scmd = &wqe->direct_wqe.wqe_scmd; + + wqe_scmd->status.status_info = 0; + ctrl = &wqe_scmd->ctrl; + ctrl_len = CTRL_DIRECT_SECT_LEN; + } + + ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) | + HINIC_CMDQ_CTRL_SET(cmd, CMD) | + HINIC_CMDQ_CTRL_SET(mod, MOD) | + HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE); + + CMDQ_WQE_HEADER(wqe)->header_info = + HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) | + HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) | + HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) | + HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) | + HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) | + HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) | + HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED); + + saved_data = CMDQ_WQE_HEADER(wqe)->saved_data; + saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM); + + if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM)) + CMDQ_WQE_HEADER(wqe)->saved_data |= + HINIC_SAVED_DATA_SET(1, ARM); + else + CMDQ_WQE_HEADER(wqe)->saved_data = saved_data; +} + +static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd, + struct hinic_cmdq_buf *buf_in) +{ + hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size); +} + +static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + struct hinic_cmdq_buf *buf_in, + struct hinic_cmdq_buf *buf_out, int wrapped, + enum hinic_cmd_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + enum completion_format complete_format; + + switch (cmd_type) { + case CMDQ_CMD_SYNC_SGE_RESP: + complete_format = COMPLETE_SGE; + cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out); + break; + case CMDQ_CMD_SYNC_DIRECT_RESP: + complete_format = 
COMPLETE_DIRECT; + wqe_lcmd->completion.direct_resp = 0; + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, + prod_idx, complete_format, DATA_SGE, + BUFDESC_LCMD_LEN); + + cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); +} + +static void cmdq_wqe_fill(void *dst, void *src) +{ + memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST, + CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST); + + wmb(); /* The first 8 bytes should be written last */ + + *(u64 *)dst = *(u64 *)src; +} + +static void cmdq_fill_db(u32 *db_info, + enum hinic_cmdq_type cmdq_type, u16 prod_idx) +{ + *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) | + HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) | + HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) | + HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE); +} + +static void cmdq_set_db(struct hinic_cmdq *cmdq, + enum hinic_cmdq_type cmdq_type, u16 prod_idx) +{ + u32 db_info; + + cmdq_fill_db(&db_info, cmdq_type, prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + db_info = cpu_to_be32(db_info); + + wmb(); /* write all before the doorbell */ + + writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); +} + +static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, + enum hinic_mod_type mod, u8 cmd, + struct hinic_cmdq_buf *buf_in, + u64 *resp) +{ + struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe; + u16 curr_prod_idx, next_prod_idx; + int errcode, wrapped, num_wqebbs; + struct hinic_wq *wq = cmdq->wq; + struct hinic_hw_wqe *hw_wqe; + struct completion done; + + /* Keep doorbell index correct. bh - for tasklet(ceq). */ + spin_lock_bh(&cmdq->cmdq_lock); + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx); + if (IS_ERR(hw_wqe)) { + spin_unlock_bh(&cmdq->cmdq_lock); + return -EBUSY; + } + + curr_cmdq_wqe = &hw_wqe->cmdq_wqe; + + wrapped = cmdq->wrapped; + + num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size; + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmdq->errcode[curr_prod_idx] = &errcode; + + init_completion(&done); + cmdq->done[curr_prod_idx] = &done; + + cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL, + wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd, + curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE); + + /* CMDQ WQE is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe); + + cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); + + spin_unlock_bh(&cmdq->cmdq_lock); + + if (!wait_for_completion_timeout(&done, CMDQ_TIMEOUT)) { + spin_lock_bh(&cmdq->cmdq_lock); + + if (cmdq->errcode[curr_prod_idx] == &errcode) + cmdq->errcode[curr_prod_idx] = NULL; + + if (cmdq->done[curr_prod_idx] == &done) + cmdq->done[curr_prod_idx] = NULL; + + spin_unlock_bh(&cmdq->cmdq_lock); + + return -ETIMEDOUT; + } + + smp_rmb(); /* read error code after completion */ + + if (resp) { + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd; + + *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp); + } + + if (errcode != 0) + return -EFAULT; + + return 0; +} + +static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in) +{ + if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) + return -EINVAL; + + return 0; +} + /** * hinic_cmdq_direct_resp - send command with direct data as 
resp * @cmdqs: the cmdqs @@ -106,8 +374,18 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, enum hinic_mod_type mod, u8 cmd, struct hinic_cmdq_buf *buf_in, u64 *resp) { - /* should be implemented */ - return -EINVAL; + struct hinic_hwif *hwif = cmdqs->hwif; + struct pci_dev *pdev = hwif->pdev; + int err; + + err = cmdq_params_valid(buf_in); + if (err) { + dev_err(&pdev->dev, "Invalid CMDQ parameters\n"); + return err; + } + + return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC], + mod, cmd, buf_in, resp); } /** diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h index 5ec59f1b4b0a..e11a4f094997 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h @@ -58,14 +58,52 @@ ((val) & (~((u64)HINIC_CMDQ_CTXT_##member##_MASK \ << HINIC_CMDQ_CTXT_##member##_SHIFT))) +#define HINIC_SAVED_DATA_ARM_SHIFT 31 + +#define HINIC_SAVED_DATA_ARM_MASK 0x1 + +#define HINIC_SAVED_DATA_SET(val, member) \ + (((u32)(val) & HINIC_SAVED_DATA_##member##_MASK) \ + << HINIC_SAVED_DATA_##member##_SHIFT) + +#define HINIC_SAVED_DATA_GET(val, member) \ + (((val) >> HINIC_SAVED_DATA_##member##_SHIFT) \ + & HINIC_SAVED_DATA_##member##_MASK) + +#define HINIC_SAVED_DATA_CLEAR(val, member) \ + ((val) & (~(HINIC_SAVED_DATA_##member##_MASK \ + << HINIC_SAVED_DATA_##member##_SHIFT))) + +#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_SHIFT 0 +#define HINIC_CMDQ_DB_INFO_PATH_SHIFT 23 +#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_SHIFT 24 +#define HINIC_CMDQ_DB_INFO_DB_TYPE_SHIFT 27 + +#define HINIC_CMDQ_DB_INFO_HI_PROD_IDX_MASK 0xFF +#define HINIC_CMDQ_DB_INFO_PATH_MASK 0x1 +#define HINIC_CMDQ_DB_INFO_CMDQ_TYPE_MASK 0x7 +#define HINIC_CMDQ_DB_INFO_DB_TYPE_MASK 0x1F + +#define HINIC_CMDQ_DB_INFO_SET(val, member) \ + (((u32)(val) & HINIC_CMDQ_DB_INFO_##member##_MASK) \ + << HINIC_CMDQ_DB_INFO_##member##_SHIFT) + #define HINIC_CMDQ_BUF_SIZE 2048 +#define HINIC_CMDQ_BUF_HW_RSVD 8 +#define HINIC_CMDQ_MAX_DATA_SIZE (HINIC_CMDQ_BUF_SIZE - \ + HINIC_CMDQ_BUF_HW_RSVD) + enum hinic_cmdq_type { HINIC_CMDQ_SYNC, HINIC_MAX_CMDQ_TYPES, }; +enum hinic_cmd_ack_type { + HINIC_CMD_ACK_TYPE_CMDQ, +}; + struct hinic_cmdq_buf { void *buf; dma_addr_t dma_addr; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h index cfc21baf0977..adb64179d47d 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h @@ -32,6 +32,16 @@ #define HINIC_DB_MAX_AREAS (HINIC_DB_SIZE / HINIC_DB_PAGE_SIZE) +enum hinic_db_type { + HINIC_DB_CMDQ_TYPE, + HINIC_DB_SQ_TYPE, +}; + +enum hinic_io_path { + HINIC_CTRL_PATH, + HINIC_DATA_PATH, +}; + struct hinic_free_db_area { int db_idx[HINIC_DB_MAX_AREAS]; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index 638aab790bd6..6ceae958db28 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -23,9 +23,11 @@ #include #include #include +#include #include #include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" #include "hinic_hw_cmdq.h" @@ -72,6 +74,25 @@ ((void *)((cmdq_pages)->shadow_page_vaddr) \ + (wq)->block_idx * CMDQ_BLOCK_SIZE) +#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \ + (wq)->wqebb_size) + +#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \ + & ((wq)->num_q_pages - 1)) + +#define 
WQ_PAGE_ADDR(wq, idx) \ + ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)]) + +#define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask) + +#define WQE_IN_RANGE(wqe, start, end) \ + (((unsigned long)(wqe) >= (unsigned long)(start)) && \ + ((unsigned long)(wqe) < (unsigned long)(end))) + +#define WQE_SHADOW_PAGE(wq, wqe) \ + (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \ + / (wq)->max_wqe_size) + /** * queue_alloc_page - allocate page for Queue * @hwif: HW interface for allocating DMA @@ -670,3 +691,176 @@ void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages, cmdq_free_page(cmdq_pages); } + +static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr, + int num_wqebbs, u16 idx) +{ + void *wqebb_addr; + int i; + + for (i = 0; i < num_wqebbs; i++, idx++) { + idx = MASKED_WQE_IDX(wq, idx); + wqebb_addr = WQ_PAGE_ADDR(wq, idx) + + WQE_PAGE_OFF(wq, idx); + + memcpy(shadow_addr, wqebb_addr, wq->wqebb_size); + + shadow_addr += wq->wqebb_size; + } +} + +static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr, + int num_wqebbs, u16 idx) +{ + void *wqebb_addr; + int i; + + for (i = 0; i < num_wqebbs; i++, idx++) { + idx = MASKED_WQE_IDX(wq, idx); + wqebb_addr = WQ_PAGE_ADDR(wq, idx) + + WQE_PAGE_OFF(wq, idx); + + memcpy(wqebb_addr, shadow_addr, wq->wqebb_size); + shadow_addr += wq->wqebb_size; + } +} + +/** + * hinic_get_wqe - get wqe ptr in the current pi and update the pi + * @wq: wq to get wqe from + * @wqe_size: wqe size + * @prod_idx: returned pi + * + * Return wqe pointer + **/ +struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, + u16 *prod_idx) +{ + int curr_pg, end_pg, num_wqebbs; + u16 curr_prod_idx, end_prod_idx; + + *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx)); + + num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + + if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) { + atomic_add(num_wqebbs, &wq->delta); + return ERR_PTR(-EBUSY); + } + + end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx); + + end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx); + curr_prod_idx = end_prod_idx - num_wqebbs; + curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); + + /* end prod index points to the next wqebb, therefore minus 1 */ + end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1); + + curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx); + end_pg = WQE_PAGE_NUM(wq, end_prod_idx); + + *prod_idx = curr_prod_idx; + + if (curr_pg != end_pg) { + void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; + + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx); + + wq->shadow_idx[curr_pg] = *prod_idx; + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx); +} + +/** + * hinic_put_wqe - return the wqe place to use for a new wqe + * @wq: wq to return wqe + * @wqe_size: wqe size + **/ +void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) +{ + int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + + atomic_add(num_wqebbs, &wq->cons_idx); + + atomic_add(num_wqebbs, &wq->delta); +} + +/** + * hinic_read_wqe - read wqe ptr in the current ci + * @wq: wq to get read from + * @wqe_size: wqe size + * @cons_idx: returned ci + * + * Return wqe pointer + **/ +struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, + u16 *cons_idx) +{ + int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + u16 curr_cons_idx, end_cons_idx; + int curr_pg, end_pg; + + if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) + return 
ERR_PTR(-EBUSY); + + curr_cons_idx = atomic_read(&wq->cons_idx); + + curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); + end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1); + + curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx); + end_pg = WQE_PAGE_NUM(wq, end_cons_idx); + + *cons_idx = curr_cons_idx; + + if (curr_pg != end_pg) { + void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; + + copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); + return shadow_addr; + } + + return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx); +} + +/** + * wqe_shadow - check if a wqe is shadow + * @wq: wq of the wqe + * @wqe: the wqe for shadow checking + * + * Return true - shadow, false - Not shadow + **/ +static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe) +{ + size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size; + + return WQE_IN_RANGE(wqe, wq->shadow_wqe, + &wq->shadow_wqe[wqe_shadow_size]); +} + +/** + * hinic_write_wqe - write the wqe to the wq + * @wq: wq to write wqe to + * @wqe: wqe to write + * @wqe_size: wqe size + **/ +void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, + unsigned int wqe_size) +{ + int curr_pg, num_wqebbs; + void *shadow_addr; + u16 prod_idx; + + if (wqe_shadow(wq, wqe)) { + curr_pg = WQE_SHADOW_PAGE(wq, wqe); + + prod_idx = wq->shadow_idx[curr_pg]; + num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; + + copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx); + } +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h index a3c4469d0efe..f01477a2c165 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h @@ -21,6 +21,7 @@ #include #include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" struct hinic_free_block { int page_idx; @@ -100,4 +101,15 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq); +struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, + u16 *prod_idx); + +void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size); + +struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, + u16 *cons_idx); + +void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, + unsigned int wqe_size); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h index d727c4dc9d6c..bc73485483c5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h @@ -18,6 +18,50 @@ #include "hinic_common.h" +#define HINIC_CMDQ_CTRL_PI_SHIFT 0 +#define HINIC_CMDQ_CTRL_CMD_SHIFT 16 +#define HINIC_CMDQ_CTRL_MOD_SHIFT 24 +#define HINIC_CMDQ_CTRL_ACK_TYPE_SHIFT 29 +#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_SHIFT 31 + +#define HINIC_CMDQ_CTRL_PI_MASK 0xFFFF +#define HINIC_CMDQ_CTRL_CMD_MASK 0xFF +#define HINIC_CMDQ_CTRL_MOD_MASK 0x1F +#define HINIC_CMDQ_CTRL_ACK_TYPE_MASK 0x3 +#define HINIC_CMDQ_CTRL_HW_BUSY_BIT_MASK 0x1 + +#define HINIC_CMDQ_CTRL_SET(val, member) \ + (((u32)(val) & HINIC_CMDQ_CTRL_##member##_MASK) \ + << HINIC_CMDQ_CTRL_##member##_SHIFT) + +#define HINIC_CMDQ_CTRL_GET(val, member) \ + (((val) >> HINIC_CMDQ_CTRL_##member##_SHIFT) \ + & HINIC_CMDQ_CTRL_##member##_MASK) + +#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_SHIFT 0 +#define 
HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_SHIFT 15 +#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_SHIFT 22 +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_SHIFT 23 +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_SHIFT 27 +#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_SHIFT 29 +#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_SHIFT 31 + +#define HINIC_CMDQ_WQE_HEADER_BUFDESC_LEN_MASK 0xFF +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_FMT_MASK 0x1 +#define HINIC_CMDQ_WQE_HEADER_DATA_FMT_MASK 0x1 +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_REQ_MASK 0x1 +#define HINIC_CMDQ_WQE_HEADER_COMPLETE_SECT_LEN_MASK 0x3 +#define HINIC_CMDQ_WQE_HEADER_CTRL_LEN_MASK 0x3 +#define HINIC_CMDQ_WQE_HEADER_TOGGLED_WRAPPED_MASK 0x1 + +#define HINIC_CMDQ_WQE_HEADER_SET(val, member) \ + (((u32)(val) & HINIC_CMDQ_WQE_HEADER_##member##_MASK) \ + << HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) + +#define HINIC_CMDQ_WQE_HEADER_GET(val, member) \ + (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \ + & HINIC_CMDQ_WQE_HEADER_##member##_MASK) + #define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT 0 #define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT 16 #define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT 22 @@ -143,6 +187,8 @@ sizeof(struct hinic_sq_task) + \ (nr_sges) * sizeof(struct hinic_sq_bufdesc)) +#define HINIC_SCMD_DATA_LEN 16 + #define HINIC_MAX_SQ_BUFDESCS 17 #define HINIC_SQ_WQE_MAX_SIZE 320 @@ -184,6 +230,74 @@ enum hinc_tunnel_l4type { HINIC_TUNNEL_L4TYPE_UNKNOWN = 0, }; +struct hinic_cmdq_header { + u32 header_info; + u32 saved_data; +}; + +struct hinic_status { + u32 status_info; +}; + +struct hinic_ctrl { + u32 ctrl_info; +}; + +struct hinic_sge_resp { + struct hinic_sge sge; + u32 rsvd; +}; + +struct hinic_cmdq_completion { + /* HW Format */ + union { + struct hinic_sge_resp sge_resp; + u64 direct_resp; + }; +}; + +struct hinic_scmd_bufdesc { + u32 buf_len; + u32 rsvd; + u8 data[HINIC_SCMD_DATA_LEN]; +}; + +struct hinic_lcmd_bufdesc { + struct hinic_sge sge; + u32 rsvd1; + u64 rsvd2; + u64 rsvd3; +}; + +struct hinic_cmdq_wqe_scmd { + struct hinic_cmdq_header header; + u64 rsvd; + struct hinic_status status; + struct hinic_ctrl ctrl; + struct hinic_cmdq_completion completion; + struct hinic_scmd_bufdesc buf_desc; +}; + +struct hinic_cmdq_wqe_lcmd { + struct hinic_cmdq_header header; + struct hinic_status status; + struct hinic_ctrl ctrl; + struct hinic_cmdq_completion completion; + struct hinic_lcmd_bufdesc buf_desc; +}; + +struct hinic_cmdq_direct_wqe { + struct hinic_cmdq_wqe_scmd wqe_scmd; +}; + +struct hinic_cmdq_wqe { + /* HW Format */ + union { + struct hinic_cmdq_direct_wqe direct_wqe; + struct hinic_cmdq_wqe_lcmd wqe_lcmd; + }; +}; + struct hinic_sq_ctrl { u32 ctrl_info; u32 queue_info; @@ -245,6 +359,7 @@ struct hinic_rq_wqe { struct hinic_hw_wqe { /* HW Format */ union { + struct hinic_cmdq_wqe cmdq_wqe; struct hinic_sq_wqe sq_wqe; struct hinic_rq_wqe rq_wqe; }; -- cgit v1.2.3-55-g7522 From 7ef37fe4c1a156a394174bd1b5d849cef2b8b4fa Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:56:03 +0800 Subject: net-next/hinic: Add cmdq completion handler Add cmdq completion handler for getting a notification about the completion of cmdq commands. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 297 +++++++++++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h | 12 + 2 files changed, 308 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 07ce78745098..7d95f0866fb0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -40,12 +40,31 @@ #include "hinic_hw_io.h" #include "hinic_hw_dev.h" +#define CMDQ_CEQE_TYPE_SHIFT 0 + +#define CMDQ_CEQE_TYPE_MASK 0x7 + +#define CMDQ_CEQE_GET(val, member) \ + (((val) >> CMDQ_CEQE_##member##_SHIFT) \ + & CMDQ_CEQE_##member##_MASK) + +#define CMDQ_WQE_ERRCODE_VAL_SHIFT 20 + +#define CMDQ_WQE_ERRCODE_VAL_MASK 0xF + +#define CMDQ_WQE_ERRCODE_GET(val, member) \ + (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \ + & CMDQ_WQE_ERRCODE_##member##_MASK) + #define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3) #define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi)) #define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe)) +#define CMDQ_WQE_COMPLETED(ctrl_info) \ + HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT) + #define FIRST_DATA_TO_WRITE_LAST sizeof(u64) #define CMDQ_DB_OFF SZ_2K @@ -145,6 +164,22 @@ void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs, pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr); } +static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len) +{ + unsigned int wqe_size = 0; + + switch (len) { + case BUFDESC_LCMD_LEN: + wqe_size = WQE_LCMD_SIZE; + break; + case BUFDESC_SCMD_LEN: + wqe_size = WQE_SCMD_SIZE; + break; + } + + return wqe_size; +} + static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion, struct hinic_cmdq_buf *buf_out) { @@ -210,6 +245,15 @@ static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd, hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size); } +static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe, + void *buf_in, u32 in_size) +{ + struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd; + + wqe_scmd->buf_desc.buf_len = in_size; + memcpy(wqe_scmd->buf_desc.data, buf_in, in_size); +} + static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe, enum cmdq_cmd_type cmd_type, struct hinic_cmdq_buf *buf_in, @@ -238,6 +282,36 @@ static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe, cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in); } +static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe, + enum cmdq_cmd_type cmd_type, + void *buf_in, u16 in_size, + struct hinic_cmdq_buf *buf_out, int wrapped, + enum hinic_cmd_ack_type ack_type, + enum hinic_mod_type mod, u8 cmd, u16 prod_idx) +{ + struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; + enum completion_format complete_format; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + + wqe_scmd = &direct_wqe->wqe_scmd; + + switch (cmd_type) { + case CMDQ_CMD_SYNC_SGE_RESP: + complete_format = COMPLETE_SGE; + cmdq_set_sge_completion(&wqe_scmd->completion, buf_out); + break; + case CMDQ_CMD_SYNC_DIRECT_RESP: + complete_format = COMPLETE_DIRECT; + wqe_scmd->completion.direct_resp = 0; + break; + } + + cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx, + complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN); + + cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size); +} + static void cmdq_wqe_fill(void *dst, void *src) { memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST, @@ -352,6 +426,52 @@ static int 
cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, return 0; } +static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in, + u16 in_size) +{ + struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe; + u16 curr_prod_idx, next_prod_idx; + struct hinic_wq *wq = cmdq->wq; + struct hinic_hw_wqe *hw_wqe; + int wrapped, num_wqebbs; + + /* Keep doorbell index correct */ + spin_lock(&cmdq->cmdq_lock); + + /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/ + hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx); + if (IS_ERR(hw_wqe)) { + spin_unlock(&cmdq->cmdq_lock); + return -EBUSY; + } + + curr_cmdq_wqe = &hw_wqe->cmdq_wqe; + + wrapped = cmdq->wrapped; + + num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size; + next_prod_idx = curr_prod_idx + num_wqebbs; + if (next_prod_idx >= wq->q_depth) { + cmdq->wrapped = !cmdq->wrapped; + next_prod_idx -= wq->q_depth; + } + + cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, + in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ, + HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx); + + /* The data that is written to HW should be in Big Endian Format */ + hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE); + + /* cmdq wqe is not shadow, therefore wqe will be written to wq */ + cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe); + + cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx); + + spin_unlock(&cmdq->cmdq_lock); + return 0; +} + static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in) { if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE) @@ -388,6 +508,139 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, mod, cmd, buf_in, resp); } +/** + * hinic_set_arm_bit - set arm bit for enable interrupt again + * @cmdqs: the cmdqs + * @q_type: type of queue to set the arm bit for + * @q_id: the queue number + * + * Return 0 - Success, negative - Failure + **/ +int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs, + enum hinic_set_arm_qtype q_type, u32 q_id) +{ + struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC]; + struct hinic_hwif *hwif = cmdqs->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_cmdq_arm_bit arm_bit; + int err; + + arm_bit.q_type = q_type; + arm_bit.q_id = q_id; + + err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit)); + if (err) { + dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id); + return err; + } + + return 0; +} + +static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq, + struct hinic_cmdq_wqe *wqe) +{ + u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info); + unsigned int bufdesc_len, wqe_size; + struct hinic_ctrl *ctrl; + + bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN); + wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len); + if (wqe_size == WQE_LCMD_SIZE) { + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd; + + ctrl = &wqe_lcmd->ctrl; + } else { + struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + + wqe_scmd = &direct_wqe->wqe_scmd; + ctrl = &wqe_scmd->ctrl; + } + + /* clear HW busy bit */ + ctrl->ctrl_info = 0; + + wmb(); /* verify wqe is clear */ +} + +/** + * cmdq_arm_ceq_handler - cmdq completion event handler for arm command + * @cmdq: the cmdq of the arm command + * @wqe: the wqe of the arm command + * + * Return 0 - Success, negative - Failure + **/ +static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq, + struct hinic_cmdq_wqe *wqe) +{ + struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe; + struct hinic_cmdq_wqe_scmd *wqe_scmd; + struct hinic_ctrl *ctrl; + u32 ctrl_info; + + 
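	/*
	 * Completion handshake: the arm WQE was posted to the hardware in
	 * big-endian form, so ctrl_info is converted back with be32_to_cpu()
	 * before it is tested.  The hardware signals completion by toggling
	 * the HW busy bit in ctrl_info; if that bit still reads zero the WQE
	 * is not yet processed and -EBUSY is returned.  Otherwise the busy
	 * bit is cleared again via clear_wqe_complete_bit() and the WQEBBs of
	 * the small-command WQE are returned to the work queue with
	 * hinic_put_wqe().
	 */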
wqe_scmd = &direct_wqe->wqe_scmd; + ctrl = &wqe_scmd->ctrl; + ctrl_info = be32_to_cpu(ctrl->ctrl_info); + + /* HW should toggle the HW BUSY BIT */ + if (!CMDQ_WQE_COMPLETED(ctrl_info)) + return -EBUSY; + + clear_wqe_complete_bit(cmdq, wqe); + + hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE); + return 0; +} + +static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx, + int errcode) +{ + if (cmdq->errcode[prod_idx]) + *cmdq->errcode[prod_idx] = errcode; +} + +/** + * cmdq_arm_ceq_handler - cmdq completion event handler for sync command + * @cmdq: the cmdq of the command + * @cons_idx: the consumer index to update the error code for + * @errcode: the error code + **/ +static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx, + int errcode) +{ + u16 prod_idx = cons_idx; + + spin_lock(&cmdq->cmdq_lock); + cmdq_update_errcode(cmdq, prod_idx, errcode); + + wmb(); /* write all before update for the command request */ + + if (cmdq->done[prod_idx]) + complete(cmdq->done[prod_idx]); + spin_unlock(&cmdq->cmdq_lock); +} + +static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci, + struct hinic_cmdq_wqe *cmdq_wqe) +{ + struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd; + struct hinic_status *status = &wqe_lcmd->status; + struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl; + int errcode; + + if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info))) + return -EBUSY; + + errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL); + + cmdq_sync_cmd_handler(cmdq, ci, errcode); + + clear_wqe_complete_bit(cmdq, cmdq_wqe); + hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE); + return 0; +} + /** * cmdq_ceq_handler - cmdq completion event handler * @handle: private data for the handler(cmdqs) @@ -395,7 +648,49 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, **/ static void cmdq_ceq_handler(void *handle, u32 ceqe_data) { - /* should be implemented */ + enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE); + struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle; + struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type]; + struct hinic_cmdq_header *header; + struct hinic_hw_wqe *hw_wqe; + int err, set_arm = 0; + u32 saved_data; + u16 ci; + + /* Read the smallest wqe size for getting wqe size */ + while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) { + if (IS_ERR(hw_wqe)) + break; + + header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe); + saved_data = be32_to_cpu(header->saved_data); + + if (HINIC_SAVED_DATA_GET(saved_data, ARM)) { + /* arm_bit was set until here */ + set_arm = 0; + + if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe)) + break; + } else { + set_arm = 1; + + hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci); + if (IS_ERR(hw_wqe)) + break; + + if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe)) + break; + } + } + + if (set_arm) { + struct hinic_hwif *hwif = cmdqs->hwif; + struct pci_dev *pdev = hwif->pdev; + + err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type); + if (err) + dev_err(&pdev->dev, "Failed to set arm for CMDQ\n"); + } } /** diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h index e11a4f094997..b35583400cb6 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h @@ -100,6 +100,10 @@ enum hinic_cmdq_type { HINIC_MAX_CMDQ_TYPES, }; +enum hinic_set_arm_qtype { + HINIC_SET_ARM_CMDQ, +}; + enum hinic_cmd_ack_type { HINIC_CMD_ACK_TYPE_CMDQ, }; @@ -110,6 +114,11 @@ struct hinic_cmdq_buf { size_t size; }; 
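The arm-bit submission above advances the queue the same way the existing command path does: it converts the small-command WQE size into a number of WQEBBs, bumps the producer index by that amount, and, when the index runs past the ring depth, toggles the queue's wrapped flag and folds the index back into range. A condensed, standalone sketch of that producer-index arithmetic, using plain types and made-up field names rather than the driver's structures:

#include <stdbool.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

struct toy_cmdq {
	unsigned int q_depth;     /* WQEBBs in the ring */
	unsigned int wqebb_size;  /* bytes per WQEBB */
	unsigned int prod_idx;
	bool wrapped;             /* toggles every time the PI passes the ring end */
};

/* reserve wqe_size bytes worth of WQEBBs, return the PI to write the WQE at */
static unsigned int toy_cmdq_get_wqe(struct toy_cmdq *q, unsigned int wqe_size)
{
	unsigned int num_wqebbs = ALIGN_UP(wqe_size, q->wqebb_size) / q->wqebb_size;
	unsigned int curr = q->prod_idx;
	unsigned int next = curr + num_wqebbs;

	if (next >= q->q_depth) {
		q->wrapped = !q->wrapped;
		next -= q->q_depth;
	}
	q->prod_idx = next;
	return curr;
}

int main(void)
{
	struct toy_cmdq q = { .q_depth = 4, .wqebb_size = 64, .prod_idx = 3 };
	unsigned int pi = toy_cmdq_get_wqe(&q, 64); /* one WQEBB, crosses the ring end */

	printf("wrote at %u, next pi %u, wrapped=%d\n", pi, q.prod_idx, q.wrapped);
	return 0;
}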
+struct hinic_cmdq_arm_bit { + u32 q_type; + u32 q_id; +}; + struct hinic_cmdq_ctxt_info { u64 curr_wqe_page_pfn; u64 wq_block_pfn; @@ -167,6 +176,9 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs, enum hinic_mod_type mod, u8 cmd, struct hinic_cmdq_buf *buf_in, u64 *out_param); +int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs, + enum hinic_set_arm_qtype q_type, u32 q_id); + int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, void __iomem **db_area); -- cgit v1.2.3-55-g7522 From e2585ea775380ec2b2b1bf9619a5a3a6d26aa72b Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:56:04 +0800 Subject: net-next/hinic: Add Rx handler Set the io resources in the nic and handle rx events by qp operations. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 1 + drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h | 1 + drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 361 +++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 77 ++++ drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 36 ++ drivers/net/ethernet/huawei/hinic/hinic_hw_if.h | 35 ++ drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h | 13 + drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 208 +++++++++++ drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 29 ++ drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c | 12 + drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h | 2 + drivers/net/ethernet/huawei/hinic/hinic_main.c | 27 ++ drivers/net/ethernet/huawei/hinic/hinic_port.c | 32 ++ drivers/net/ethernet/huawei/hinic/hinic_port.h | 19 + drivers/net/ethernet/huawei/hinic/hinic_rx.c | 418 ++++++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_rx.h | 7 + 16 files changed, 1278 insertions(+) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index 5b8231dc3ff1..3d0f6cf2508f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -43,6 +43,7 @@ struct hinic_dev { struct hinic_hwdev *hwdev; u32 msg_enable; + unsigned int rx_weight; unsigned int flags; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h index 10b8c7b650dc..f39b184f674d 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h @@ -20,6 +20,7 @@ #define HINIC_CSR_FUNC_ATTR0_ADDR 0x0 #define HINIC_CSR_FUNC_ATTR1_ADDR 0x4 +#define HINIC_CSR_FUNC_ATTR4_ADDR 0x10 #define HINIC_CSR_FUNC_ATTR5_ADDR 0x14 #define HINIC_DMA_ATTR_BASE 0xC80 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 2f698f1a89a3..77d43431e573 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -20,6 +20,9 @@ #include #include #include +#include +#include +#include #include #include "hinic_hw_if.h" @@ -30,6 +33,10 @@ #include "hinic_hw_io.h" #include "hinic_hw_dev.h" +#define IO_STATUS_TIMEOUT 100 +#define OUTBOUND_STATE_TIMEOUT 100 +#define DB_STATE_TIMEOUT 100 + #define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \ (2 * (max_qps) + (num_aeqs) + (num_ceqs)) @@ -37,6 +44,15 @@ enum intr_type { INTR_MSIX_TYPE, }; +enum io_status { + IO_STOPPED = 0, + IO_RUNNING = 1, +}; + +enum hw_ioctxt_set_cmdq_depth { + HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT, +}; + /* HW struct */ struct hinic_dev_cap { u8 status; @@ -51,6 +67,31 @@ struct 
hinic_dev_cap { u8 rsvd3[208]; }; +struct rx_buf_sz { + int idx; + size_t sz; +}; + +static struct rx_buf_sz rx_buf_sz_table[] = { + {0, 32}, + {1, 64}, + {2, 96}, + {3, 128}, + {4, 192}, + {5, 256}, + {6, 384}, + {7, 512}, + {8, 768}, + {9, 1024}, + {10, 1536}, + {11, 2048}, + {12, 3072}, + {13, 4096}, + {14, 8192}, + {15, 16384}, + {-1, -1}, +}; + /** * get_capability - convert device capabilities to NIC capabilities * @hwdev: the HW device to set and convert device capabilities for @@ -235,6 +276,252 @@ int hinic_port_msg_cmd(struct hinic_hwdev *hwdev, enum hinic_port_cmd cmd, HINIC_MGMT_MSG_SYNC); } +/** + * init_fw_ctxt- Init Firmware tables before network mgmt and io operations + * @hwdev: the NIC HW device + * + * Return 0 - Success, negative - Failure + **/ +static int init_fw_ctxt(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_cmd_fw_ctxt fw_ctxt; + struct hinic_pfhwdev *pfhwdev; + u16 out_size; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + fw_ctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + fw_ctxt.rx_buf_sz = HINIC_RX_BUF_SZ; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_FWCTXT_INIT, + &fw_ctxt, sizeof(fw_ctxt), + &fw_ctxt, &out_size); + if (err || (out_size != sizeof(fw_ctxt)) || fw_ctxt.status) { + dev_err(&pdev->dev, "Failed to init FW ctxt, ret = %d\n", + fw_ctxt.status); + return -EFAULT; + } + + return 0; +} + +/** + * set_hw_ioctxt - set the shape of the IO queues in FW + * @hwdev: the NIC HW device + * @rq_depth: rq depth + * @sq_depth: sq depth + * + * Return 0 - Success, negative - Failure + **/ +static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth, + unsigned int sq_depth) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct hinic_cmd_hw_ioctxt hw_ioctxt; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + int i; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT; + hw_ioctxt.cmdq_depth = 0; + + hw_ioctxt.rq_depth = ilog2(rq_depth); + + for (i = 0; ; i++) { + if ((rx_buf_sz_table[i].sz == HINIC_RX_BUF_SZ) || + (rx_buf_sz_table[i].sz == -1)) { + hw_ioctxt.rx_buf_sz_idx = rx_buf_sz_table[i].idx; + break; + } + } + + if (hw_ioctxt.rx_buf_sz_idx == -1) + return -EINVAL; + + hw_ioctxt.sq_depth = ilog2(sq_depth); + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_HWCTXT_SET, + &hw_ioctxt, sizeof(hw_ioctxt), NULL, + NULL, HINIC_MGMT_MSG_SYNC); +} + +static int wait_for_outbound_state(struct hinic_hwdev *hwdev) +{ + enum hinic_outbound_state outbound_state; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + unsigned long end; + + end = jiffies + msecs_to_jiffies(OUTBOUND_STATE_TIMEOUT); + do { + outbound_state = hinic_outbound_state_get(hwif); + + if (outbound_state == HINIC_OUTBOUND_ENABLE) + return 0; + + msleep(20); + } while (time_before(jiffies, end)); + + dev_err(&pdev->dev, "Wait for OUTBOUND - Timeout\n"); + return -EFAULT; +} + +static int wait_for_db_state(struct hinic_hwdev *hwdev) +{ + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + enum 
hinic_db_state db_state; + unsigned long end; + + end = jiffies + msecs_to_jiffies(DB_STATE_TIMEOUT); + do { + db_state = hinic_db_state_get(hwif); + + if (db_state == HINIC_DB_ENABLE) + return 0; + + msleep(20); + } while (time_before(jiffies, end)); + + dev_err(&pdev->dev, "Wait for DB - Timeout\n"); + return -EFAULT; +} + +static int wait_for_io_stopped(struct hinic_hwdev *hwdev) +{ + struct hinic_cmd_io_status cmd_io_status; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + unsigned long end; + u16 out_size; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + cmd_io_status.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + end = jiffies + msecs_to_jiffies(IO_STATUS_TIMEOUT); + do { + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_IO_STATUS_GET, + &cmd_io_status, sizeof(cmd_io_status), + &cmd_io_status, &out_size, + HINIC_MGMT_MSG_SYNC); + if ((err) || (out_size != sizeof(cmd_io_status))) { + dev_err(&pdev->dev, "Failed to get IO status, ret = %d\n", + err); + return err; + } + + if (cmd_io_status.status == IO_STOPPED) { + dev_info(&pdev->dev, "IO stopped\n"); + return 0; + } + + msleep(20); + } while (time_before(jiffies, end)); + + dev_err(&pdev->dev, "Wait for IO stopped - Timeout\n"); + return -ETIMEDOUT; +} + +/** + * clear_io_resource - set the IO resources as not active in the NIC + * @hwdev: the NIC HW device + * + * Return 0 - Success, negative - Failure + **/ +static int clear_io_resources(struct hinic_hwdev *hwdev) +{ + struct hinic_cmd_clear_io_res cmd_clear_io_res; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + int err; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + err = wait_for_io_stopped(hwdev); + if (err) { + dev_err(&pdev->dev, "IO has not stopped yet\n"); + return err; + } + + cmd_clear_io_res.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, + HINIC_COMM_CMD_IO_RES_CLEAR, &cmd_clear_io_res, + sizeof(cmd_clear_io_res), NULL, NULL, + HINIC_MGMT_MSG_SYNC); + if (err) { + dev_err(&pdev->dev, "Failed to clear IO resources\n"); + return err; + } + + return 0; +} + +/** + * set_resources_state - set the state of the resources in the NIC + * @hwdev: the NIC HW device + * @state: the state to set + * + * Return 0 - Success, negative - Failure + **/ +static int set_resources_state(struct hinic_hwdev *hwdev, + enum hinic_res_state state) +{ + struct hinic_cmd_set_res_state res_state; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + res_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + res_state.state = state; + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + + return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, + HINIC_MOD_COMM, + HINIC_COMM_CMD_RES_STATE_SET, + &res_state, sizeof(res_state), NULL, + NULL, HINIC_MGMT_MSG_SYNC); +} + /** * get_base_qpn - get the first qp number * @hwdev: the NIC HW device @@ -312,8 +599,23 @@ int hinic_hwdev_ifup(struct hinic_hwdev 
*hwdev) goto err_create_qps; } + err = wait_for_db_state(hwdev); + if (err) { + dev_warn(&pdev->dev, "db - disabled, try again\n"); + hinic_db_state_set(hwif, HINIC_DB_ENABLE); + } + + err = set_hw_ioctxt(hwdev, HINIC_SQ_DEPTH, HINIC_RQ_DEPTH); + if (err) { + dev_err(&pdev->dev, "Failed to set HW IO ctxt\n"); + goto err_hw_ioctxt; + } + return 0; +err_hw_ioctxt: + hinic_io_destroy_qps(func_to_io, num_qps); + err_create_qps: hinic_io_free(func_to_io); return err; @@ -329,6 +631,8 @@ void hinic_hwdev_ifdown(struct hinic_hwdev *hwdev) struct hinic_func_to_io *func_to_io = &hwdev->func_to_io; struct hinic_cap *nic_cap = &hwdev->nic_cap; + clear_io_resources(hwdev); + hinic_io_destroy_qps(func_to_io, nic_cap->num_qps); hinic_io_free(func_to_io); } @@ -532,6 +836,12 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) goto err_init_msix; } + err = wait_for_outbound_state(hwdev); + if (err) { + dev_warn(&pdev->dev, "outbound - disabled, try again\n"); + hinic_outbound_state_set(hwif, HINIC_OUTBOUND_ENABLE); + } + num_aeqs = HINIC_HWIF_NUM_AEQS(hwif); err = hinic_aeqs_init(&hwdev->aeqs, hwif, num_aeqs, @@ -554,8 +864,22 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev) goto err_dev_cap; } + err = init_fw_ctxt(hwdev); + if (err) { + dev_err(&pdev->dev, "Failed to init function table\n"); + goto err_init_fw_ctxt; + } + + err = set_resources_state(hwdev, HINIC_RES_ACTIVE); + if (err) { + dev_err(&pdev->dev, "Failed to set resources state\n"); + goto err_resources_state; + } + return hwdev; +err_resources_state: +err_init_fw_ctxt: err_dev_cap: free_pfhwdev(pfhwdev); @@ -582,6 +906,8 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev) struct hinic_pfhwdev, hwdev); + set_resources_state(hwdev, HINIC_RES_CLEAN); + free_pfhwdev(pfhwdev); hinic_aeqs_free(&hwdev->aeqs); @@ -639,3 +965,38 @@ struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i) return &qp->rq; } + +/** + * hinic_hwdev_msix_cnt_set - clear message attribute counters for msix entry + * @hwdev: the NIC HW device + * @msix_index: msix_index + * + * Return 0 - Success, negative - Failure + **/ +int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index) +{ + return hinic_msix_attr_cnt_clear(hwdev->hwif, msix_index); +} + +/** + * hinic_hwdev_msix_set - set message attribute for msix entry + * @hwdev: the NIC HW device + * @msix_index: msix_index + * @pending_limit: the maximum pending interrupt events (unit 8) + * @coalesc_timer: coalesc period for interrupt (unit 8 us) + * @lli_timer: replenishing period for low latency credit (unit 8 us) + * @lli_credit_limit: maximum credits for low latency msix messages (unit 8) + * @resend_timer: maximum wait for resending msix (unit coalesc period) + * + * Return 0 - Success, negative - Failure + **/ +int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer) +{ + return hinic_msix_attr_set(hwdev->hwif, msix_index, + pending_limit, coalesc_timer, + lli_timer_cfg, lli_credit_limit, + resend_timer); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index 81c2c6e92898..e7277d19db58 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -74,6 +74,76 @@ enum hinic_cb_state { HINIC_CB_RUNNING = BIT(1), }; +enum hinic_res_state { + HINIC_RES_CLEAN = 0, + HINIC_RES_ACTIVE = 1, +}; + +struct hinic_cmd_fw_ctxt { + u8 status; + u8 
version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rx_buf_sz; + + u32 rsvd1; +}; + +struct hinic_cmd_hw_ioctxt { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + + u16 rsvd1; + + u8 set_cmdq_depth; + u8 cmdq_depth; + + u8 rsvd2; + u8 rsvd3; + u8 rsvd4; + u8 rsvd5; + + u16 rq_depth; + u16 rx_buf_sz_idx; + u16 sq_depth; +}; + +struct hinic_cmd_io_status { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 rsvd1; + u8 rsvd2; + u32 io_status; +}; + +struct hinic_cmd_clear_io_res { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 rsvd1; + u8 rsvd2; +}; + +struct hinic_cmd_set_res_state { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u8 state; + u8 rsvd1; + u32 rsvd2; +}; + struct hinic_cmd_base_qpn { u8 status; u8 version; @@ -137,4 +207,11 @@ struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i); struct hinic_rq *hinic_hwdev_get_rq(struct hinic_hwdev *hwdev, int i); +int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index); + +int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, + u8 pending_limit, u8 coalesc_timer, + u8 lli_timer_cfg, u8 lli_credit_limit, + u8 resend_timer); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c index b340695bff8b..823a17061a97 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c @@ -132,6 +132,42 @@ void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action) hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR5_ADDR, attr5); } +enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + return HINIC_FA4_GET(attr4, OUTBOUND_STATE); +} + +void hinic_outbound_state_set(struct hinic_hwif *hwif, + enum hinic_outbound_state outbound_state) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + attr4 = HINIC_FA4_CLEAR(attr4, OUTBOUND_STATE); + attr4 |= HINIC_FA4_SET(outbound_state, OUTBOUND_STATE); + + hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); +} + +enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + return HINIC_FA4_GET(attr4, DB_STATE); +} + +void hinic_db_state_set(struct hinic_hwif *hwif, + enum hinic_db_state db_state) +{ + u32 attr4 = hinic_hwif_read_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR); + + attr4 = HINIC_FA4_CLEAR(attr4, DB_STATE); + attr4 |= HINIC_FA4_SET(db_state, DB_STATE); + + hinic_hwif_write_reg(hwif, HINIC_CSR_FUNC_ATTR4_ADDR, attr4); +} + /** * hwif_ready - test if the HW is ready for use * @hwif: the HW interface of a pci function device diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h index 8f5919540c19..5b4760c0e9f5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h @@ -73,6 +73,21 @@ #define HINIC_FA1_GET(val, member) \ (((val) >> HINIC_FA1_##member##_SHIFT) & HINIC_FA1_##member##_MASK) +#define HINIC_FA4_OUTBOUND_STATE_SHIFT 0 +#define HINIC_FA4_DB_STATE_SHIFT 1 + +#define HINIC_FA4_OUTBOUND_STATE_MASK 0x1 +#define HINIC_FA4_DB_STATE_MASK 0x1 + +#define HINIC_FA4_GET(val, member) \ + (((val) >> HINIC_FA4_##member##_SHIFT) & HINIC_FA4_##member##_MASK) + +#define HINIC_FA4_SET(val, member) \ + ((((u32)val) & HINIC_FA4_##member##_MASK) << HINIC_FA4_##member##_SHIFT) + 
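Together with HINIC_FA4_CLEAR just below, these accessors implement a plain read-modify-write on the ATTR4 register: hinic_db_state_set() and hinic_outbound_state_set() read attr4, clear the one-bit field, OR in the new value and write the word back. A small self-contained sketch of that pattern on an ordinary 32-bit word (the register value here is simulated, not read from hardware):

#include <stdint.h>
#include <stdio.h>

/* same shape as the FA4 helpers in hinic_hw_if.h */
#define FA4_OUTBOUND_STATE_SHIFT  0
#define FA4_DB_STATE_SHIFT        1
#define FA4_OUTBOUND_STATE_MASK   0x1
#define FA4_DB_STATE_MASK         0x1

#define FA4_GET(val, member) \
	(((val) >> FA4_##member##_SHIFT) & FA4_##member##_MASK)
#define FA4_SET(val, member) \
	(((uint32_t)(val) & FA4_##member##_MASK) << FA4_##member##_SHIFT)
#define FA4_CLEAR(val, member) \
	((val) & ~(FA4_##member##_MASK << FA4_##member##_SHIFT))

int main(void)
{
	uint32_t attr4 = 0x2;	/* simulated register: DB_STATE = 1 (disabled) */

	/* read-modify-write: clear the field, then set the new value (0 = enable) */
	attr4 = FA4_CLEAR(attr4, DB_STATE);
	attr4 |= FA4_SET(0, DB_STATE);

	printf("DB_STATE=%u OUTBOUND_STATE=%u\n",
	       (unsigned)FA4_GET(attr4, DB_STATE),
	       (unsigned)FA4_GET(attr4, OUTBOUND_STATE));
	return 0;
}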
+#define HINIC_FA4_CLEAR(val, member) \ + ((val) & (~(HINIC_FA4_##member##_MASK << HINIC_FA4_##member##_SHIFT))) + #define HINIC_FA5_PF_ACTION_SHIFT 0 #define HINIC_FA5_PF_ACTION_MASK 0xFFFF @@ -182,6 +197,16 @@ enum hinic_pf_action { HINIC_PF_MGMT_ACTIVE = 0x11, }; +enum hinic_outbound_state { + HINIC_OUTBOUND_ENABLE = 0, + HINIC_OUTBOUND_DISABLE = 1, +}; + +enum hinic_db_state { + HINIC_DB_ENABLE = 0, + HINIC_DB_DISABLE = 1, +}; + struct hinic_func_attr { u16 func_idx; u8 pf_idx; @@ -230,6 +255,16 @@ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index); void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action); +enum hinic_outbound_state hinic_outbound_state_get(struct hinic_hwif *hwif); + +void hinic_outbound_state_set(struct hinic_hwif *hwif, + enum hinic_outbound_state outbound_state); + +enum hinic_db_state hinic_db_state_get(struct hinic_hwif *hwif); + +void hinic_db_state_set(struct hinic_hwif *hwif, + enum hinic_db_state db_state); + int hinic_init_hwif(struct hinic_hwif *hwif, struct pci_dev *pdev); void hinic_free_hwif(struct hinic_hwif *hwif); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h index 90116c2db819..320711e8dee6 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h @@ -69,8 +69,21 @@ enum hinic_cfg_cmd { }; enum hinic_comm_cmd { + HINIC_COMM_CMD_IO_STATUS_GET = 0x3, + HINIC_COMM_CMD_CMDQ_CTXT_SET = 0x10, HINIC_COMM_CMD_CMDQ_CTXT_GET = 0x11, + + HINIC_COMM_CMD_HWCTXT_SET = 0x12, + HINIC_COMM_CMD_HWCTXT_GET = 0x13, + + HINIC_COMM_CMD_SQ_HI_CI_SET = 0x14, + + HINIC_COMM_CMD_RES_STATE_SET = 0x24, + + HINIC_COMM_CMD_IO_RES_CLEAR = 0x29, + + HINIC_COMM_CMD_MAX = 0x32, }; enum hinic_mgmt_cb_state { diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c index 13e0ff3533a4..6c5c6ea0ab49 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -22,10 +22,13 @@ #include #include #include +#include +#include #include #include "hinic_common.h" #include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" #include "hinic_hw_qp_ctxt.h" #include "hinic_hw_qp.h" @@ -51,6 +54,13 @@ (max_sqs + (q_id)) * Q_CTXT_SIZE) #define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4) +#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3) + +#define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask) + +enum rq_completion_fmt { + RQ_COMPLETE_SGE = 1 +}; void hinic_qp_prepare_header(struct hinic_qp_ctxt_header *qp_ctxt_hdr, enum hinic_qp_ctxt_type ctxt_type, @@ -424,3 +434,201 @@ void hinic_clean_rq(struct hinic_rq *rq) free_rq_cqe(rq); free_rq_skb_arr(rq); } + +/** + * hinic_get_rq_free_wqebbs - return number of free wqebbs for use + * @rq: recv queue + * + * Return number of free wqebbs + **/ +int hinic_get_rq_free_wqebbs(struct hinic_rq *rq) +{ + struct hinic_wq *wq = rq->wq; + + return atomic_read(&wq->delta) - 1; +} + +/** + * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi + * @rq: rq to get wqe from + * @wqe_size: wqe size + * @prod_idx: returned pi + * + * Return wqe pointer + **/ +struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq, + unsigned int wqe_size, u16 *prod_idx) +{ + struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size, + prod_idx); + + if (IS_ERR(hw_wqe)) + return NULL; + + return &hw_wqe->rq_wqe; +} + +/** + * hinic_rq_write_wqe - write the wqe to the rq + * @rq: 
recv queue + * @prod_idx: pi of the wqe + * @rq_wqe: the wqe to write + * @skb: skb to save + **/ +void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, + struct hinic_rq_wqe *rq_wqe, struct sk_buff *skb) +{ + struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)rq_wqe; + + rq->saved_skb[prod_idx] = skb; + + /* The data in the HW should be in Big Endian Format */ + hinic_cpu_to_be32(rq_wqe, sizeof(*rq_wqe)); + + hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe)); +} + +/** + * hinic_rq_read_wqe - read wqe ptr in the current ci and update the ci + * @rq: recv queue + * @wqe_size: the size of the wqe + * @skb: return saved skb + * @cons_idx: consumer index of the wqe + * + * Return wqe in ci position + **/ +struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq, + unsigned int wqe_size, + struct sk_buff **skb, u16 *cons_idx) +{ + struct hinic_hw_wqe *hw_wqe; + struct hinic_rq_cqe *cqe; + int rx_done; + u32 status; + + hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx); + if (IS_ERR(hw_wqe)) + return NULL; + + cqe = rq->cqe[*cons_idx]; + + status = be32_to_cpu(cqe->status); + + rx_done = HINIC_RQ_CQE_STATUS_GET(status, RXDONE); + if (!rx_done) + return NULL; + + *skb = rq->saved_skb[*cons_idx]; + + return &hw_wqe->rq_wqe; +} + +/** + * hinic_rq_read_next_wqe - increment ci and read the wqe in ci position + * @rq: recv queue + * @wqe_size: the size of the wqe + * @skb: return saved skb + * @cons_idx: consumer index in the wq + * + * Return wqe in incremented ci position + **/ +struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq, + unsigned int wqe_size, + struct sk_buff **skb, + u16 *cons_idx) +{ + struct hinic_wq *wq = rq->wq; + struct hinic_hw_wqe *hw_wqe; + unsigned int num_wqebbs; + + wqe_size = ALIGN(wqe_size, wq->wqebb_size); + num_wqebbs = wqe_size / wq->wqebb_size; + + *cons_idx = RQ_MASKED_IDX(rq, *cons_idx + num_wqebbs); + + *skb = rq->saved_skb[*cons_idx]; + + hw_wqe = hinic_read_wqe_direct(wq, *cons_idx); + + return &hw_wqe->rq_wqe; +} + +/** + * hinic_put_wqe - release the ci for new wqes + * @rq: recv queue + * @cons_idx: consumer index of the wqe + * @wqe_size: the size of the wqe + **/ +void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, + unsigned int wqe_size) +{ + struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; + u32 status = be32_to_cpu(cqe->status); + + status = HINIC_RQ_CQE_STATUS_CLEAR(status, RXDONE); + + /* Rx WQE size is 1 WQEBB, no wq shadow*/ + cqe->status = cpu_to_be32(status); + + wmb(); /* clear done flag */ + + hinic_put_wqe(rq->wq, wqe_size); +} + +/** + * hinic_rq_get_sge - get sge from the wqe + * @rq: recv queue + * @rq_wqe: wqe to get the sge from its buf address + * @cons_idx: consumer index + * @sge: returned sge + **/ +void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *rq_wqe, + u16 cons_idx, struct hinic_sge *sge) +{ + struct hinic_rq_cqe *cqe = rq->cqe[cons_idx]; + u32 len = be32_to_cpu(cqe->len); + + sge->hi_addr = be32_to_cpu(rq_wqe->buf_desc.hi_addr); + sge->lo_addr = be32_to_cpu(rq_wqe->buf_desc.lo_addr); + sge->len = HINIC_RQ_CQE_SGE_GET(len, LEN); +} + +/** + * hinic_rq_prepare_wqe - prepare wqe before insert to the queue + * @rq: recv queue + * @prod_idx: pi value + * @rq_wqe: the wqe + * @sge: sge for use by the wqe for recv buf address + **/ +void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, + struct hinic_rq_wqe *rq_wqe, struct hinic_sge *sge) +{ + struct hinic_rq_cqe_sect *cqe_sect = &rq_wqe->cqe_sect; + struct hinic_rq_bufdesc *buf_desc = &rq_wqe->buf_desc; + struct hinic_rq_cqe *cqe = 
rq->cqe[prod_idx]; + struct hinic_rq_ctrl *ctrl = &rq_wqe->ctrl; + dma_addr_t cqe_dma = rq->cqe_dma[prod_idx]; + + ctrl->ctrl_info = + HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*ctrl)), LEN) | + HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*cqe_sect)), + COMPLETE_LEN) | + HINIC_RQ_CTRL_SET(SIZE_8BYTES(sizeof(*buf_desc)), + BUFDESC_SECT_LEN) | + HINIC_RQ_CTRL_SET(RQ_COMPLETE_SGE, COMPLETE_FORMAT); + + hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe)); + + buf_desc->hi_addr = sge->hi_addr; + buf_desc->lo_addr = sge->lo_addr; +} + +/** + * hinic_rq_update - update pi of the rq + * @rq: recv queue + * @prod_idx: pi value + **/ +void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx) +{ + *rq->pi_virt_addr = cpu_to_be16(RQ_MASKED_IDX(rq, prod_idx + 1)); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h index 56d1f8b9ca65..696f0df6559a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -21,6 +21,7 @@ #include #include +#include "hinic_common.h" #include "hinic_hw_if.h" #include "hinic_hw_wqe.h" #include "hinic_hw_wq.h" @@ -100,4 +101,32 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, void hinic_clean_rq(struct hinic_rq *rq); +int hinic_get_rq_free_wqebbs(struct hinic_rq *rq); + +struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq, + unsigned int wqe_size, u16 *prod_idx); + +void hinic_rq_write_wqe(struct hinic_rq *rq, u16 prod_idx, + struct hinic_rq_wqe *wqe, struct sk_buff *skb); + +struct hinic_rq_wqe *hinic_rq_read_wqe(struct hinic_rq *rq, + unsigned int wqe_size, + struct sk_buff **skb, u16 *cons_idx); + +struct hinic_rq_wqe *hinic_rq_read_next_wqe(struct hinic_rq *rq, + unsigned int wqe_size, + struct sk_buff **skb, + u16 *cons_idx); + +void hinic_rq_put_wqe(struct hinic_rq *rq, u16 cons_idx, + unsigned int wqe_size); + +void hinic_rq_get_sge(struct hinic_rq *rq, struct hinic_rq_wqe *wqe, + u16 cons_idx, struct hinic_sge *sge); + +void hinic_rq_prepare_wqe(struct hinic_rq *rq, u16 prod_idx, + struct hinic_rq_wqe *wqe, struct hinic_sge *sge); + +void hinic_rq_update(struct hinic_rq *rq, u16 prod_idx); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index 6ceae958db28..3e3181c089bd 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -826,6 +826,18 @@ struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx); } +/** + * hinic_read_wqe_direct - read wqe directly from ci position + * @wq: wq + * @cons_idx: ci position + * + * Return wqe + **/ +struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx) +{ + return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx); +} + /** * wqe_shadow - check if a wqe is shadow * @wq: wq of the wqe diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h index f01477a2c165..9c030a0f035e 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h @@ -109,6 +109,8 @@ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size); struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, u16 *cons_idx); +struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx); + void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe 
*wqe, unsigned int wqe_size); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 22d5b61b0426..53b13f8d0b8f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -42,6 +43,10 @@ MODULE_AUTHOR("Huawei Technologies CO., Ltd"); MODULE_DESCRIPTION("Huawei Intelligent NIC driver"); MODULE_LICENSE("GPL"); +static unsigned int rx_weight = 64; +module_param(rx_weight, uint, 0644); +MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); + #define PCI_DEVICE_ID_HI1822_PF 0x1822 #define HINIC_WQ_NAME "hinic_dev" @@ -220,6 +225,13 @@ static int hinic_open(struct net_device *netdev) goto err_port_state; } + err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_ENABLE); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to set func port state\n"); + goto err_func_port_state; + } + /* Wait up to 3 sec between port enable to link state */ msleep(3000); @@ -250,6 +262,12 @@ static int hinic_open(struct net_device *netdev) err_port_link: up(&nic_dev->mgmt_lock); + ret = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); + if (ret) + netif_warn(nic_dev, drv, netdev, + "Failed to revert func port state\n"); + +err_func_port_state: ret = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); if (ret) netif_warn(nic_dev, drv, netdev, @@ -283,6 +301,14 @@ static int hinic_close(struct net_device *netdev) up(&nic_dev->mgmt_lock); + err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to set func port state\n"); + nic_dev->flags |= (flags & HINIC_INTF_UP); + return err; + } + err = hinic_port_set_state(nic_dev, HINIC_PORT_DISABLE); if (err) { netif_err(nic_dev, drv, netdev, "Failed to set port state\n"); @@ -664,6 +690,7 @@ static int nic_dev_init(struct pci_dev *pdev) nic_dev->flags = 0; nic_dev->txqs = NULL; nic_dev->rxqs = NULL; + nic_dev->rx_weight = rx_weight; sema_init(&nic_dev->mgmt_lock, 1); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index 0dafede7169e..528ec6febd04 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -314,3 +314,35 @@ int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state) return 0; } + +/** + * hinic_port_set_func_state- set func device state + * @nic_dev: nic device + * @state: the state to set + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_set_func_state(struct hinic_dev *nic_dev, + enum hinic_func_port_state state) +{ + struct hinic_port_func_state_cmd func_state; + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + func_state.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + func_state.state = state; + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_FUNC_STATE, + &func_state, sizeof(func_state), + &func_state, &out_size); + if (err || (out_size != sizeof(func_state)) || func_state.status) { + dev_err(&pdev->dev, "Failed to set port func state, ret = %d\n", + func_state.status); + return -EFAULT; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index 3a8da8eadb9b..17f9d7fc5a0a 100644 --- 
a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -40,6 +40,11 @@ enum hinic_port_state { HINIC_PORT_ENABLE = 3, }; +enum hinic_func_port_state { + HINIC_FUNC_PORT_DISABLE = 0, + HINIC_FUNC_PORT_ENABLE = 2, +}; + struct hinic_port_mac_cmd { u8 status; u8 version; @@ -109,6 +114,17 @@ struct hinic_port_link_status { u8 rsvd2; }; +struct hinic_port_func_state_cmd { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd1; + u8 state; + u8 rsvd2[3]; +}; + int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id); @@ -131,4 +147,7 @@ int hinic_port_link_state(struct hinic_dev *nic_dev, int hinic_port_set_state(struct hinic_dev *nic_dev, enum hinic_port_state state); +int hinic_port_set_func_state(struct hinic_dev *nic_dev, + enum hinic_func_port_state state); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 3c79f65d44da..b1212e498f95 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -13,11 +13,35 @@ * */ +#include +#include +#include +#include +#include #include +#include #include +#include +#include +#include +#include +#include +#include +#include "hinic_common.h" +#include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" #include "hinic_hw_qp.h" +#include "hinic_hw_dev.h" #include "hinic_rx.h" +#include "hinic_dev.h" + +#define RX_IRQ_NO_PENDING 0 +#define RX_IRQ_NO_COALESC 0 +#define RX_IRQ_NO_LLI_TIMER 0 +#define RX_IRQ_NO_CREDIT 0 +#define RX_IRQ_NO_RESEND_TIMER 0 /** * hinic_rxq_clean_stats - Clean the statistics of specific queue @@ -45,6 +69,361 @@ static void rxq_stats_init(struct hinic_rxq *rxq) hinic_rxq_clean_stats(rxq); } +/** + * rx_alloc_skb - allocate skb and map it to dma address + * @rxq: rx queue + * @dma_addr: returned dma address for the skb + * + * Return skb + **/ +static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq, + dma_addr_t *dma_addr) +{ + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct sk_buff *skb; + dma_addr_t addr; + int err; + + skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz); + if (!skb) { + netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n"); + return NULL; + } + + addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz, + DMA_FROM_DEVICE); + err = dma_mapping_error(&pdev->dev, addr); + if (err) { + dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err); + goto err_rx_map; + } + + *dma_addr = addr; + return skb; + +err_rx_map: + dev_kfree_skb_any(skb); + return NULL; +} + +/** + * rx_unmap_skb - unmap the dma address of the skb + * @rxq: rx queue + * @dma_addr: dma address of the skb + **/ +static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr) +{ + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + + dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz, + DMA_FROM_DEVICE); +} + +/** + * rx_free_skb - unmap and free skb + * @rxq: rx queue + * @skb: skb to free + * @dma_addr: dma address of the skb + **/ +static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb, + dma_addr_t dma_addr) +{ + rx_unmap_skb(rxq, dma_addr); + dev_kfree_skb_any(skb); +} + +/** + * rx_alloc_pkts - allocate pkts in rx 
queue + * @rxq: rx queue + * + * Return number of skbs allocated + **/ +static int rx_alloc_pkts(struct hinic_rxq *rxq) +{ + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_rq_wqe *rq_wqe; + unsigned int free_wqebbs; + struct hinic_sge sge; + dma_addr_t dma_addr; + struct sk_buff *skb; + int i, alloc_more; + u16 prod_idx; + + free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq); + alloc_more = 0; + + /* Limit the allocation chunks */ + if (free_wqebbs > nic_dev->rx_weight) + free_wqebbs = nic_dev->rx_weight; + + for (i = 0; i < free_wqebbs; i++) { + skb = rx_alloc_skb(rxq, &dma_addr); + if (!skb) { + netdev_err(rxq->netdev, "Failed to alloc Rx skb\n"); + alloc_more = 1; + goto skb_out; + } + + hinic_set_sge(&sge, dma_addr, skb->len); + + rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, + &prod_idx); + if (!rq_wqe) { + rx_free_skb(rxq, skb, dma_addr); + alloc_more = 1; + goto skb_out; + } + + hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge); + + hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb); + } + +skb_out: + if (i) { + wmb(); /* write all the wqes before update PI */ + + hinic_rq_update(rxq->rq, prod_idx); + } + + if (alloc_more) + tasklet_schedule(&rxq->rx_task); + + return i; +} + +/** + * free_all_rx_skbs - free all skbs in rx queue + * @rxq: rx queue + **/ +static void free_all_rx_skbs(struct hinic_rxq *rxq) +{ + struct hinic_rq *rq = rxq->rq; + struct hinic_hw_wqe *hw_wqe; + struct hinic_sge sge; + u16 ci; + + while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) { + if (IS_ERR(hw_wqe)) + break; + + hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge); + + hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE); + + rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge)); + } +} + +/** + * rx_alloc_task - tasklet for queue allocation + * @data: rx queue + **/ +static void rx_alloc_task(unsigned long data) +{ + struct hinic_rxq *rxq = (struct hinic_rxq *)data; + + (void)rx_alloc_pkts(rxq); +} + +/** + * rx_recv_jumbo_pkt - Rx handler for jumbo pkt + * @rxq: rx queue + * @head_skb: the first skb in the list + * @left_pkt_len: left size of the pkt exclude head skb + * @ci: consumer index + * + * Return number of wqes that used for the left of the pkt + **/ +static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb, + unsigned int left_pkt_len, u16 ci) +{ + struct sk_buff *skb, *curr_skb = head_skb; + struct hinic_rq_wqe *rq_wqe; + unsigned int curr_len; + struct hinic_sge sge; + int num_wqes = 0; + + while (left_pkt_len > 0) { + rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, + &skb, &ci); + + num_wqes++; + + hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); + + rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); + + prefetch(skb->data); + + curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? 
HINIC_RX_BUF_SZ : + left_pkt_len; + + left_pkt_len -= curr_len; + + __skb_put(skb, curr_len); + + if (curr_skb == head_skb) + skb_shinfo(head_skb)->frag_list = skb; + else + curr_skb->next = skb; + + head_skb->len += skb->len; + head_skb->data_len += skb->len; + head_skb->truesize += skb->truesize; + + curr_skb = skb; + } + + return num_wqes; +} + +/** + * rxq_recv - Rx handler + * @rxq: rx queue + * @budget: maximum pkts to process + * + * Return number of pkts received + **/ +static int rxq_recv(struct hinic_rxq *rxq, int budget) +{ + struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq); + u64 pkt_len = 0, rx_bytes = 0; + struct hinic_rq_wqe *rq_wqe; + int num_wqes, pkts = 0; + struct hinic_sge sge; + struct sk_buff *skb; + u16 ci; + + while (pkts < budget) { + num_wqes = 0; + + rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb, + &ci); + if (!rq_wqe) + break; + + hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge); + + rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); + + prefetch(skb->data); + + pkt_len = sge.len; + + if (pkt_len <= HINIC_RX_BUF_SZ) { + __skb_put(skb, pkt_len); + } else { + __skb_put(skb, HINIC_RX_BUF_SZ); + num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len - + HINIC_RX_BUF_SZ, ci); + } + + hinic_rq_put_wqe(rxq->rq, ci, + (num_wqes + 1) * HINIC_RQ_WQE_SIZE); + + skb_record_rx_queue(skb, qp->q_id); + skb->protocol = eth_type_trans(skb, rxq->netdev); + + napi_gro_receive(&rxq->napi, skb); + + pkts++; + rx_bytes += pkt_len; + } + + if (pkts) + tasklet_schedule(&rxq->rx_task); /* hinic_rx_alloc_pkts */ + + u64_stats_update_begin(&rxq->rxq_stats.syncp); + rxq->rxq_stats.pkts += pkts; + rxq->rxq_stats.bytes += rx_bytes; + u64_stats_update_end(&rxq->rxq_stats.syncp); + + return pkts; +} + +static int rx_poll(struct napi_struct *napi, int budget) +{ + struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi); + struct hinic_rq *rq = rxq->rq; + int pkts; + + pkts = rxq_recv(rxq, budget); + if (pkts >= budget) + return budget; + + napi_complete(napi); + enable_irq(rq->irq); + return pkts; +} + +static void rx_add_napi(struct hinic_rxq *rxq) +{ + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + + netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight); + napi_enable(&rxq->napi); +} + +static void rx_del_napi(struct hinic_rxq *rxq) +{ + napi_disable(&rxq->napi); + netif_napi_del(&rxq->napi); +} + +static irqreturn_t rx_irq(int irq, void *data) +{ + struct hinic_rxq *rxq = (struct hinic_rxq *)data; + struct hinic_rq *rq = rxq->rq; + struct hinic_dev *nic_dev; + + /* Disable the interrupt until napi will be completed */ + disable_irq_nosync(rq->irq); + + nic_dev = netdev_priv(rxq->netdev); + hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry); + + napi_schedule(&rxq->napi); + return IRQ_HANDLED; +} + +static int rx_request_irq(struct hinic_rxq *rxq) +{ + struct hinic_dev *nic_dev = netdev_priv(rxq->netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_rq *rq = rxq->rq; + int err; + + rx_add_napi(rxq); + + hinic_hwdev_msix_set(hwdev, rq->msix_entry, + RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC, + RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT, + RX_IRQ_NO_RESEND_TIMER); + + err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq); + if (err) { + rx_del_napi(rxq); + return err; + } + + return 0; +} + +static void rx_free_irq(struct hinic_rxq *rxq) +{ + struct hinic_rq *rq = rxq->rq; + + free_irq(rq->irq, rxq); + rx_del_napi(rxq); +} + /** * hinic_init_rxq - Initialize the Rx Queue * @rxq: Logical Rx Queue @@ -56,11 +435,43 @@ static void 
rxq_stats_init(struct hinic_rxq *rxq) int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, struct net_device *netdev) { + struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq); + int err, pkts, irqname_len; + rxq->netdev = netdev; rxq->rq = rq; rxq_stats_init(rxq); + + irqname_len = snprintf(NULL, 0, "hinic_rxq%d", qp->q_id) + 1; + rxq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); + if (!rxq->irq_name) + return -ENOMEM; + + sprintf(rxq->irq_name, "hinic_rxq%d", qp->q_id); + + tasklet_init(&rxq->rx_task, rx_alloc_task, (unsigned long)rxq); + + pkts = rx_alloc_pkts(rxq); + if (!pkts) { + err = -ENOMEM; + goto err_rx_pkts; + } + + err = rx_request_irq(rxq); + if (err) { + netdev_err(netdev, "Failed to request Rx irq\n"); + goto err_req_rx_irq; + } + return 0; + +err_req_rx_irq: +err_rx_pkts: + tasklet_kill(&rxq->rx_task); + free_all_rx_skbs(rxq); + devm_kfree(&netdev->dev, rxq->irq_name); + return err; } /** @@ -69,4 +480,11 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, **/ void hinic_clean_rxq(struct hinic_rxq *rxq) { + struct net_device *netdev = rxq->netdev; + + rx_free_irq(rxq); + + tasklet_kill(&rxq->rx_task); + free_all_rx_skbs(rxq); + devm_kfree(&netdev->dev, rxq->irq_name); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h index fbd0246165dc..538c8861e8dd 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h @@ -19,6 +19,7 @@ #include #include #include +#include #include "hinic_hw_qp.h" @@ -34,6 +35,12 @@ struct hinic_rxq { struct hinic_rq *rq; struct hinic_rxq_stats rxq_stats; + + char *irq_name; + + struct tasklet_struct rx_task; + + struct napi_struct napi; }; void hinic_rxq_clean_stats(struct hinic_rxq *rxq); -- cgit v1.2.3-55-g7522 From 00e57a6d4ad345a3910cfd24a5403d49a70d7705 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:56:05 +0800 Subject: net-next/hinic: Add Tx operation Add transmit operation for sending data by qp operations. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 1 + drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 46 +++ drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 22 ++ drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c | 257 +++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 48 +++ drivers/net/ethernet/huawei/hinic/hinic_main.c | 12 +- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 406 +++++++++++++++++++++++ drivers/net/ethernet/huawei/hinic/hinic_tx.h | 11 + 8 files changed, 799 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index 3d0f6cf2508f..15d0c2e3797c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -43,6 +43,7 @@ struct hinic_dev { struct hinic_hwdev *hwdev; u32 msg_enable; + unsigned int tx_weight; unsigned int rx_weight; unsigned int flags; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 77d43431e573..09dec6de8dd5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -40,6 +40,8 @@ #define MAX_IRQS(max_qps, num_aeqs, num_ceqs) \ (2 * (max_qps) + (num_aeqs) + (num_ceqs)) +#define ADDR_IN_4BYTES(addr) ((addr) >> 2) + enum intr_type { INTR_MSIX_TYPE, }; @@ -1000,3 +1002,47 @@ int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 msix_index, lli_timer_cfg, lli_credit_limit, resend_timer); } + +/** + * hinic_hwdev_hw_ci_addr_set - set cons idx addr and attributes in HW for sq + * @hwdev: the NIC HW device + * @sq: send queue + * @pending_limit: the maximum pending update ci events (unit 8) + * @coalesc_timer: coalesc period for update ci (unit 8 us) + * + * Return 0 - Success, negative - Failure + **/ +int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, + u8 pending_limit, u8 coalesc_timer) +{ + struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_pfhwdev *pfhwdev; + struct hinic_cmd_hw_ci hw_ci; + + if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) { + dev_err(&pdev->dev, "Unsupported PCI Function type\n"); + return -EINVAL; + } + + hw_ci.dma_attr_off = 0; + hw_ci.pending_limit = pending_limit; + hw_ci.coalesc_timer = coalesc_timer; + + hw_ci.msix_en = 1; + hw_ci.msix_entry_idx = sq->msix_entry; + + hw_ci.func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + hw_ci.sq_id = qp->q_id; + + hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr); + + pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev); + return hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, + HINIC_MOD_COMM, + HINIC_COMM_CMD_SQ_HI_CI_SET, + &hw_ci, sizeof(hw_ci), NULL, + NULL, HINIC_MGMT_MSG_SYNC); +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index e7277d19db58..0f5563f3b779 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -153,6 +153,25 @@ struct hinic_cmd_base_qpn { u16 qpn; }; +struct hinic_cmd_hw_ci { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + + u8 dma_attr_off; + u8 pending_limit; + u8 coalesc_timer; + + u8 msix_en; + u16 msix_entry_idx; + + u32 sq_id; + u32 rsvd1; + u64 ci_addr; +}; + struct hinic_hwdev { struct hinic_hwif *hwif; struct msix_entry *msix_entries; @@ -214,4 +233,7 @@ int hinic_hwdev_msix_set(struct hinic_hwdev *hwdev, u16 
msix_index, u8 lli_timer_cfg, u8 lli_credit_limit, u8 resend_timer); +int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq, + u8 pending_limit, u8 coalesc_timer); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c index 6c5c6ea0ab49..b9db6d649743 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -32,6 +33,7 @@ #include "hinic_hw_wq.h" #include "hinic_hw_qp_ctxt.h" #include "hinic_hw_qp.h" +#include "hinic_hw_io.h" #define SQ_DB_OFF SZ_2K @@ -53,11 +55,27 @@ (((max_rqs) + (max_sqs)) * CTXT_RSVD + \ (max_sqs + (q_id)) * Q_CTXT_SIZE) -#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4) -#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3) +#define SIZE_16BYTES(size) (ALIGN(size, 16) >> 4) +#define SIZE_8BYTES(size) (ALIGN(size, 8) >> 3) +#define SECT_SIZE_FROM_8BYTES(size) ((size) << 3) +#define SQ_DB_PI_HI_SHIFT 8 +#define SQ_DB_PI_HI(prod_idx) ((prod_idx) >> SQ_DB_PI_HI_SHIFT) + +#define SQ_DB_PI_LOW_MASK 0xFF +#define SQ_DB_PI_LOW(prod_idx) ((prod_idx) & SQ_DB_PI_LOW_MASK) + +#define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi)) + +#define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask) #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask) +#define TX_MAX_MSS_DEFAULT 0x3E00 + +enum sq_wqe_type { + SQ_NORMAL_WQE = 0, +}; + enum rq_completion_fmt { RQ_COMPLETE_SGE = 1 }; @@ -435,6 +453,19 @@ void hinic_clean_rq(struct hinic_rq *rq) free_rq_skb_arr(rq); } +/** + * hinic_get_sq_free_wqebbs - return number of free wqebbs for use + * @sq: send queue + * + * Return number of free wqebbs + **/ +int hinic_get_sq_free_wqebbs(struct hinic_sq *sq) +{ + struct hinic_wq *wq = sq->wq; + + return atomic_read(&wq->delta) - 1; +} + /** * hinic_get_rq_free_wqebbs - return number of free wqebbs for use * @rq: recv queue @@ -448,6 +479,228 @@ int hinic_get_rq_free_wqebbs(struct hinic_rq *rq) return atomic_read(&wq->delta) - 1; } +static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx, + int nr_descs) +{ + u32 ctrl_size, task_size, bufdesc_size; + + ctrl_size = SIZE_8BYTES(sizeof(struct hinic_sq_ctrl)); + task_size = SIZE_8BYTES(sizeof(struct hinic_sq_task)); + bufdesc_size = nr_descs * sizeof(struct hinic_sq_bufdesc); + bufdesc_size = SIZE_8BYTES(bufdesc_size); + + ctrl->ctrl_info = HINIC_SQ_CTRL_SET(bufdesc_size, BUFDESC_SECT_LEN) | + HINIC_SQ_CTRL_SET(task_size, TASKSECT_LEN) | + HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) | + HINIC_SQ_CTRL_SET(ctrl_size, LEN); + + ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT, + QUEUE_INFO_MSS); +} + +static void sq_prepare_task(struct hinic_sq_task *task) +{ + task->pkt_info0 = + HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) | + HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) | + HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN, + INNER_L3TYPE) | + HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE, + VLAN_OFFLOAD) | + HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG); + + task->pkt_info1 = + HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) | + HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) | + HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN); + + task->pkt_info2 = + HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) | + HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) | + HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN, + TUNNEL_L4TYPE) | + HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN, + OUTER_L3TYPE); + + 
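The doorbell helpers a few lines above split the masked producer index in two: the low 8 bits pick which 64-bit slot of the doorbell page is written (SQ_DB_ADDR), while the high bits are packed into the 32-bit doorbell word together with the queue id, cos, path and type (the HINIC_SQ_DB_INFO_SET fields in hinic_hw_qp.h) and byte-swapped to big endian before the writel(). A minimal sketch of that packing with illustrative values; the path and type constants below stand in for HINIC_DATA_PATH and HINIC_DB_SQ_TYPE, whose real values are not shown in this patch:

#include <stdint.h>
#include <stdio.h>

/* field layout mirrors HINIC_SQ_DB_INFO_* in hinic_hw_qp.h */
#define DB_PI_HI_SHIFT   0
#define DB_QID_SHIFT     8
#define DB_PATH_SHIFT    23
#define DB_COS_SHIFT     24
#define DB_TYPE_SHIFT    27

#define DB_SET(val, shift)	((uint32_t)(val) << (shift))

#define SQ_DB_PI_HI(pi)		((pi) >> 8)	/* travels inside the doorbell word */
#define SQ_DB_PI_LOW(pi)	((pi) & 0xFF)	/* selects the doorbell slot */

int main(void)
{
	uint16_t prod_idx = 0x1A3;	/* illustrative masked PI */
	unsigned int q_id = 2, cos = 0;
	unsigned int path = 1, type = 0; /* placeholders, not the driver's constants */

	uint32_t db = DB_SET(SQ_DB_PI_HI(prod_idx), DB_PI_HI_SHIFT) |
		      DB_SET(q_id, DB_QID_SHIFT) |
		      DB_SET(path, DB_PATH_SHIFT) |
		      DB_SET(cos, DB_COS_SHIFT) |
		      DB_SET(type, DB_TYPE_SHIFT);

	/* the driver then does: writel(cpu_to_be32(db), db_base + 8 * SQ_DB_PI_LOW(pi)) */
	printf("slot offset = %u (u64 slots), db value = 0x%08x\n",
	       (unsigned)SQ_DB_PI_LOW(prod_idx), (unsigned)db);
	return 0;
}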
task->ufo_v6_identify = 0; + + task->pkt_info4 = HINIC_SQ_TASK_INFO4_SET(HINIC_L2TYPE_ETH, L2TYPE); + + task->zero_pad = 0; +} + +/** + * hinic_sq_prepare_wqe - prepare wqe before insert to the queue + * @sq: send queue + * @prod_idx: pi value + * @sq_wqe: wqe to prepare + * @sges: sges for use by the wqe for send for buf addresses + * @nr_sges: number of sges + **/ +void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx, + struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, + int nr_sges) +{ + int i; + + sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges); + + sq_prepare_task(&sq_wqe->task); + + for (i = 0; i < nr_sges; i++) + sq_wqe->buf_descs[i].sge = sges[i]; +} + +/** + * sq_prepare_db - prepare doorbell to write + * @sq: send queue + * @prod_idx: pi value for the doorbell + * @cos: cos of the doorbell + * + * Return db value + **/ +static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos) +{ + struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); + u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx)); + + /* Data should be written to HW in Big Endian Format */ + return cpu_to_be32(HINIC_SQ_DB_INFO_SET(hi_prod_idx, PI_HI) | + HINIC_SQ_DB_INFO_SET(HINIC_DB_SQ_TYPE, TYPE) | + HINIC_SQ_DB_INFO_SET(HINIC_DATA_PATH, PATH) | + HINIC_SQ_DB_INFO_SET(cos, COS) | + HINIC_SQ_DB_INFO_SET(qp->q_id, QID)); +} + +/** + * hinic_sq_write_db- write doorbell + * @sq: send queue + * @prod_idx: pi value for the doorbell + * @wqe_size: wqe size + * @cos: cos of the wqe + **/ +void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, + unsigned int cos) +{ + struct hinic_wq *wq = sq->wq; + + /* increment prod_idx to the next */ + prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + + wmb(); /* Write all before the doorbell */ + + writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx)); +} + +/** + * hinic_sq_get_wqe - get wqe ptr in the current pi and update the pi + * @sq: sq to get wqe from + * @wqe_size: wqe size + * @prod_idx: returned pi + * + * Return wqe pointer + **/ +struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, + unsigned int wqe_size, u16 *prod_idx) +{ + struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size, + prod_idx); + + if (IS_ERR(hw_wqe)) + return NULL; + + return &hw_wqe->sq_wqe; +} + +/** + * hinic_sq_write_wqe - write the wqe to the sq + * @sq: send queue + * @prod_idx: pi of the wqe + * @sq_wqe: the wqe to write + * @skb: skb to save + * @wqe_size: the size of the wqe + **/ +void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, + struct hinic_sq_wqe *sq_wqe, + struct sk_buff *skb, unsigned int wqe_size) +{ + struct hinic_hw_wqe *hw_wqe = (struct hinic_hw_wqe *)sq_wqe; + + sq->saved_skb[prod_idx] = skb; + + /* The data in the HW should be in Big Endian Format */ + hinic_cpu_to_be32(sq_wqe, wqe_size); + + hinic_write_wqe(sq->wq, hw_wqe, wqe_size); +} + +/** + * hinic_sq_read_wqe - read wqe ptr in the current ci and update the ci + * @sq: send queue + * @skb: return skb that was saved + * @wqe_size: the size of the wqe + * @cons_idx: consumer index of the wqe + * + * Return wqe in ci position + **/ +struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, + struct sk_buff **skb, + unsigned int *wqe_size, u16 *cons_idx) +{ + struct hinic_hw_wqe *hw_wqe; + struct hinic_sq_wqe *sq_wqe; + struct hinic_sq_ctrl *ctrl; + unsigned int buf_sect_len; + u32 ctrl_info; + + /* read the ctrl section for getting wqe size */ + hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx); + if 
(IS_ERR(hw_wqe)) + return NULL; + + sq_wqe = &hw_wqe->sq_wqe; + ctrl = &sq_wqe->ctrl; + ctrl_info = be32_to_cpu(ctrl->ctrl_info); + buf_sect_len = HINIC_SQ_CTRL_GET(ctrl_info, BUFDESC_SECT_LEN); + + *wqe_size = sizeof(*ctrl) + sizeof(sq_wqe->task); + *wqe_size += SECT_SIZE_FROM_8BYTES(buf_sect_len); + + *skb = sq->saved_skb[*cons_idx]; + + /* using the real wqe size to read wqe again */ + hw_wqe = hinic_read_wqe(sq->wq, *wqe_size, cons_idx); + + return &hw_wqe->sq_wqe; +} + +/** + * hinic_sq_put_wqe - release the ci for new wqes + * @sq: send queue + * @wqe_size: the size of the wqe + **/ +void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size) +{ + hinic_put_wqe(sq->wq, wqe_size); +} + +/** + * hinic_sq_get_sges - get sges from the wqe + * @sq_wqe: wqe to get the sges from its buffer addresses + * @sges: returned sges + * @nr_sges: number sges to return + **/ +void hinic_sq_get_sges(struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges, + int nr_sges) +{ + int i; + + for (i = 0; i < nr_sges && i < HINIC_MAX_SQ_BUFDESCS; i++) { + sges[i] = sq_wqe->buf_descs[i].sge; + hinic_be32_to_cpu(&sges[i], sizeof(sges[i])); + } +} + /** * hinic_rq_get_wqe - get wqe ptr in the current pi and update the pi * @rq: rq to get wqe from diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h index 696f0df6559a..e642a8a8cb7f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h @@ -16,6 +16,7 @@ #ifndef HINIC_HW_QP_H #define HINIC_HW_QP_H +#include #include #include #include @@ -27,6 +28,22 @@ #include "hinic_hw_wq.h" #include "hinic_hw_qp_ctxt.h" +#define HINIC_SQ_DB_INFO_PI_HI_SHIFT 0 +#define HINIC_SQ_DB_INFO_QID_SHIFT 8 +#define HINIC_SQ_DB_INFO_PATH_SHIFT 23 +#define HINIC_SQ_DB_INFO_COS_SHIFT 24 +#define HINIC_SQ_DB_INFO_TYPE_SHIFT 27 + +#define HINIC_SQ_DB_INFO_PI_HI_MASK 0xFF +#define HINIC_SQ_DB_INFO_QID_MASK 0x3FF +#define HINIC_SQ_DB_INFO_PATH_MASK 0x1 +#define HINIC_SQ_DB_INFO_COS_MASK 0x7 +#define HINIC_SQ_DB_INFO_TYPE_MASK 0x1F + +#define HINIC_SQ_DB_INFO_SET(val, member) \ + (((u32)(val) & HINIC_SQ_DB_INFO_##member##_MASK) \ + << HINIC_SQ_DB_INFO_##member##_SHIFT) + #define HINIC_SQ_WQEBB_SIZE 64 #define HINIC_RQ_WQEBB_SIZE 32 @@ -38,6 +55,12 @@ #define HINIC_RX_BUF_SZ 2048 +#define HINIC_MIN_TX_WQE_SIZE(wq) \ + ALIGN(HINIC_SQ_WQE_SIZE(1), (wq)->wqebb_size) + +#define HINIC_MIN_TX_NUM_WQEBBS(sq) \ + (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size) + struct hinic_sq { struct hinic_hwif *hwif; @@ -101,8 +124,33 @@ int hinic_init_rq(struct hinic_rq *rq, struct hinic_hwif *hwif, void hinic_clean_rq(struct hinic_rq *rq); +int hinic_get_sq_free_wqebbs(struct hinic_sq *sq); + int hinic_get_rq_free_wqebbs(struct hinic_rq *rq); +void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx, + struct hinic_sq_wqe *wqe, struct hinic_sge *sges, + int nr_sges); + +void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size, + unsigned int cos); + +struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq, + unsigned int wqe_size, u16 *prod_idx); + +void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx, + struct hinic_sq_wqe *wqe, struct sk_buff *skb, + unsigned int wqe_size); + +struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq, + struct sk_buff **skb, + unsigned int *wqe_size, u16 *cons_idx); + +void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size); + +void hinic_sq_get_sges(struct hinic_sq_wqe *wqe, struct hinic_sge *sges, + int 
nr_sges); + struct hinic_rq_wqe *hinic_rq_get_wqe(struct hinic_rq *rq, unsigned int wqe_size, u16 *prod_idx); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 53b13f8d0b8f..599d8b590e9a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -43,6 +43,10 @@ MODULE_AUTHOR("Huawei Technologies CO., Ltd"); MODULE_DESCRIPTION("Huawei Intelligent NIC driver"); MODULE_LICENSE("GPL"); +static unsigned int tx_weight = 64; +module_param(tx_weight, uint, 0644); +MODULE_PARM_DESC(tx_weight, "Number Tx packets for NAPI budget (default=64)"); + static unsigned int rx_weight = 64; module_param(rx_weight, uint, 0644); MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); @@ -569,9 +573,11 @@ static void hinic_set_rx_mode(struct net_device *netdev) queue_work(nic_dev->workq, &rx_mode_work->work); } -netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +static void hinic_tx_timeout(struct net_device *netdev) { - return NETDEV_TX_BUSY; + struct hinic_dev *nic_dev = netdev_priv(netdev); + + netif_err(nic_dev, drv, netdev, "Tx timeout\n"); } static const struct net_device_ops hinic_netdev_ops = { @@ -584,6 +590,7 @@ static const struct net_device_ops hinic_netdev_ops = { .ndo_vlan_rx_kill_vid = hinic_vlan_rx_kill_vid, .ndo_set_rx_mode = hinic_set_rx_mode, .ndo_start_xmit = hinic_xmit_frame, + .ndo_tx_timeout = hinic_tx_timeout, /* more operations should be filled */ }; @@ -690,6 +697,7 @@ static int nic_dev_init(struct pci_dev *pdev) nic_dev->flags = 0; nic_dev->txqs = NULL; nic_dev->rxqs = NULL; + nic_dev->tx_weight = tx_weight; nic_dev->rx_weight = rx_weight; sema_init(&nic_dev->mgmt_lock, 1); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 9835912039c9..90ab2d971383 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -13,12 +13,42 @@ * */ +#include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hinic_common.h" +#include "hinic_hw_if.h" +#include "hinic_hw_wqe.h" +#include "hinic_hw_wq.h" #include "hinic_hw_qp.h" +#include "hinic_hw_dev.h" +#include "hinic_dev.h" #include "hinic_tx.h" +#define TX_IRQ_NO_PENDING 0 +#define TX_IRQ_NO_COALESC 0 +#define TX_IRQ_NO_LLI_TIMER 0 +#define TX_IRQ_NO_CREDIT 0 +#define TX_IRQ_NO_RESEND_TIMER 0 + +#define CI_UPDATE_NO_PENDING 0 +#define CI_UPDATE_NO_COALESC 0 + +#define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr)) + +#define MIN_SKB_LEN 64 + /** * hinic_txq_clean_stats - Clean the statistics of specific queue * @txq: Logical Tx Queue @@ -48,6 +78,321 @@ static void txq_stats_init(struct hinic_txq *txq) hinic_txq_clean_stats(txq); } +/** + * tx_map_skb - dma mapping for skb and return sges + * @nic_dev: nic device + * @skb: the skb + * @sges: returned sges + * + * Return 0 - Success, negative - Failure + **/ +static int tx_map_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, + struct hinic_sge *sges) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct skb_frag_struct *frag; + dma_addr_t dma_addr; + int i, j; + + dma_addr = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_addr)) { + dev_err(&pdev->dev, "Failed to map Tx skb data\n"); + 
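/*
 * Condensed sketch (illustrative only; error handling, stats and skb padding
 * elided, and the function name is invented) of how the SQ API added in this
 * series is meant to be used on the transmit path. The real transmit path is
 * hinic_xmit_frame() below; completion uses hinic_sq_read_wqe(),
 * hinic_sq_get_sges() and hinic_sq_put_wqe().
 */
static netdev_tx_t example_sq_xmit(struct hinic_sq *sq, struct sk_buff *skb,
				   struct hinic_sge *sges, int nr_sges)
{
	unsigned int wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);
	struct hinic_sq_wqe *wqe;
	u16 pi;

	wqe = hinic_sq_get_wqe(sq, wqe_size, &pi);	/* reserve wqebbs, return pi */
	if (!wqe)
		return NETDEV_TX_BUSY;			/* queue full */

	hinic_sq_prepare_wqe(sq, pi, wqe, sges, nr_sges); /* fill ctrl, task, sges */
	hinic_sq_write_wqe(sq, pi, wqe, skb, wqe_size);	  /* save skb, convert to BE */
	hinic_sq_write_db(sq, pi, wqe_size, 0);		  /* ring the doorbell, cos 0 */

	return NETDEV_TX_OK;
}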
return -EFAULT; + } + + hinic_set_sge(&sges[0], dma_addr, skb_headlen(skb)); + + for (i = 0 ; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + + dma_addr = skb_frag_dma_map(&pdev->dev, frag, 0, + skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, dma_addr)) { + dev_err(&pdev->dev, "Failed to map Tx skb frag\n"); + goto err_tx_map; + } + + hinic_set_sge(&sges[i + 1], dma_addr, skb_frag_size(frag)); + } + + return 0; + +err_tx_map: + for (j = 0; j < i; j++) + dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[j + 1]), + sges[j + 1].len, DMA_TO_DEVICE); + + dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len, + DMA_TO_DEVICE); + return -EFAULT; +} + +/** + * tx_unmap_skb - unmap the dma address of the skb + * @nic_dev: nic device + * @skb: the skb + * @sges: the sges that are connected to the skb + **/ +static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, + struct hinic_sge *sges) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags ; i++) + dma_unmap_page(&pdev->dev, hinic_sge_to_dma(&sges[i + 1]), + sges[i + 1].len, DMA_TO_DEVICE); + + dma_unmap_single(&pdev->dev, hinic_sge_to_dma(&sges[0]), sges[0].len, + DMA_TO_DEVICE); +} + +netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct netdev_queue *netdev_txq; + int nr_sges, err = NETDEV_TX_OK; + struct hinic_sq_wqe *sq_wqe; + unsigned int wqe_size; + struct hinic_txq *txq; + struct hinic_qp *qp; + u16 prod_idx; + + txq = &nic_dev->txqs[skb->queue_mapping]; + qp = container_of(txq->sq, struct hinic_qp, sq); + + if (skb->len < MIN_SKB_LEN) { + if (skb_pad(skb, MIN_SKB_LEN - skb->len)) { + netdev_err(netdev, "Failed to pad skb\n"); + goto skb_error; + } + + skb->len = MIN_SKB_LEN; + } + + nr_sges = skb_shinfo(skb)->nr_frags + 1; + if (nr_sges > txq->max_sges) { + netdev_err(netdev, "Too many Tx sges\n"); + goto skb_error; + } + + err = tx_map_skb(nic_dev, skb, txq->sges); + if (err) + goto skb_error; + + wqe_size = HINIC_SQ_WQE_SIZE(nr_sges); + + sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx); + if (!sq_wqe) { + tx_unmap_skb(nic_dev, skb, txq->sges); + + netif_stop_subqueue(netdev, qp->q_id); + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.tx_busy++; + u64_stats_update_end(&txq->txq_stats.syncp); + err = NETDEV_TX_BUSY; + goto flush_skbs; + } + + hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges); + + hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size); + +flush_skbs: + netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping); + if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq))) + hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0); + + return err; + +skb_error: + dev_kfree_skb_any(skb); + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.tx_dropped++; + u64_stats_update_end(&txq->txq_stats.syncp); + return err; +} + +/** + * tx_free_skb - unmap and free skb + * @nic_dev: nic device + * @skb: the skb + * @sges: the sges that are connected to the skb + **/ +static void tx_free_skb(struct hinic_dev *nic_dev, struct sk_buff *skb, + struct hinic_sge *sges) +{ + tx_unmap_skb(nic_dev, skb, sges); + + dev_kfree_skb_any(skb); +} + +/** + * free_all_rx_skbs - free all skbs in tx queue + * @txq: tx queue + **/ +static void free_all_tx_skbs(struct hinic_txq *txq) +{ + struct 
hinic_dev *nic_dev = netdev_priv(txq->netdev); + struct hinic_sq *sq = txq->sq; + struct hinic_sq_wqe *sq_wqe; + unsigned int wqe_size; + struct sk_buff *skb; + int nr_sges; + u16 ci; + + while ((sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &ci))) { + nr_sges = skb_shinfo(skb)->nr_frags + 1; + + hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); + + hinic_sq_put_wqe(sq, wqe_size); + + tx_free_skb(nic_dev, skb, txq->free_sges); + } +} + +/** + * free_tx_poll - free finished tx skbs in tx queue that connected to napi + * @napi: napi + * @budget: number of tx + * + * Return 0 - Success, negative - Failure + **/ +static int free_tx_poll(struct napi_struct *napi, int budget) +{ + struct hinic_txq *txq = container_of(napi, struct hinic_txq, napi); + struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq); + struct hinic_dev *nic_dev = netdev_priv(txq->netdev); + struct netdev_queue *netdev_txq; + struct hinic_sq *sq = txq->sq; + struct hinic_wq *wq = sq->wq; + struct hinic_sq_wqe *sq_wqe; + unsigned int wqe_size; + int nr_sges, pkts = 0; + struct sk_buff *skb; + u64 tx_bytes = 0; + u16 hw_ci, sw_ci; + + do { + hw_ci = HW_CONS_IDX(sq) & wq->mask; + + sq_wqe = hinic_sq_read_wqe(sq, &skb, &wqe_size, &sw_ci); + if ((!sq_wqe) || + (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size)) + break; + + tx_bytes += skb->len; + pkts++; + + nr_sges = skb_shinfo(skb)->nr_frags + 1; + + hinic_sq_get_sges(sq_wqe, txq->free_sges, nr_sges); + + hinic_sq_put_wqe(sq, wqe_size); + + tx_free_skb(nic_dev, skb, txq->free_sges); + } while (pkts < budget); + + if (__netif_subqueue_stopped(nic_dev->netdev, qp->q_id) && + hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) { + netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id); + + __netif_tx_lock(netdev_txq, smp_processor_id()); + + netif_wake_subqueue(nic_dev->netdev, qp->q_id); + + __netif_tx_unlock(netdev_txq); + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.tx_wake++; + u64_stats_update_end(&txq->txq_stats.syncp); + } + + u64_stats_update_begin(&txq->txq_stats.syncp); + txq->txq_stats.bytes += tx_bytes; + txq->txq_stats.pkts += pkts; + u64_stats_update_end(&txq->txq_stats.syncp); + + if (pkts < budget) { + napi_complete(napi); + enable_irq(sq->irq); + return pkts; + } + + return budget; +} + +static void tx_napi_add(struct hinic_txq *txq, int weight) +{ + netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight); + napi_enable(&txq->napi); +} + +static void tx_napi_del(struct hinic_txq *txq) +{ + napi_disable(&txq->napi); + netif_napi_del(&txq->napi); +} + +static irqreturn_t tx_irq(int irq, void *data) +{ + struct hinic_txq *txq = data; + struct hinic_dev *nic_dev; + + nic_dev = netdev_priv(txq->netdev); + + /* Disable the interrupt until napi will be completed */ + disable_irq_nosync(txq->sq->irq); + + hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry); + + napi_schedule(&txq->napi); + return IRQ_HANDLED; +} + +static int tx_request_irq(struct hinic_txq *txq) +{ + struct hinic_dev *nic_dev = netdev_priv(txq->netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + struct hinic_sq *sq = txq->sq; + int err; + + tx_napi_add(txq, nic_dev->tx_weight); + + hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry, + TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC, + TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT, + TX_IRQ_NO_RESEND_TIMER); + + err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq); + if (err) { + dev_err(&pdev->dev, "Failed to request 
Tx irq\n"); + tx_napi_del(txq); + return err; + } + + return 0; +} + +static void tx_free_irq(struct hinic_txq *txq) +{ + struct hinic_sq *sq = txq->sq; + + free_irq(sq->irq, txq); + tx_napi_del(txq); +} + /** * hinic_init_txq - Initialize the Tx Queue * @txq: Logical Tx Queue @@ -59,11 +404,63 @@ static void txq_stats_init(struct hinic_txq *txq) int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, struct net_device *netdev) { + struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq); + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + int err, irqname_len; + size_t sges_size; + txq->netdev = netdev; txq->sq = sq; txq_stats_init(txq); + + txq->max_sges = HINIC_MAX_SQ_BUFDESCS; + + sges_size = txq->max_sges * sizeof(*txq->sges); + txq->sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); + if (!txq->sges) + return -ENOMEM; + + sges_size = txq->max_sges * sizeof(*txq->free_sges); + txq->free_sges = devm_kzalloc(&netdev->dev, sges_size, GFP_KERNEL); + if (!txq->free_sges) { + err = -ENOMEM; + goto err_alloc_free_sges; + } + + irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1; + txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL); + if (!txq->irq_name) { + err = -ENOMEM; + goto err_alloc_irqname; + } + + sprintf(txq->irq_name, "hinic_txq%d", qp->q_id); + + err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING, + CI_UPDATE_NO_COALESC); + if (err) + goto err_hw_ci; + + err = tx_request_irq(txq); + if (err) { + netdev_err(netdev, "Failed to request Tx irq\n"); + goto err_req_tx_irq; + } + return 0; + +err_req_tx_irq: +err_hw_ci: + devm_kfree(&netdev->dev, txq->irq_name); + +err_alloc_irqname: + devm_kfree(&netdev->dev, txq->free_sges); + +err_alloc_free_sges: + devm_kfree(&netdev->dev, txq->sges); + return err; } /** @@ -72,4 +469,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, **/ void hinic_clean_txq(struct hinic_txq *txq) { + struct net_device *netdev = txq->netdev; + + tx_free_irq(txq); + + free_all_tx_skbs(txq); + + devm_kfree(&netdev->dev, txq->irq_name); + devm_kfree(&netdev->dev, txq->free_sges); + devm_kfree(&netdev->dev, txq->sges); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h index bbdb4b62d940..7123c7f7e06a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h @@ -18,8 +18,10 @@ #include #include +#include #include +#include "hinic_common.h" #include "hinic_hw_qp.h" struct hinic_txq_stats { @@ -37,10 +39,19 @@ struct hinic_txq { struct hinic_sq *sq; struct hinic_txq_stats txq_stats; + + int max_sges; + struct hinic_sge *sges; + struct hinic_sge *free_sges; + + char *irq_name; + struct napi_struct napi; }; void hinic_txq_clean_stats(struct hinic_txq *txq); +netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); + int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, struct net_device *netdev); -- cgit v1.2.3-55-g7522 From edd384f682cc2981420628b769a1929db680f02f Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:56:06 +0800 Subject: net-next/hinic: Add ethtool and stats Add ethtool operations and statistics operations. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. 
Miller --- drivers/net/ethernet/huawei/hinic/hinic_dev.h | 3 + drivers/net/ethernet/huawei/hinic/hinic_main.c | 218 ++++++++++++++++++++++++- drivers/net/ethernet/huawei/hinic/hinic_port.c | 31 ++++ drivers/net/ethernet/huawei/hinic/hinic_port.h | 45 +++++ drivers/net/ethernet/huawei/hinic/hinic_rx.c | 19 +++ drivers/net/ethernet/huawei/hinic/hinic_rx.h | 2 + drivers/net/ethernet/huawei/hinic/hinic_tx.c | 22 +++ drivers/net/ethernet/huawei/hinic/hinic_tx.h | 2 + 8 files changed, 341 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h index 15d0c2e3797c..5186cc9023aa 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h @@ -56,6 +56,9 @@ struct hinic_dev { struct hinic_txq *txqs; struct hinic_rxq *rxqs; + + struct hinic_txq_stats tx_stats; + struct hinic_rxq_stats rx_stats; }; #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 599d8b590e9a..a417ca2d441c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -69,6 +69,186 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)"); static int change_mac_addr(struct net_device *netdev, const u8 *addr); +static void set_link_speed(struct ethtool_link_ksettings *link_ksettings, + enum hinic_speed speed) +{ + switch (speed) { + case HINIC_SPEED_10MB_LINK: + link_ksettings->base.speed = SPEED_10; + break; + + case HINIC_SPEED_100MB_LINK: + link_ksettings->base.speed = SPEED_100; + break; + + case HINIC_SPEED_1000MB_LINK: + link_ksettings->base.speed = SPEED_1000; + break; + + case HINIC_SPEED_10GB_LINK: + link_ksettings->base.speed = SPEED_10000; + break; + + case HINIC_SPEED_25GB_LINK: + link_ksettings->base.speed = SPEED_25000; + break; + + case HINIC_SPEED_40GB_LINK: + link_ksettings->base.speed = SPEED_40000; + break; + + case HINIC_SPEED_100GB_LINK: + link_ksettings->base.speed = SPEED_100000; + break; + + default: + link_ksettings->base.speed = SPEED_UNKNOWN; + break; + } +} + +static int hinic_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings + *link_ksettings) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + enum hinic_port_link_state link_state; + struct hinic_port_cap port_cap; + int err; + + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Autoneg); + + link_ksettings->base.speed = SPEED_UNKNOWN; + link_ksettings->base.autoneg = AUTONEG_DISABLE; + link_ksettings->base.duplex = DUPLEX_UNKNOWN; + + err = hinic_port_get_cap(nic_dev, &port_cap); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to get port capabilities\n"); + return err; + } + + err = hinic_port_link_state(nic_dev, &link_state); + if (err) { + netif_err(nic_dev, drv, netdev, + "Failed to get port link state\n"); + return err; + } + + if (link_state != HINIC_LINK_STATE_UP) { + netif_info(nic_dev, drv, netdev, "No link\n"); + return err; + } + + set_link_speed(link_ksettings, port_cap.speed); + + if (!!(port_cap.autoneg_cap & HINIC_AUTONEG_SUPPORTED)) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); + + if (port_cap.autoneg_state == HINIC_AUTONEG_ACTIVE) + link_ksettings->base.autoneg = AUTONEG_ENABLE; + + link_ksettings->base.duplex = (port_cap.duplex == HINIC_DUPLEX_FULL) ? 
+ DUPLEX_FULL : DUPLEX_HALF; + return 0; +} + +static void hinic_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + + strlcpy(info->driver, HINIC_DRV_NAME, sizeof(info->driver)); + strlcpy(info->bus_info, pci_name(hwif->pdev), sizeof(info->bus_info)); +} + +static void hinic_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + ring->rx_max_pending = HINIC_RQ_DEPTH; + ring->tx_max_pending = HINIC_SQ_DEPTH; + ring->rx_pending = HINIC_RQ_DEPTH; + ring->tx_pending = HINIC_SQ_DEPTH; +} + +static void hinic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_hwdev *hwdev = nic_dev->hwdev; + + channels->max_rx = hwdev->nic_cap.max_qps; + channels->max_tx = hwdev->nic_cap.max_qps; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = hinic_hwdev_num_qps(hwdev); + channels->tx_count = hinic_hwdev_num_qps(hwdev); + channels->other_count = 0; + channels->combined_count = 0; +} + +static const struct ethtool_ops hinic_ethtool_ops = { + .get_link_ksettings = hinic_get_link_ksettings, + .get_drvinfo = hinic_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = hinic_get_ringparam, + .get_channels = hinic_get_channels, +}; + +static void update_rx_stats(struct hinic_dev *nic_dev, struct hinic_rxq *rxq) +{ + struct hinic_rxq_stats *nic_rx_stats = &nic_dev->rx_stats; + struct hinic_rxq_stats rx_stats; + + u64_stats_init(&rx_stats.syncp); + + hinic_rxq_get_stats(rxq, &rx_stats); + + u64_stats_update_begin(&nic_rx_stats->syncp); + nic_rx_stats->bytes += rx_stats.bytes; + nic_rx_stats->pkts += rx_stats.pkts; + u64_stats_update_end(&nic_rx_stats->syncp); + + hinic_rxq_clean_stats(rxq); +} + +static void update_tx_stats(struct hinic_dev *nic_dev, struct hinic_txq *txq) +{ + struct hinic_txq_stats *nic_tx_stats = &nic_dev->tx_stats; + struct hinic_txq_stats tx_stats; + + u64_stats_init(&tx_stats.syncp); + + hinic_txq_get_stats(txq, &tx_stats); + + u64_stats_update_begin(&nic_tx_stats->syncp); + nic_tx_stats->bytes += tx_stats.bytes; + nic_tx_stats->pkts += tx_stats.pkts; + nic_tx_stats->tx_busy += tx_stats.tx_busy; + nic_tx_stats->tx_wake += tx_stats.tx_wake; + nic_tx_stats->tx_dropped += tx_stats.tx_dropped; + u64_stats_update_end(&nic_tx_stats->syncp); + + hinic_txq_clean_stats(txq); +} + +static void update_nic_stats(struct hinic_dev *nic_dev) +{ + int i, num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); + + for (i = 0; i < num_qps; i++) + update_rx_stats(nic_dev, &nic_dev->rxqs[i]); + + for (i = 0; i < num_qps; i++) + update_tx_stats(nic_dev, &nic_dev->txqs[i]); +} + /** * create_txqs - Create the Logical Tx Queues of specific NIC device * @nic_dev: the specific NIC device @@ -303,6 +483,8 @@ static int hinic_close(struct net_device *netdev) netif_carrier_off(netdev); netif_tx_disable(netdev); + update_nic_stats(nic_dev); + up(&nic_dev->mgmt_lock); err = hinic_port_set_func_state(nic_dev, HINIC_FUNC_PORT_DISABLE); @@ -580,6 +762,31 @@ static void hinic_tx_timeout(struct net_device *netdev) netif_err(nic_dev, drv, netdev, "Tx timeout\n"); } +static void hinic_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + struct hinic_rxq_stats *nic_rx_stats; + struct hinic_txq_stats *nic_tx_stats; + + nic_rx_stats = 
&nic_dev->rx_stats; + nic_tx_stats = &nic_dev->tx_stats; + + down(&nic_dev->mgmt_lock); + + if (nic_dev->flags & HINIC_INTF_UP) + update_nic_stats(nic_dev); + + up(&nic_dev->mgmt_lock); + + stats->rx_bytes = nic_rx_stats->bytes; + stats->rx_packets = nic_rx_stats->pkts; + + stats->tx_bytes = nic_tx_stats->bytes; + stats->tx_packets = nic_tx_stats->pkts; + stats->tx_errors = nic_tx_stats->tx_dropped; +} + static const struct net_device_ops hinic_netdev_ops = { .ndo_open = hinic_open, .ndo_stop = hinic_close, @@ -591,7 +798,7 @@ static const struct net_device_ops hinic_netdev_ops = { .ndo_set_rx_mode = hinic_set_rx_mode, .ndo_start_xmit = hinic_xmit_frame, .ndo_tx_timeout = hinic_tx_timeout, - /* more operations should be filled */ + .ndo_get_stats64 = hinic_get_stats64, }; static void netdev_features_init(struct net_device *netdev) @@ -663,6 +870,8 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size, static int nic_dev_init(struct pci_dev *pdev) { struct hinic_rx_mode_work *rx_mode_work; + struct hinic_txq_stats *tx_stats; + struct hinic_rxq_stats *rx_stats; struct hinic_dev *nic_dev; struct net_device *netdev; struct hinic_hwdev *hwdev; @@ -689,6 +898,7 @@ static int nic_dev_init(struct pci_dev *pdev) } netdev->netdev_ops = &hinic_netdev_ops; + netdev->ethtool_ops = &hinic_ethtool_ops; nic_dev = netdev_priv(netdev); nic_dev->netdev = netdev; @@ -702,6 +912,12 @@ static int nic_dev_init(struct pci_dev *pdev) sema_init(&nic_dev->mgmt_lock, 1); + tx_stats = &nic_dev->tx_stats; + rx_stats = &nic_dev->rx_stats; + + u64_stats_init(&tx_stats->syncp); + u64_stats_init(&rx_stats->syncp); + nic_dev->vlan_bitmap = devm_kzalloc(&pdev->dev, VLAN_BITMAP_SIZE(nic_dev), GFP_KERNEL); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index 528ec6febd04..4d4e3f05fb5f 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -346,3 +346,34 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev, return 0; } + +/** + * hinic_port_get_cap - get port capabilities + * @nic_dev: nic device + * @port_cap: returned port capabilities + * + * Return 0 - Success, negative - Failure + **/ +int hinic_port_get_cap(struct hinic_dev *nic_dev, + struct hinic_port_cap *port_cap) +{ + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif = hwdev->hwif; + struct pci_dev *pdev = hwif->pdev; + u16 out_size; + int err; + + port_cap->func_idx = HINIC_HWIF_FUNC_IDX(hwif); + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_CAP, + port_cap, sizeof(*port_cap), + port_cap, &out_size); + if (err || (out_size != sizeof(*port_cap)) || port_cap->status) { + dev_err(&pdev->dev, + "Failed to get port capabilities, ret = %d\n", + port_cap->status); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index 17f9d7fc5a0a..9404365195dd 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -45,6 +45,33 @@ enum hinic_func_port_state { HINIC_FUNC_PORT_ENABLE = 2, }; +enum hinic_autoneg_cap { + HINIC_AUTONEG_UNSUPPORTED, + HINIC_AUTONEG_SUPPORTED, +}; + +enum hinic_autoneg_state { + HINIC_AUTONEG_DISABLED, + HINIC_AUTONEG_ACTIVE, +}; + +enum hinic_duplex { + HINIC_DUPLEX_HALF, + HINIC_DUPLEX_FULL, +}; + +enum hinic_speed { + HINIC_SPEED_10MB_LINK = 0, + HINIC_SPEED_100MB_LINK, + HINIC_SPEED_1000MB_LINK, + HINIC_SPEED_10GB_LINK, + 
HINIC_SPEED_25GB_LINK, + HINIC_SPEED_40GB_LINK, + HINIC_SPEED_100GB_LINK, + + HINIC_SPEED_UNKNOWN = 0xFF, +}; + struct hinic_port_mac_cmd { u8 status; u8 version; @@ -125,6 +152,21 @@ struct hinic_port_func_state_cmd { u8 rsvd2[3]; }; +struct hinic_port_cap { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_idx; + u16 rsvd1; + u8 port_type; + u8 autoneg_cap; + u8 autoneg_state; + u8 duplex; + u8 speed; + u8 rsvd2[3]; +}; + int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id); @@ -150,4 +192,7 @@ int hinic_port_set_state(struct hinic_dev *nic_dev, int hinic_port_set_func_state(struct hinic_dev *nic_dev, enum hinic_func_port_state state); +int hinic_port_get_cap(struct hinic_dev *nic_dev, + struct hinic_port_cap *port_cap); + #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index b1212e498f95..1d4f712b15a8 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -57,6 +57,25 @@ void hinic_rxq_clean_stats(struct hinic_rxq *rxq) u64_stats_update_end(&rxq_stats->syncp); } +/** + * hinic_rxq_get_stats - get statistics of Rx Queue + * @rxq: Logical Rx Queue + * @stats: return updated stats here + **/ +void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats) +{ + struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats; + unsigned int start; + + u64_stats_update_begin(&stats->syncp); + do { + start = u64_stats_fetch_begin(&rxq_stats->syncp); + stats->pkts = rxq_stats->pkts; + stats->bytes = rxq_stats->bytes; + } while (u64_stats_fetch_retry(&rxq_stats->syncp, start)); + u64_stats_update_end(&stats->syncp); +} + /** * rxq_stats_init - Initialize the statistics of specific queue * @rxq: Logical Rx Queue diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h index 538c8861e8dd..27c9af4b1c12 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h @@ -45,6 +45,8 @@ struct hinic_rxq { void hinic_rxq_clean_stats(struct hinic_rxq *rxq); +void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats); + int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, struct net_device *netdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index 90ab2d971383..5bf6a32faa46 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -66,6 +66,28 @@ void hinic_txq_clean_stats(struct hinic_txq *txq) u64_stats_update_end(&txq_stats->syncp); } +/** + * hinic_txq_get_stats - get statistics of Tx Queue + * @txq: Logical Tx Queue + * @stats: return updated stats here + **/ +void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats) +{ + struct hinic_txq_stats *txq_stats = &txq->txq_stats; + unsigned int start; + + u64_stats_update_begin(&stats->syncp); + do { + start = u64_stats_fetch_begin(&txq_stats->syncp); + stats->pkts = txq_stats->pkts; + stats->bytes = txq_stats->bytes; + stats->tx_busy = txq_stats->tx_busy; + stats->tx_wake = txq_stats->tx_wake; + stats->tx_dropped = txq_stats->tx_dropped; + } while (u64_stats_fetch_retry(&txq_stats->syncp, start)); + u64_stats_update_end(&stats->syncp); +} + /** * txq_stats_init - Initialize the statistics of specific queue * @txq: Logical Tx Queue diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h index 
7123c7f7e06a..1fa55dce5aa7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h @@ -50,6 +50,8 @@ struct hinic_txq { void hinic_txq_clean_stats(struct hinic_txq *txq); +void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats); + netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq, -- cgit v1.2.3-55-g7522 From b553400bb4a65ee4c8bc5c0202cede3c46524e55 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:56:07 +0800 Subject: net-next/hinic: Add netpoll Add more netdev operation - netpoll. Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_main.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index a417ca2d441c..ae7ad48c9f65 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -787,6 +787,23 @@ static void hinic_get_stats64(struct net_device *netdev, stats->tx_errors = nic_tx_stats->tx_dropped; } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void hinic_netpoll(struct net_device *netdev) +{ + struct hinic_dev *nic_dev = netdev_priv(netdev); + int i, num_qps; + + num_qps = hinic_hwdev_num_qps(nic_dev->hwdev); + for (i = 0; i < num_qps; i++) { + struct hinic_txq *txq = &nic_dev->txqs[i]; + struct hinic_rxq *rxq = &nic_dev->rxqs[i]; + + napi_schedule(&txq->napi); + napi_schedule(&rxq->napi); + } +} +#endif + static const struct net_device_ops hinic_netdev_ops = { .ndo_open = hinic_open, .ndo_stop = hinic_close, @@ -799,6 +816,9 @@ static const struct net_device_ops hinic_netdev_ops = { .ndo_start_xmit = hinic_xmit_frame, .ndo_tx_timeout = hinic_tx_timeout, .ndo_get_stats64 = hinic_get_stats64, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = hinic_netpoll, +#endif }; static void netdev_features_init(struct net_device *netdev) -- cgit v1.2.3-55-g7522 From 4d3b632707be4e4b47febc70e256dab4b3752494 Mon Sep 17 00:00:00 2001 From: Aviad Krawczyk Date: Mon, 21 Aug 2017 23:56:08 +0800 Subject: net-next/hinic: Add Maintainer Update MAINTAINERS file Signed-off-by: Aviad Krawczyk Signed-off-by: Zhao Chen Signed-off-by: David S. Miller --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 11e1bcec9cbb..9d47ed980fff 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6240,6 +6240,13 @@ L: linux-input@vger.kernel.org S: Maintained F: drivers/input/touchscreen/htcpen.c +HUAWEI ETHERNET DRIVER +M: Aviad Krawczyk +L: netdev@vger.kernel.org +S: Supported +F: Documentation/networking/hinic.txt +F: drivers/net/ethernet/huawei/hinic/ + HUGETLB FILESYSTEM M: Nadia Yvette Chambers S: Maintained -- cgit v1.2.3-55-g7522 From a8e8b7ff35175ab0a55ef0fa8560c3d5ffdb2818 Mon Sep 17 00:00:00 2001 From: Salil Date: Mon, 21 Aug 2017 17:05:24 +0100 Subject: net: hns3: Add support to change MTU in HNS3 hardware This patch adds the following support to the HNS3 driver: 1. Support to change the Maximum Transmission Unit of a port in the HNS NIC hardware. 2. Initializes the supported MTU range for the netdevice. Signed-off-by: lipeng Signed-off-by: Salil Mehta Reviewed-by: Andrew Lunn Reviewed-by: Leon Romanovsky Signed-off-by: David S. 
Miller --- .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 34 ++++++++++++++++++++++ .../net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 1 + 2 files changed, 35 insertions(+) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c index e731f87f3c46..1c3e29447891 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c @@ -1278,11 +1278,42 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, return ret; } +static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + bool if_running = netif_running(netdev); + int ret; + + if (!h->ae_algo->ops->set_mtu) + return -EOPNOTSUPP; + + /* if this was called with netdev up then bring netdevice down */ + if (if_running) { + (void)hns3_nic_net_stop(netdev); + msleep(100); + } + + ret = h->ae_algo->ops->set_mtu(h, new_mtu); + if (ret) { + netdev_err(netdev, "failed to change MTU in hardware %d\n", + ret); + return ret; + } + + /* if the netdev was running earlier, bring it up again */ + if (if_running && hns3_nic_net_open(netdev)) + ret = -EINVAL; + + return ret; +} + static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_open = hns3_nic_net_open, .ndo_stop = hns3_nic_net_stop, .ndo_start_xmit = hns3_nic_net_xmit, .ndo_set_mac_address = hns3_nic_net_set_mac_address, + .ndo_change_mtu = hns3_nic_change_mtu, .ndo_set_features = hns3_nic_set_features, .ndo_get_stats64 = hns3_nic_get_stats64, .ndo_setup_tc = hns3_nic_setup_tc, @@ -2752,6 +2783,9 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_reg_netdev_fail; } + /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ + netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + return ret; out_reg_netdev_fail: diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h index a6e8f15a4669..7e8746189747 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h @@ -76,6 +76,7 @@ enum hns3_nic_state { #define HNS3_RING_NAME_LEN 16 #define HNS3_BUFFER_SIZE_2048 2048 #define HNS3_RING_MAX_PENDING 32768 +#define HNS3_MAX_MTU 9728 #define HNS3_BD_SIZE_512_TYPE 0 #define HNS3_BD_SIZE_1024_TYPE 1 -- cgit v1.2.3-55-g7522 From b6d08bd81d3044dcd73ea45fa6c28cc049224d41 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Mon, 21 Aug 2017 22:36:50 +0530 Subject: net: ethernet: make ptp_clock_info const Make these const as they are only used in a copy operation. Done using Coccinelle. @match disable optional_qualifier@ identifier s; @@ static struct ptp_clock_info s = {...}; @ref@ position p; identifier match.s; @@ s@p @good1@ position ref.p; identifier match.s,f,c; expression e; @@ ( e = s@p | e = s@p.f | c(...,s@p.f,...) | c(...,s@p,...) ) @bad depends on !good1@ position ref.p; identifier match.s; @@ s@p @depends on forall !bad disable optional_qualifier@ identifier match.s; @@ static + const struct ptp_clock_info s; Signed-off-by: Bhumika Goyal Acked-by: Richard Cochran Signed-off-by: David S. 
Miller --- drivers/net/ethernet/adi/bfin_mac.c | 2 +- drivers/net/ethernet/cadence/macb_ptp.c | 2 +- drivers/net/ethernet/freescale/gianfar_ptp.c | 2 +- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 2 +- drivers/net/ethernet/ti/cpts.c | 2 +- drivers/net/ethernet/tile/tilegx.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c index a9ac58c351a0..a251de8d9a91 100644 --- a/drivers/net/ethernet/adi/bfin_mac.c +++ b/drivers/net/ethernet/adi/bfin_mac.c @@ -986,7 +986,7 @@ static int bfin_ptp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info bfin_ptp_caps = { +static const struct ptp_clock_info bfin_ptp_caps = { .owner = THIS_MODULE, .name = "BF518 clock", .max_adj = 0, diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 67cca08472b7..2220c771092b 100755 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -192,7 +192,7 @@ static int gem_ptp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info gem_ptp_caps_template = { +static const struct ptp_clock_info gem_ptp_caps_template = { .owner = THIS_MODULE, .name = GEM_PTP_TIMER_NAME, .max_adj = 0, diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index 721be13081f9..544114281ea7 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -411,7 +411,7 @@ static int ptp_gianfar_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_gianfar_caps = { +static const struct ptp_clock_info ptp_gianfar_caps = { .owner = THIS_MODULE, .name = "gianfar clock", .max_adj = 512000, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index d71bd80c5b5b..e471a903c654 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -152,7 +152,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp, } /* structure describing a PTP hardware clock */ -static struct ptp_clock_info stmmac_ptp_clock_ops = { +static const struct ptp_clock_info stmmac_ptp_clock_ops = { .owner = THIS_MODULE, .name = "stmmac_ptp_clock", .max_adj = 62500000, diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index c2121d214f08..e7b76f6b4f67 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -298,7 +298,7 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp) return (long)delay; } -static struct ptp_clock_info cpts_info = { +static const struct ptp_clock_info cpts_info = { .owner = THIS_MODULE, .name = "CTPS timer", .max_adj = 1000000, diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index aec95382ea5c..c00102b8145a 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -873,7 +873,7 @@ static int ptp_mpipe_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_mpipe_caps = { +static const struct ptp_clock_info ptp_mpipe_caps = { .owner = THIS_MODULE, .name = "mPIPE clock", .max_adj = 999999999, -- cgit v1.2.3-55-g7522 From 7d47e9a2056f7d4babde19f58c8d236e60308043 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Mon, 21 Aug 2017 23:01:12 +0530 Subject: ptp: make ptp_clock_info const Make these const as they are 
only used in a copy operation. Done using Coccinelle. @match disable optional_qualifier@ identifier s; @@ static struct ptp_clock_info s = {...}; @ref@ position p; identifier match.s; @@ s@p @good1@ position ref.p; identifier match.s,f,c; expression e; @@ ( e = s@p | e = s@p.f | c(...,s@p.f,...) | c(...,s@p,...) ) @bad depends on !good1@ position ref.p; identifier match.s; @@ s@p @depends on forall !bad disable optional_qualifier@ identifier match.s; @@ static + const struct ptp_clock_info s; Signed-off-by: Bhumika Goyal Acked-by: Richard Cochran Signed-off-by: David S. Miller --- drivers/ptp/ptp_dte.c | 2 +- drivers/ptp/ptp_ixp46x.c | 2 +- drivers/ptp/ptp_kvm.c | 2 +- drivers/ptp/ptp_pch.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c index faf6f7a83713..6edd3b9c7f01 100644 --- a/drivers/ptp/ptp_dte.c +++ b/drivers/ptp/ptp_dte.c @@ -221,7 +221,7 @@ static int ptp_dte_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_dte_caps = { +static const struct ptp_clock_info ptp_dte_caps = { .owner = THIS_MODULE, .name = "DTE PTP timer", .max_adj = 50000000, diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c index 344a3bac210b..1171ffd210b3 100644 --- a/drivers/ptp/ptp_ixp46x.c +++ b/drivers/ptp/ptp_ixp46x.c @@ -236,7 +236,7 @@ static int ptp_ixp_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_ixp_caps = { +static const struct ptp_clock_info ptp_ixp_caps = { .owner = THIS_MODULE, .name = "IXP46X timer", .max_adj = 66666655, diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c index bb865695d7a6..2b1b212c219e 100644 --- a/drivers/ptp/ptp_kvm.c +++ b/drivers/ptp/ptp_kvm.c @@ -150,7 +150,7 @@ static int ptp_kvm_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_kvm_caps = { +static const struct ptp_clock_info ptp_kvm_caps = { .owner = THIS_MODULE, .name = "KVM virtual PTP", .max_adj = 0, diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c index 3aa22ae4d94c..b3285175f20f 100644 --- a/drivers/ptp/ptp_pch.c +++ b/drivers/ptp/ptp_pch.c @@ -509,7 +509,7 @@ static int ptp_pch_enable(struct ptp_clock_info *ptp, return -EOPNOTSUPP; } -static struct ptp_clock_info ptp_pch_caps = { +static const struct ptp_clock_info ptp_pch_caps = { .owner = THIS_MODULE, .name = "PCH timer", .max_adj = 50000000, -- cgit v1.2.3-55-g7522 From ad530a1d403a70b5473578e17b65d14132926b86 Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Mon, 21 Aug 2017 12:35:56 -0700 Subject: liquidio: move macro definition to a proper place The macro LIO_CMD_WAIT_TM is not specific to the PF driver; it can be used by the VF driver too, so move its definition from a PF-specific header file to one that's common to PF and VF. Signed-off-by: Veerasenareddy Burru Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h | 2 -- drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 2 ++ 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h index dee604651ba7..2aba5247b6d8 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -24,8 +24,6 @@ #include "cn23xx_pf_regs.h" -#define LIO_CMD_WAIT_TM 100 - /* Register address and configuration for a CN23XX devices. 
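/*
 * Aside on the two ptp_clock_info patches above (the names below are invented
 * for illustration and do not come from the drivers touched): marking the
 * static capability struct const is safe because, as the commit messages
 * note, it is only ever used in a copy operation, e.g. copied into driver
 * private data before registration, so nothing writes through the static
 * definition. Needs <linux/ptp_clock_kernel.h> and <linux/err.h>.
 */
struct example_ptp_priv {
	struct ptp_clock_info caps;
	struct ptp_clock *clock;
};

static const struct ptp_clock_info example_ptp_caps = {
	.owner   = THIS_MODULE,
	.name    = "example clock",
	.max_adj = 0,
};

static int example_ptp_register(struct example_ptp_priv *priv,
				struct device *parent)
{
	priv->caps = example_ptp_caps;	/* copy; no pointer to the static kept */
	priv->clock = ptp_clock_register(&priv->caps, parent);

	return PTR_ERR_OR_ZERO(priv->clock);
}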
* If device specific changes need to be made then add a struct to include * device specific fields as shown in the commented section diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index 906e30aadadc..d0076c191cee 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -237,6 +237,8 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry, #define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1 #define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0 +#define LIO_CMD_WAIT_TM 100 + /* RX(packets coming from wire) Checksum verification flags */ /* TCP/UDP csum */ #define CNNIC_L4SUM_VERIFIED 0x1 -- cgit v1.2.3-55-g7522 From ee5b1fac5641515a80ed1b15a84713629b29516d Mon Sep 17 00:00:00 2001 From: Veerasenareddy Burru Date: Mon, 21 Aug 2017 12:35:59 -0700 Subject: liquidio: make VF driver notify NIC firmware of MTU change Signed-off-by: Veerasenareddy Burru Signed-off-by: Felix Manlunas Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 27 ++++++++++++++++++---- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index 0402b18d4689..2e993ce43b66 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -1544,14 +1544,31 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) */ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) { - struct lio *lio = GET_LIO(netdev); + struct octnic_ctrl_pkt nctrl; + struct octeon_device *oct; + struct lio *lio; + int ret = 0; - lio->mtu = new_mtu; + lio = GET_LIO(netdev); + oct = lio->oct_dev; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); - netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n", - netdev->mtu, new_mtu); + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; + nctrl.ncmd.s.param1 = new_mtu; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.wait_time = LIO_CMD_WAIT_TM; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); + return -EIO; + } - netdev->mtu = new_mtu; + lio->mtu = new_mtu; return 0; } -- cgit v1.2.3-55-g7522 From fcba1569a0c87cea40404af9a29b319f4e491839 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Mon, 21 Aug 2017 19:22:37 -0700 Subject: hv_netvsc: Clean up unused parameter from netvsc_get_hash() The parameter "sk" is not in use. Signed-off-by: Haiyang Zhang Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index b33f0507c373..4677d214cc1f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -193,7 +193,7 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, /* Azure hosts don't support non-TCP port numbers in hashing yet. We compute * hash for non-TCP traffic with only IP numbers. 
*/ -static inline u32 netvsc_get_hash(struct sk_buff *skb, struct sock *sk) +static inline u32 netvsc_get_hash(struct sk_buff *skb) { struct flow_keys flow; u32 hash; @@ -227,7 +227,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev, struct sock *sk = skb->sk; int q_idx; - q_idx = ndc->tx_send_table[netvsc_get_hash(skb, sk) & + q_idx = ndc->tx_send_table[netvsc_get_hash(skb) & (VRSS_SEND_TAB_SIZE - 1)]; /* If queue index changed record the new value */ -- cgit v1.2.3-55-g7522 From 4c0e2cbfd9b519722529526f4d87f8aab3c32ffd Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Mon, 21 Aug 2017 19:22:38 -0700 Subject: hv_netvsc: Clean up unused parameter from netvsc_get_rss_hash_opts() The parameter "nvdev" is not in use. Signed-off-by: Haiyang Zhang Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 4677d214cc1f..d8612b1a8e4e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -1228,8 +1228,7 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) } static int -netvsc_get_rss_hash_opts(struct netvsc_device *nvdev, - struct ethtool_rxnfc *info) +netvsc_get_rss_hash_opts(struct ethtool_rxnfc *info) { info->data = RXH_IP_SRC | RXH_IP_DST; @@ -1267,7 +1266,7 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, return 0; case ETHTOOL_GRXFH: - return netvsc_get_rss_hash_opts(nvdev, info); + return netvsc_get_rss_hash_opts(info); } return -EOPNOTSUPP; } -- cgit v1.2.3-55-g7522 From 4823eb2f3af44e2b9f7f02bed5a211e9ce79051f Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Mon, 21 Aug 2017 19:22:39 -0700 Subject: hv_netvsc: Add ethtool handler to set and get UDP hash levels The patch add the functions to switch UDP hash level between L3 and L4 by ethtool command. UDP over IPv4 and v6 can be set differently. The default hash level is L4. We currently only allow switching TX hash level from within the guests. On Azure, fragmented UDP packets have high loss rate with L4 hashing. Using L3 hashing is recommended in this case. For example, for UDP over IPv4 on eth0: To include UDP port numbers in hasing: ethtool -N eth0 rx-flow-hash udp4 sdfn To exclude UDP port numbers in hasing: ethtool -N eth0 rx-flow-hash udp4 sd To show UDP hash level: ethtool -n eth0 rx-flow-hash udp4 Signed-off-by: Haiyang Zhang Signed-off-by: David S. Miller --- drivers/net/hyperv/hyperv_net.h | 2 ++ drivers/net/hyperv/netvsc_drv.c | 78 ++++++++++++++++++++++++++++++++++++----- 2 files changed, 72 insertions(+), 8 deletions(-) diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 9198dd1240ed..ff1c0c8d5e0d 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -720,6 +720,8 @@ struct net_device_context { u32 tx_send_table[VRSS_SEND_TAB_SIZE]; /* Ethtool settings */ + bool udp4_l4_hash; + bool udp6_l4_hash; u8 duplex; u32 speed; struct netvsc_ethtool_stats eth_stats; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index d8612b1a8e4e..c0c4c9195a3f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -190,10 +190,12 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, return ppi; } -/* Azure hosts don't support non-TCP port numbers in hashing yet. We compute - * hash for non-TCP traffic with only IP numbers. 
+/* Azure hosts don't support non-TCP port numbers in hashing for fragmented + * packets. We can use ethtool to change UDP hash level when necessary. */ -static inline u32 netvsc_get_hash(struct sk_buff *skb) +static inline u32 netvsc_get_hash( + struct sk_buff *skb, + const struct net_device_context *ndc) { struct flow_keys flow; u32 hash; @@ -204,7 +206,11 @@ static inline u32 netvsc_get_hash(struct sk_buff *skb) if (!skb_flow_dissect_flow_keys(skb, &flow, 0)) return 0; - if (flow.basic.ip_proto == IPPROTO_TCP) { + if (flow.basic.ip_proto == IPPROTO_TCP || + (flow.basic.ip_proto == IPPROTO_UDP && + ((flow.basic.n_proto == htons(ETH_P_IP) && ndc->udp4_l4_hash) || + (flow.basic.n_proto == htons(ETH_P_IPV6) && + ndc->udp6_l4_hash)))) { return skb_get_hash(skb); } else { if (flow.basic.n_proto == htons(ETH_P_IP)) @@ -227,7 +233,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev, struct sock *sk = skb->sk; int q_idx; - q_idx = ndc->tx_send_table[netvsc_get_hash(skb) & + q_idx = ndc->tx_send_table[netvsc_get_hash(skb, ndc) & (VRSS_SEND_TAB_SIZE - 1)]; /* If queue index changed record the new value */ @@ -891,6 +897,9 @@ static void netvsc_init_settings(struct net_device *dev) { struct net_device_context *ndc = netdev_priv(dev); + ndc->udp4_l4_hash = true; + ndc->udp6_l4_hash = true; + ndc->speed = SPEED_UNKNOWN; ndc->duplex = DUPLEX_FULL; } @@ -1228,7 +1237,8 @@ static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data) } static int -netvsc_get_rss_hash_opts(struct ethtool_rxnfc *info) +netvsc_get_rss_hash_opts(struct net_device_context *ndc, + struct ethtool_rxnfc *info) { info->data = RXH_IP_SRC | RXH_IP_DST; @@ -1236,9 +1246,20 @@ netvsc_get_rss_hash_opts(struct ethtool_rxnfc *info) case TCP_V4_FLOW: case TCP_V6_FLOW: info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; - /* fallthrough */ + break; + case UDP_V4_FLOW: + if (ndc->udp4_l4_hash) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + + break; + case UDP_V6_FLOW: + if (ndc->udp6_l4_hash) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + + break; + case IPV4_FLOW: case IPV6_FLOW: break; @@ -1266,11 +1287,51 @@ netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, return 0; case ETHTOOL_GRXFH: - return netvsc_get_rss_hash_opts(info); + return netvsc_get_rss_hash_opts(ndc, info); } return -EOPNOTSUPP; } +static int netvsc_set_rss_hash_opts(struct net_device_context *ndc, + struct ethtool_rxnfc *info) +{ + if (info->data == (RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (info->flow_type == UDP_V4_FLOW) + ndc->udp4_l4_hash = true; + else if (info->flow_type == UDP_V6_FLOW) + ndc->udp6_l4_hash = true; + else + return -EOPNOTSUPP; + + return 0; + } + + if (info->data == (RXH_IP_SRC | RXH_IP_DST)) { + if (info->flow_type == UDP_V4_FLOW) + ndc->udp4_l4_hash = false; + else if (info->flow_type == UDP_V6_FLOW) + ndc->udp6_l4_hash = false; + else + return -EOPNOTSUPP; + + return 0; + } + + return -EOPNOTSUPP; +} + +static int +netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info) +{ + struct net_device_context *ndc = netdev_priv(ndev); + + if (info->cmd == ETHTOOL_SRXFH) + return netvsc_set_rss_hash_opts(ndc, info); + + return -EOPNOTSUPP; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void netvsc_poll_controller(struct net_device *dev) { @@ -1469,6 +1530,7 @@ static const struct ethtool_ops ethtool_ops = { .set_channels = netvsc_set_channels, .get_ts_info = ethtool_op_get_ts_info, .get_rxnfc = netvsc_get_rxnfc, + .set_rxnfc = netvsc_set_rxnfc, .get_rxfh_key_size = 
netvsc_get_rxfh_key_size, .get_rxfh_indir_size = netvsc_rss_indir_size, .get_rxfh = netvsc_get_rxfh, -- cgit v1.2.3-55-g7522 From 3b0c34580b7ab9bb1d4a375e427af6a75ca45821 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Mon, 21 Aug 2017 19:22:40 -0700 Subject: hv_netvsc: Update netvsc Document for UDP hash level setting Update Documentation/networking/netvsc.txt for UDP hash level setting and related info. Signed-off-by: Haiyang Zhang Signed-off-by: David S. Miller --- Documentation/networking/netvsc.txt | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/Documentation/networking/netvsc.txt b/Documentation/networking/netvsc.txt index 4ddb4e4b0426..fa8d86356791 100644 --- a/Documentation/networking/netvsc.txt +++ b/Documentation/networking/netvsc.txt @@ -21,11 +21,23 @@ Features -------------------- Hyper-V supports receive side scaling. For TCP, packets are distributed among available queues based on IP address and port - number. Current versions of Hyper-V host, only distribute UDP - packets based on the IP source and destination address. - The port number is not used as part of the hash value for UDP. - Fragmented IP packets are not distributed between queues; - all fragmented packets arrive on the first channel. + number. + + For UDP, we can switch UDP hash level between L3 and L4 by ethtool + command. UDP over IPv4 and v6 can be set differently. The default + hash level is L4. We currently only allow switching TX hash level + from within the guests. + + On Azure, fragmented UDP packets have high loss rate with L4 + hashing. Using L3 hashing is recommended in this case. + + For example, for UDP over IPv4 on eth0: + To include UDP port numbers in hasing: + ethtool -N eth0 rx-flow-hash udp4 sdfn + To exclude UDP port numbers in hasing: + ethtool -N eth0 rx-flow-hash udp4 sd + To show UDP hash level: + ethtool -n eth0 rx-flow-hash udp4 Generic Receive Offload, aka GRO -------------------------------- -- cgit v1.2.3-55-g7522 From 49c71586a6a94de168a26ec454eb8ea282ddd827 Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Mon, 21 Aug 2017 23:33:48 -0700 Subject: tcp: Get a proper dst before checking it. tcp_peer_is_proven needs a proper route to make the determination, but dst always is NULL. This bug may be there at the beginning of git tree. This does not look serious enough to deserve backports to stable versions. Signed-off-by: Tonghao Zhang Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/ipv4/tcp_input.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ddc854728a60..ab908949ee95 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6111,6 +6111,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (tmp_opt.tstamp_ok) tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb); + dst = af_ops->route_req(sk, &fl, req); + if (!dst) + goto drop_and_free; + if (!want_cookie && !isn) { /* Kill the following clause, if you dislike this way. */ if (!net->ipv4.sysctl_tcp_syncookies && @@ -6131,11 +6135,6 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, isn = af_ops->init_seq(skb); } - if (!dst) { - dst = af_ops->route_req(sk, &fl, req); - if (!dst) - goto drop_and_free; - } tcp_ecn_create_request(req, skb, sk, dst); -- cgit v1.2.3-55-g7522 From 111993692741a7044e6c01b428cecf1071de3d0b Mon Sep 17 00:00:00 2001 From: Tonghao Zhang Date: Mon, 21 Aug 2017 23:33:49 -0700 Subject: tcp: Remove the unused parameter for tcp_try_fastopen. 
Signed-off-by: Tonghao Zhang Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- include/net/tcp.h | 3 +-- net/ipv4/tcp_fastopen.c | 6 ++---- net/ipv4/tcp_input.c | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index afdab3781425..a995004ae946 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1533,8 +1533,7 @@ int tcp_fastopen_reset_cipher(void *key, unsigned int len); void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb); struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct tcp_fastopen_cookie *foc, - struct dst_entry *dst); + struct tcp_fastopen_cookie *foc); void tcp_fastopen_init_key_once(bool publish); bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss, struct tcp_fastopen_cookie *cookie); diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index ce9c7fef200f..e3c33220c418 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -171,7 +171,6 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb) static struct sock *tcp_fastopen_create_child(struct sock *sk, struct sk_buff *skb, - struct dst_entry *dst, struct request_sock *req) { struct tcp_sock *tp; @@ -278,8 +277,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk) */ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, struct request_sock *req, - struct tcp_fastopen_cookie *foc, - struct dst_entry *dst) + struct tcp_fastopen_cookie *foc) { struct tcp_fastopen_cookie valid_foc = { .len = -1 }; bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1; @@ -312,7 +310,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb, * data in SYN_RECV state. */ fastopen: - child = tcp_fastopen_create_child(sk, skb, dst, req); + child = tcp_fastopen_create_child(sk, skb, req); if (child) { foc->len = -1; NET_INC_STATS(sock_net(sk), diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ab908949ee95..d3421ee9a10a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6150,7 +6150,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, tcp_openreq_init_rwin(req, sk, dst); if (!want_cookie) { tcp_reqsk_record_syn(sk, req, skb); - fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); + fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc); } if (fastopen_sk) { af_ops->send_synack(fastopen_sk, dst, &fl, req, -- cgit v1.2.3-55-g7522 From 58e0c0db85097923a46c5dae56d526f61b26c75e Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Tue, 22 Aug 2017 13:41:19 +0530 Subject: net: ethernet: ax88796: make mdiobb_ops const Make this const as it is only stored in a const field of a mdiobb_ctrl structure. Signed-off-by: Bhumika Goyal Signed-off-by: David S. Miller --- drivers/net/ethernet/8390/ax88796.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 05d9d3e2e92e..245554707163 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -585,7 +585,7 @@ static int ax_bb_get_data(struct mdiobb_ctrl *ctrl) return reg_memr & AX_MEMR_MDI ? 
1 : 0; } -static struct mdiobb_ops bb_ops = { +static const struct mdiobb_ops bb_ops = { .owner = THIS_MODULE, .set_mdc = ax_bb_mdc, .set_mdio_dir = ax_bb_dir, -- cgit v1.2.3-55-g7522 From 94494733ba02653d376839cd6ebc761f0cefc4f2 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Tue, 22 Aug 2017 13:45:59 +0530 Subject: net: ethernet: freescale: fs_enet: make mdiobb_ops const Make this const as it is only stored in a const field of a mdiobb_ctrl structure. Signed-off-by: Bhumika Goyal Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 1f015edcca22..c8e5d889bd81 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -100,7 +100,7 @@ static inline void mdc(struct mdiobb_ctrl *ctrl, int what) in_be32(bitbang->dat); } -static struct mdiobb_ops bb_ops = { +static const struct mdiobb_ops bb_ops = { .owner = THIS_MODULE, .set_mdc = mdc, .set_mdio_dir = mdio_dir, -- cgit v1.2.3-55-g7522 From 41a130f7c61d2adb303852d3fca938ad138a96d8 Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Tue, 22 Aug 2017 13:43:29 +0530 Subject: net: mdio-gpio: make mdiobb_ops const Make this const as it is only stored in a const field of a mdiobb_ctrl structure. Signed-off-by: Bhumika Goyal Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/mdio-gpio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 7faa79b254ef..4333c6e14742 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -116,7 +116,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what) gpiod_set_value(bitbang->mdc, what); } -static struct mdiobb_ops mdio_gpio_ops = { +static const struct mdiobb_ops mdio_gpio_ops = { .owner = THIS_MODULE, .set_mdc = mdc_set, .set_mdio_dir = mdio_dir, -- cgit v1.2.3-55-g7522 From ab2fb7e3240d24c68f854aa1b972fe415d8d1b3e Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Tue, 22 Aug 2017 11:39:57 -0400 Subject: udp: remove unreachable ufo branches Remove two references to ufo in the udp send path that are no longer reachable now that ufo has been removed. Commit 85f1bd9a7b5a ("udp: consistently apply ufo or fragmentation") is a fix to ufo. It is safe to revert what remains of it. Also, no skb can enter ip_append_page with skb_is_gso true now that skb_shinfo(skb)->gso_type is no longer set in ip_append_page/_data. Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/ipv4/ip_output.c | 12 ++++-------- net/ipv4/udp.c | 2 +- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 73b0b15245b6..e8e675be60ec 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1223,15 +1223,11 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page, cork->length += size; while (size > 0) { - if (skb_is_gso(skb)) { - len = size; - } else { + /* Check if the remaining data fits into current packet. */ + len = mtu - skb->len; + if (len < size) + len = maxfraglen - skb->len; - /* Check if the remaining data fits into current packet. 
*/ - len = mtu - skb->len; - if (len < size) - len = maxfraglen - skb->len; - } if (len <= 0) { struct sk_buff *skb_prev; int alloclen; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 25fb14490d6a..bf6c406bf5e7 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -809,7 +809,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) if (is_udplite) /* UDP-Lite */ csum = udplite_csum(skb); - else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */ + else if (sk->sk_no_check_tx) { /* UDP csum off */ skb->ip_summed = CHECKSUM_NONE; goto send; -- cgit v1.2.3-55-g7522 From 84e54fe0a5eaed696dee4019c396f8396f5a908b Mon Sep 17 00:00:00 2001 From: William Tu Date: Tue, 22 Aug 2017 09:40:28 -0700 Subject: gre: introduce native tunnel support for ERSPAN The patch adds ERSPAN type II tunnel support. The implementation is based on the draft at [1]. One of the purposes is for Linux box to be able to receive ERSPAN monitoring traffic sent from the Cisco switch, by creating a ERSPAN tunnel device. In addition, the patch also adds ERSPAN TX, so Linux virtual switch can redirect monitored traffic to the ERSPAN tunnel device. The traffic will be encapsulated into ERSPAN and sent out. The implementation reuses tunnel key as ERSPAN session ID, and field 'erspan' as ERSPAN Index fields: ./ip link add dev ers11 type erspan seq key 100 erspan 123 \ local 172.16.1.200 remote 172.16.1.100 To use the above device as ERSPAN receiver, configure Nexus 5000 switch as below: monitor session 100 type erspan-source erspan-id 123 vrf default destination ip 172.16.1.200 source interface Ethernet1/11 both source interface Ethernet1/12 both no shut monitor erspan origin ip-address 172.16.1.100 global [1] https://tools.ietf.org/html/draft-foschiano-erspan-01 [2] iproute2 patch: http://marc.info/?l=linux-netdev&m=150306086924951&w=2 [3] test script: http://marc.info/?l=linux-netdev&m=150231021807304&w=2 Signed-off-by: William Tu Signed-off-by: Meenakshi Vohra Cc: Alexey Kuznetsov Cc: Hideaki YOSHIFUJI Signed-off-by: David S. Miller --- include/net/erspan.h | 61 ++++++++++ include/net/ip_tunnels.h | 3 + include/uapi/linux/if_ether.h | 1 + include/uapi/linux/if_tunnel.h | 1 + net/ipv4/ip_gre.c | 269 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 335 insertions(+) create mode 100644 include/net/erspan.h diff --git a/include/net/erspan.h b/include/net/erspan.h new file mode 100644 index 000000000000..ca94fc86865e --- /dev/null +++ b/include/net/erspan.h @@ -0,0 +1,61 @@ +#ifndef __LINUX_ERSPAN_H +#define __LINUX_ERSPAN_H + +/* + * GRE header for ERSPAN encapsulation (8 octets [34:41]) -- 8 bytes + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * |0|0|0|1|0|00000|000000000|00000| Protocol Type for ERSPAN | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Sequence Number (increments per packet per session) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Note that in the above GRE header [RFC1701] out of the C, R, K, S, + * s, Recur, Flags, Version fields only S (bit 03) is set to 1. The + * other fields are set to zero, so only a sequence number follows. 
+ * + * ERSPAN Type II header (8 octets [42:49]) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Ver | VLAN | COS | En|T| Session ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Reserved | Index | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * GRE proto ERSPAN type II = 0x88BE, type III = 0x22EB + */ + +#define ERSPAN_VERSION 0x1 + +#define VER_MASK 0xf000 +#define VLAN_MASK 0x0fff +#define COS_MASK 0xe000 +#define EN_MASK 0x1800 +#define T_MASK 0x0400 +#define ID_MASK 0x03ff +#define INDEX_MASK 0xfffff + +enum erspan_encap_type { + ERSPAN_ENCAP_NOVLAN = 0x0, /* originally without VLAN tag */ + ERSPAN_ENCAP_ISL = 0x1, /* originally ISL encapsulated */ + ERSPAN_ENCAP_8021Q = 0x2, /* originally 802.1Q encapsulated */ + ERSPAN_ENCAP_INFRAME = 0x3, /* VLAN tag perserved in frame */ +}; + +struct erspan_metadata { + __be32 index; /* type II */ +}; + +struct erspanhdr { + __be16 ver_vlan; +#define VER_OFFSET 12 + __be16 session_id; +#define COS_OFFSET 13 +#define EN_OFFSET 11 +#define T_OFFSET 10 + struct erspan_metadata md; +}; + +#endif diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 520809912f03..625c29329372 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -115,6 +115,9 @@ struct ip_tunnel { u32 o_seqno; /* The last output seqno */ int tun_hlen; /* Precalculated header length */ + /* This field used only by ERSPAN */ + u32 index; /* ERSPAN type II index */ + struct dst_cache dst_cache; struct ip_tunnel_parm parms; diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h index 5bc9bfd816b7..efeb1190c2ca 100644 --- a/include/uapi/linux/if_ether.h +++ b/include/uapi/linux/if_ether.h @@ -66,6 +66,7 @@ #define ETH_P_ATALK 0x809B /* Appletalk DDP */ #define ETH_P_AARP 0x80F3 /* Appletalk AARP */ #define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ +#define ETH_P_ERSPAN 0x88BE /* ERSPAN type II */ #define ETH_P_IPX 0x8137 /* IPX over DIX */ #define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ #define ETH_P_PAUSE 0x8808 /* IEEE Pause frames. 
See 802.3 31B */ diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h index 6792d1967d31..2e520883c054 100644 --- a/include/uapi/linux/if_tunnel.h +++ b/include/uapi/linux/if_tunnel.h @@ -134,6 +134,7 @@ enum { IFLA_GRE_COLLECT_METADATA, IFLA_GRE_IGNORE_DF, IFLA_GRE_FWMARK, + IFLA_GRE_ERSPAN_INDEX, __IFLA_GRE_MAX, }; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 7a7829e839c2..6e8a62289e03 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -48,6 +48,7 @@ #include #include #include +#include /* Problems & solutions @@ -115,6 +116,7 @@ static int ipgre_tunnel_init(struct net_device *dev); static unsigned int ipgre_net_id __read_mostly; static unsigned int gre_tap_net_id __read_mostly; +static unsigned int erspan_net_id __read_mostly; static void ipgre_err(struct sk_buff *skb, u32 info, const struct tnl_ptk_info *tpi) @@ -246,6 +248,56 @@ static void gre_err(struct sk_buff *skb, u32 info) ipgre_err(skb, info, &tpi); } +static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, + int gre_hdr_len) +{ + struct net *net = dev_net(skb->dev); + struct metadata_dst *tun_dst = NULL; + struct ip_tunnel_net *itn; + struct ip_tunnel *tunnel; + struct erspanhdr *ershdr; + const struct iphdr *iph; + __be32 session_id; + __be32 index; + int len; + + itn = net_generic(net, erspan_net_id); + iph = ip_hdr(skb); + len = gre_hdr_len + sizeof(*ershdr); + + if (unlikely(!pskb_may_pull(skb, len))) + return -ENOMEM; + + iph = ip_hdr(skb); + ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len); + + /* The original GRE header does not have key field, + * Use ERSPAN 10-bit session ID as key. + */ + session_id = cpu_to_be32(ntohs(ershdr->session_id)); + tpi->key = session_id; + index = ershdr->md.index; + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, + tpi->flags | TUNNEL_KEY, + iph->saddr, iph->daddr, tpi->key); + + if (tunnel) { + if (__iptunnel_pull_header(skb, + gre_hdr_len + sizeof(*ershdr), + htons(ETH_P_TEB), + false, false) < 0) + goto drop; + + tunnel->index = ntohl(index); + skb_reset_mac_header(skb); + ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error); + return PACKET_RCVD; + } +drop: + kfree_skb(skb); + return PACKET_RCVD; +} + static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi, struct ip_tunnel_net *itn, int hdr_len, bool raw_proto) { @@ -328,6 +380,11 @@ static int gre_rcv(struct sk_buff *skb) if (hdr_len < 0) goto drop; + if (unlikely(tpi.proto == htons(ETH_P_ERSPAN))) { + if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD) + return 0; + } + if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD) return 0; @@ -503,6 +560,81 @@ free_skb: return NETDEV_TX_OK; } +static inline u8 tos_to_cos(u8 tos) +{ + u8 dscp, cos; + + dscp = tos >> 2; + cos = dscp >> 3; + return cos; +} + +static void erspan_build_header(struct sk_buff *skb, + __be32 id, u32 index, bool truncate) +{ + struct iphdr *iphdr = ip_hdr(skb); + struct ethhdr *eth = eth_hdr(skb); + enum erspan_encap_type enc_type; + struct erspanhdr *ershdr; + struct qtag_prefix { + __be16 eth_type; + __be16 tci; + } *qp; + u16 vlan_tci = 0; + + enc_type = ERSPAN_ENCAP_NOVLAN; + + /* If mirrored packet has vlan tag, extract tci and + * perserve vlan header in the mirrored frame. 
+ */ + if (eth->h_proto == htons(ETH_P_8021Q)) { + qp = (struct qtag_prefix *)(skb->data + 2 * ETH_ALEN); + vlan_tci = ntohs(qp->tci); + enc_type = ERSPAN_ENCAP_INFRAME; + } + + skb_push(skb, sizeof(*ershdr)); + ershdr = (struct erspanhdr *)skb->data; + memset(ershdr, 0, sizeof(*ershdr)); + + ershdr->ver_vlan = htons((vlan_tci & VLAN_MASK) | + (ERSPAN_VERSION << VER_OFFSET)); + ershdr->session_id = htons((u16)(ntohl(id) & ID_MASK) | + ((tos_to_cos(iphdr->tos) << COS_OFFSET) & COS_MASK) | + (enc_type << EN_OFFSET & EN_MASK) | + ((truncate << T_OFFSET) & T_MASK)); + ershdr->md.index = htonl(index & INDEX_MASK); +} + +static netdev_tx_t erspan_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct ip_tunnel *tunnel = netdev_priv(dev); + bool truncate = false; + + if (gre_handle_offloads(skb, false)) + goto free_skb; + + if (skb_cow_head(skb, dev->needed_headroom)) + goto free_skb; + + if (skb->len > dev->mtu) { + pskb_trim(skb, dev->mtu); + truncate = true; + } + + /* Push ERSPAN header */ + erspan_build_header(skb, tunnel->parms.o_key, tunnel->index, truncate); + tunnel->parms.o_flags &= ~TUNNEL_KEY; + __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); + return NETDEV_TX_OK; + +free_skb: + kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + static netdev_tx_t gre_tap_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -828,6 +960,39 @@ out: return ipgre_tunnel_validate(tb, data, extack); } +static int erspan_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + __be16 flags = 0; + int ret; + + if (!data) + return 0; + + ret = ipgre_tap_validate(tb, data, extack); + if (ret) + return ret; + + /* ERSPAN should only have GRE sequence and key flag */ + flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]); + flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]); + if (flags != (GRE_SEQ | GRE_KEY)) + return -EINVAL; + + /* ERSPAN Session ID only has 10-bit. Since we reuse + * 32-bit key field as ID, check it's range. 
+ */ + if (data[IFLA_GRE_IKEY] && + (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK)) + return -EINVAL; + + if (data[IFLA_GRE_OKEY] && + (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK)) + return -EINVAL; + + return 0; +} + static int ipgre_netlink_parms(struct net_device *dev, struct nlattr *data[], struct nlattr *tb[], @@ -892,6 +1057,13 @@ static int ipgre_netlink_parms(struct net_device *dev, if (data[IFLA_GRE_FWMARK]) *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]); + if (data[IFLA_GRE_ERSPAN_INDEX]) { + t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); + + if (t->index & ~INDEX_MASK) + return -EINVAL; + } + return 0; } @@ -949,6 +1121,36 @@ static const struct net_device_ops gre_tap_netdev_ops = { .ndo_fill_metadata_dst = gre_fill_metadata_dst, }; +static int erspan_tunnel_init(struct net_device *dev) +{ + struct ip_tunnel *tunnel = netdev_priv(dev); + int t_hlen; + + tunnel->tun_hlen = 8; + tunnel->parms.iph.protocol = IPPROTO_GRE; + t_hlen = tunnel->hlen + sizeof(struct iphdr) + sizeof(struct erspanhdr); + + dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; + dev->mtu = ETH_DATA_LEN - t_hlen - 4; + dev->features |= GRE_FEATURES; + dev->hw_features |= GRE_FEATURES; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + return ip_tunnel_init(dev); +} + +static const struct net_device_ops erspan_netdev_ops = { + .ndo_init = erspan_tunnel_init, + .ndo_uninit = ip_tunnel_uninit, + .ndo_start_xmit = erspan_xmit, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = ip_tunnel_change_mtu, + .ndo_get_stats64 = ip_tunnel_get_stats64, + .ndo_get_iflink = ip_tunnel_get_iflink, + .ndo_fill_metadata_dst = gre_fill_metadata_dst, +}; + static void ipgre_tap_setup(struct net_device *dev) { ether_setup(dev); @@ -1041,6 +1243,8 @@ static size_t ipgre_get_size(const struct net_device *dev) nla_total_size(1) + /* IFLA_GRE_FWMARK */ nla_total_size(4) + + /* IFLA_GRE_ERSPAN_INDEX */ + nla_total_size(4) + 0; } @@ -1083,12 +1287,25 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; } + if (t->index) + if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) + goto nla_put_failure; + return 0; nla_put_failure: return -EMSGSIZE; } +static void erspan_setup(struct net_device *dev) +{ + ether_setup(dev); + dev->netdev_ops = &erspan_netdev_ops; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + ip_tunnel_setup(dev, erspan_net_id); +} + static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { [IFLA_GRE_LINK] = { .type = NLA_U32 }, [IFLA_GRE_IFLAGS] = { .type = NLA_U16 }, @@ -1107,6 +1324,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = { [IFLA_GRE_COLLECT_METADATA] = { .type = NLA_FLAG }, [IFLA_GRE_IGNORE_DF] = { .type = NLA_U8 }, [IFLA_GRE_FWMARK] = { .type = NLA_U32 }, + [IFLA_GRE_ERSPAN_INDEX] = { .type = NLA_U32 }, }; static struct rtnl_link_ops ipgre_link_ops __read_mostly = { @@ -1139,6 +1357,21 @@ static struct rtnl_link_ops ipgre_tap_ops __read_mostly = { .get_link_net = ip_tunnel_get_link_net, }; +static struct rtnl_link_ops erspan_link_ops __read_mostly = { + .kind = "erspan", + .maxtype = IFLA_GRE_MAX, + .policy = ipgre_policy, + .priv_size = sizeof(struct ip_tunnel), + .setup = erspan_setup, + .validate = erspan_validate, + .newlink = ipgre_newlink, + .changelink = ipgre_changelink, + .dellink = ip_tunnel_dellink, + .get_size = ipgre_get_size, + .fill_info = ipgre_fill_info, + .get_link_net = ip_tunnel_get_link_net, +}; + struct 
net_device *gretap_fb_dev_create(struct net *net, const char *name, u8 name_assign_type) { @@ -1202,6 +1435,26 @@ static struct pernet_operations ipgre_tap_net_ops = { .size = sizeof(struct ip_tunnel_net), }; +static int __net_init erspan_init_net(struct net *net) +{ + return ip_tunnel_init_net(net, erspan_net_id, + &erspan_link_ops, "erspan0"); +} + +static void __net_exit erspan_exit_net(struct net *net) +{ + struct ip_tunnel_net *itn = net_generic(net, erspan_net_id); + + ip_tunnel_delete_net(itn, &erspan_link_ops); +} + +static struct pernet_operations erspan_net_ops = { + .init = erspan_init_net, + .exit = erspan_exit_net, + .id = &erspan_net_id, + .size = sizeof(struct ip_tunnel_net), +}; + static int __init ipgre_init(void) { int err; @@ -1216,6 +1469,10 @@ static int __init ipgre_init(void) if (err < 0) goto pnet_tap_faied; + err = register_pernet_device(&erspan_net_ops); + if (err < 0) + goto pnet_erspan_failed; + err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO); if (err < 0) { pr_info("%s: can't add protocol\n", __func__); @@ -1230,13 +1487,21 @@ static int __init ipgre_init(void) if (err < 0) goto tap_ops_failed; + err = rtnl_link_register(&erspan_link_ops); + if (err < 0) + goto erspan_link_failed; + return 0; +erspan_link_failed: + rtnl_link_unregister(&ipgre_tap_ops); tap_ops_failed: rtnl_link_unregister(&ipgre_link_ops); rtnl_link_failed: gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); add_proto_failed: + unregister_pernet_device(&erspan_net_ops); +pnet_erspan_failed: unregister_pernet_device(&ipgre_tap_net_ops); pnet_tap_faied: unregister_pernet_device(&ipgre_net_ops); @@ -1247,9 +1512,11 @@ static void __exit ipgre_fini(void) { rtnl_link_unregister(&ipgre_tap_ops); rtnl_link_unregister(&ipgre_link_ops); + rtnl_link_unregister(&erspan_link_ops); gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); unregister_pernet_device(&ipgre_tap_net_ops); unregister_pernet_device(&ipgre_net_ops); + unregister_pernet_device(&erspan_net_ops); } module_init(ipgre_init); @@ -1257,5 +1524,7 @@ module_exit(ipgre_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_RTNL_LINK("gre"); MODULE_ALIAS_RTNL_LINK("gretap"); +MODULE_ALIAS_RTNL_LINK("erspan"); MODULE_ALIAS_NETDEV("gre0"); MODULE_ALIAS_NETDEV("gretap0"); +MODULE_ALIAS_NETDEV("erspan0"); -- cgit v1.2.3-55-g7522 From 81b6630ff7210356fe1843572543c76674e90450 Mon Sep 17 00:00:00 2001 From: Antoine Ténart Date: Tue, 22 Aug 2017 19:08:21 +0200 Subject: net: mvpp2: unify register definitions coding style Cosmetic patch to use the same formatting rules on all register definitions. Signed-off-by: Antoine Tenart Tested-by: Marcin Wojtas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 88 ++++++++++++++++++------------------ 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 39bc8fbbdd65..34c679f25fec 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -193,18 +193,18 @@ #define MVPP2_MAX_ISR_RX_THRESHOLD 0xfffff0 #define MVPP21_ISR_RXQ_GROUP_REG(port) (0x5400 + 4 * (port)) -#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 +#define MVPP22_ISR_RXQ_GROUP_INDEX_REG 0x5400 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7 #define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf -#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 +#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK 0x380 -#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 -#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f -#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 -#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 +#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG 0x5404 +#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK 0x1f +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK 0xf00 +#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET 8 #define MVPP2_ISR_ENABLE_REG(port) (0x5420 + 4 * (port)) #define MVPP2_ISR_ENABLE_INTERRUPT(mask) ((mask) & 0xffff) @@ -272,7 +272,7 @@ #define MVPP2_BM_VIRT_RLS_REG 0x64c0 #define MVPP22_BM_ADDR_HIGH_RLS_REG 0x64c4 #define MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK 0xff -#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 +#define MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK 0xff00 #define MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT 8 /* TX Scheduler registers */ @@ -314,57 +314,57 @@ /* Per-port registers */ #define MVPP2_GMAC_CTRL_0_REG 0x0 -#define MVPP2_GMAC_PORT_EN_MASK BIT(0) -#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 -#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc -#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) +#define MVPP2_GMAC_PORT_EN_MASK BIT(0) +#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 +#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) #define MVPP2_GMAC_CTRL_1_REG 0x4 -#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) -#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) -#define MVPP2_GMAC_PCS_LB_EN_BIT 6 -#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) -#define MVPP2_GMAC_SA_LOW_OFFS 7 +#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) +#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) +#define MVPP2_GMAC_PCS_LB_EN_BIT 6 +#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) +#define MVPP2_GMAC_SA_LOW_OFFS 7 #define MVPP2_GMAC_CTRL_2_REG 0x8 -#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) -#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) -#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) -#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) +#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) +#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) +#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) +#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) #define MVPP2_GMAC_AUTONEG_CONFIG 0xc -#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) -#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) -#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) -#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) -#define MVPP2_GMAC_AN_SPEED_EN BIT(7) -#define MVPP2_GMAC_FC_ADV_EN BIT(9) -#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) -#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) +#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) +#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) +#define 
MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVPP2_GMAC_AN_SPEED_EN BIT(7) +#define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c -#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 -#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 -#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ +#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) #define MVPP22_GMAC_CTRL_4_REG 0x90 -#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) -#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) -#define MVPP22_CTRL4_SYNC_BYPASS BIT(6) -#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) +#define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) +#define MVPP22_CTRL4_DP_CLK_SEL BIT(5) +#define MVPP22_CTRL4_SYNC_BYPASS BIT(6) +#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) /* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0, * relative to port->base. */ #define MVPP22_XLG_CTRL0_REG 0x100 -#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) -#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) -#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) +#define MVPP22_XLG_CTRL0_PORT_EN BIT(0) +#define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) +#define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) #define MVPP22_XLG_CTRL3_REG 0x11c -#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) -#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) -#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) +#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) /* SMI registers. PPv2.2 only, relative to priv->iface_base. */ #define MVPP22_SMI_MISC_CFG_REG 0x1204 -#define MVPP22_SMI_POLLING_EN BIT(10) +#define MVPP22_SMI_POLLING_EN BIT(10) #define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00) -- cgit v1.2.3-55-g7522 From 1068ec79d9cb5481ccfa6ffacdcf174636227b5d Mon Sep 17 00:00:00 2001 From: Antoine Ténart Date: Tue, 22 Aug 2017 19:08:22 +0200 Subject: net: mvpp2: fix the synchronization module bypass macro name The macro defining the bit to toggle to bypass or not the synchronization module is wrongly named. Writing 1 will disable bypass. This patch s/MVPP22_CTRL4_SYNC_BYPASS/MVPP22_CTRL4_SYNC_BYPASS_DIS/. Signed-off-by: Antoine Tenart Tested-by: Marcin Wojtas Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 34c679f25fec..03b7ced1082f 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -346,7 +346,7 @@ #define MVPP22_GMAC_CTRL_4_REG 0x90 #define MVPP22_CTRL4_EXT_PIN_GMII_SEL BIT(0) #define MVPP22_CTRL4_DP_CLK_SEL BIT(5) -#define MVPP22_CTRL4_SYNC_BYPASS BIT(6) +#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6) #define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7) /* Per-port XGMAC registers. 
PPv2.2 only, only for GOP port 0, @@ -4269,7 +4269,7 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port) else val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; val &= ~MVPP22_CTRL4_DP_CLK_SEL; - val |= MVPP22_CTRL4_SYNC_BYPASS; + val |= MVPP22_CTRL4_SYNC_BYPASS_DIS; val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); } -- cgit v1.2.3-55-g7522 From 2055d6268d755fdc3f96f1d0bbf22c6164dacfbf Mon Sep 17 00:00:00 2001 From: Antoine Ténart Date: Tue, 22 Aug 2017 19:08:23 +0200 Subject: net: mvpp2: move the mii configuration in the ndo_open path This moves the mii configuration in the ndo_open path, to allow handling different mii configurations later and to switch between these configurations at runtime. Signed-off-by: Antoine Tenart Tested-by: Marcin Wojtas Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 03b7ced1082f..087eb793ab4d 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5987,6 +5987,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port) /* Enable interrupts on all CPUs */ mvpp2_interrupts_enable(port); + mvpp2_port_mii_set(port); mvpp2_port_enable(port); phy_start(ndev->phydev); netif_tx_start_all_queues(port->dev); @@ -6949,7 +6950,6 @@ static int mvpp2_port_probe(struct platform_device *pdev, goto err_free_stats; } - mvpp2_port_mii_set(port); mvpp2_port_periodic_xon_disable(port); if (priv->hw_version == MVPP21) -- cgit v1.2.3-55-g7522 From 3919357fb0bbdbc18366cf59cbf0f16c608f81fe Mon Sep 17 00:00:00 2001 From: Antoine Ténart Date: Tue, 22 Aug 2017 19:08:24 +0200 Subject: net: mvpp2: initialize the GMAC when using a port This adds a routine to initialize the GMAC at the port level when using a port. This wasn't done until this commit, and the mvpp2 driver was relying on the bootloader/firmware initialization. This doesn't mean everything is configured in the mvpp2 driver now, but it helps reducing the gap. Signed-off-by: Antoine Tenart Tested-by: Marcin Wojtas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 123 ++++++++++++++++++++++++++++------- 1 file changed, 98 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 087eb793ab4d..2ab7366ac90f 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -315,6 +315,7 @@ /* Per-port registers */ #define MVPP2_GMAC_CTRL_0_REG 0x0 #define MVPP2_GMAC_PORT_EN_MASK BIT(0) +#define MVPP2_GMAC_PORT_TYPE_MASK BIT(1) #define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 #define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc #define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) @@ -326,16 +327,21 @@ #define MVPP2_GMAC_SA_LOW_OFFS 7 #define MVPP2_GMAC_CTRL_2_REG 0x8 #define MVPP2_GMAC_INBAND_AN_MASK BIT(0) +#define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) +#define MVPP2_GMAC_DISABLE_PADDING BIT(5) #define MVPP2_GMAC_PORT_RESET_MASK BIT(6) #define MVPP2_GMAC_AUTONEG_CONFIG 0xc #define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) #define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) +#define MVPP2_GMAC_IN_BAND_AUTONEG BIT(2) +#define MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS BIT(3) #define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) #define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) #define MVPP2_GMAC_AN_SPEED_EN BIT(7) #define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_FLOW_CTRL_AUTONEG BIT(11) #define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) #define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) #define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c @@ -4245,6 +4251,92 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) /* Port configuration routines */ +static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port) +{ + u32 val; + + if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + val |= MVPP22_CTRL4_SYNC_BYPASS_DIS | MVPP22_CTRL4_DP_CLK_SEL | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; + writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val |= MVPP2_GMAC_DISABLE_PADDING; + val &= ~MVPP2_GMAC_FLOW_CTRL_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + } else if (port->phy_interface == PHY_INTERFACE_MODE_RGMII || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { + val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); + val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL | + MVPP22_CTRL4_SYNC_BYPASS_DIS | + MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; + val &= ~MVPP22_CTRL4_DP_CLK_SEL; + writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); + + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val &= ~MVPP2_GMAC_DISABLE_PADDING; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + } + + /* The port is connected to a copper PHY */ + val = readl(port->base + MVPP2_GMAC_CTRL_0_REG); + val &= ~MVPP2_GMAC_PORT_TYPE_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); + + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | + MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG | + MVPP2_GMAC_AN_DUPLEX_EN; + if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) + val |= MVPP2_GMAC_IN_BAND_AUTONEG; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + +static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port) +{ + u32 val; + + /* Force link down */ + val = readl(port->base + 
MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; + val |= MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); + + /* Set the GMAC in a reset state */ + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val |= MVPP2_GMAC_PORT_RESET_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + /* Configure the PCS and in-band AN */ + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + if (port->phy_interface == PHY_INTERFACE_MODE_SGMII) { + val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; + } else if (port->phy_interface == PHY_INTERFACE_MODE_RGMII || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { + val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; + val |= MVPP2_GMAC_PORT_RGMII_MASK; + } + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + mvpp2_port_mii_gmac_configure_mode(port); + + /* Unset the GMAC reset state */ + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); + val &= ~MVPP2_GMAC_PORT_RESET_MASK; + writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + + /* Stop forcing link down */ + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); + val &= ~MVPP2_GMAC_FORCE_LINK_DOWN; + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); +} + static void mvpp22_port_mii_set(struct mvpp2_port *port) { u32 val; @@ -4262,38 +4354,19 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port) writel(val, port->base + MVPP22_XLG_CTRL3_REG); } - - val = readl(port->base + MVPP22_GMAC_CTRL_4_REG); - if (port->phy_interface == PHY_INTERFACE_MODE_RGMII) - val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL; - else - val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL; - val &= ~MVPP22_CTRL4_DP_CLK_SEL; - val |= MVPP22_CTRL4_SYNC_BYPASS_DIS; - val |= MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE; - writel(val, port->base + MVPP22_GMAC_CTRL_4_REG); } static void mvpp2_port_mii_set(struct mvpp2_port *port) { - u32 val; - if (port->priv->hw_version == MVPP22) mvpp22_port_mii_set(port); - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG); - - switch (port->phy_interface) { - case PHY_INTERFACE_MODE_SGMII: - val |= MVPP2_GMAC_INBAND_AN_MASK; - break; - case PHY_INTERFACE_MODE_RGMII: - val |= MVPP2_GMAC_PORT_RGMII_MASK; - default: - val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; - } - - writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); + if (port->phy_interface == PHY_INTERFACE_MODE_RGMII || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || + port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID || + port->phy_interface == PHY_INTERFACE_MODE_SGMII) + mvpp2_port_mii_gmac_configure(port); } static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port) -- cgit v1.2.3-55-g7522 From 77321959feb9bdcfeeb2a2154d6fb831fb1991c2 Mon Sep 17 00:00:00 2001 From: Antoine Ténart Date: Tue, 22 Aug 2017 19:08:25 +0200 Subject: net: mvpp2: initialize the XLG MAC when using a port This adds a routine to initialize the XLG MAC at the port level when using a port and the XAUI/10GKR interface mode. This wasn't done until this commit, and the mvpp2 driver was relying on the bootloader/firmware initialization. This doesn't mean everything is configured in the mvpp2 driver now, but it helps reducing the gap. Signed-off-by: Antoine Tenart Tested-by: Marcin Wojtas Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 2ab7366ac90f..64e0a1bae7d0 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -361,6 +361,7 @@ #define MVPP22_XLG_CTRL0_REG 0x100 #define MVPP22_XLG_CTRL0_PORT_EN BIT(0) #define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) +#define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) #define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) #define MVPP22_XLG_CTRL3_REG 0x11c @@ -368,6 +369,11 @@ #define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) #define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13) +#define MVPP22_XLG_CTRL4_REG 0x184 +#define MVPP22_XLG_CTRL4_FWD_FC BIT(5) +#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6) +#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12) + /* SMI registers. PPv2.2 only, relative to priv->iface_base. */ #define MVPP22_SMI_MISC_CFG_REG 0x1204 #define MVPP22_SMI_POLLING_EN BIT(10) @@ -4337,6 +4343,23 @@ static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port) writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); } +static void mvpp2_port_mii_xlg_configure(struct mvpp2_port *port) +{ + u32 val; + + if (port->gop_id != 0) + return; + + val = readl(port->base + MVPP22_XLG_CTRL0_REG); + val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN; + writel(val, port->base + MVPP22_XLG_CTRL0_REG); + + val = readl(port->base + MVPP22_XLG_CTRL4_REG); + val &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC; + val |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC; + writel(val, port->base + MVPP22_XLG_CTRL4_REG); +} + static void mvpp22_port_mii_set(struct mvpp2_port *port) { u32 val; @@ -4367,6 +4390,8 @@ static void mvpp2_port_mii_set(struct mvpp2_port *port) port->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID || port->phy_interface == PHY_INTERFACE_MODE_SGMII) mvpp2_port_mii_gmac_configure(port); + else if (port->phy_interface == PHY_INTERFACE_MODE_10GKR) + mvpp2_port_mii_xlg_configure(port); } static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port) -- cgit v1.2.3-55-g7522 From 76eb1b1de5b6467c78bb72311dbf29eea1f10a3a Mon Sep 17 00:00:00 2001 From: Stefan Chulski Date: Tue, 22 Aug 2017 19:08:26 +0200 Subject: net: mvpp2: set maximum packet size for 10G ports Set maximum packet size for XLG 10G ports. Missing maximum packet size for XLG configuration will cause kernel panic if oversized packet is received by port. Signed-off-by: Stefan Chulski Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 64e0a1bae7d0..b2d2b4c9d5af 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -363,7 +363,9 @@ #define MVPP22_XLG_CTRL0_MAC_RESET_DIS BIT(1) #define MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN BIT(7) #define MVPP22_XLG_CTRL0_MIB_CNT_DIS BIT(14) - +#define MVPP22_XLG_CTRL1_REG 0x104 +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT BIT(0) +#define MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK 0x1fff #define MVPP22_XLG_CTRL3_REG 0x11c #define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13) #define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13) @@ -4498,6 +4500,18 @@ static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) writel(val, port->base + MVPP2_GMAC_CTRL_0_REG); } +/* Change maximum receive size of the port */ +static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port) +{ + u32 val; + + val = readl(port->base + MVPP22_XLG_CTRL1_REG); + val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK; + val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) << + MVPP22_XLG_CTRL1_FRAMESIZELIMIT; + writel(val, port->base + MVPP22_XLG_CTRL1_REG); +} + /* Set defaults to the MVPP2 port */ static void mvpp2_defaults_set(struct mvpp2_port *port) { @@ -6076,7 +6090,13 @@ static void mvpp2_start_dev(struct mvpp2_port *port) struct net_device *ndev = port->dev; int i; - mvpp2_gmac_max_rx_size_set(port); + if (port->gop_id == 0 && + (port->phy_interface == PHY_INTERFACE_MODE_XAUI || + port->phy_interface == PHY_INTERFACE_MODE_10GKR)) + mvpp2_xlg_max_rx_size_set(port); + else + mvpp2_gmac_max_rx_size_set(port); + mvpp2_txp_max_tx_size_set(port); for (i = 0; i < port->nqvecs; i++) -- cgit v1.2.3-55-g7522 From f84bf386f3956c4a02c773a59c9c61033c00a5b5 Mon Sep 17 00:00:00 2001 From: Antoine Ténart Date: Tue, 22 Aug 2017 19:08:27 +0200 Subject: net: mvpp2: initialize the GoP The patch adds GoP (group of ports) initialization functions. The mvpp2 driver was relying on the firmware/bootloader initialization; this patch moves this setup to the mvpp2 driver. Signed-off-by: Antoine Tenart Tested-by: Marcin Wojtas Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 170 +++++++++++++++++++++++++++++++++++ 1 file changed, 170 insertions(+) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index b2d2b4c9d5af..02c62cbbfe51 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -30,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -388,6 +390,38 @@ #define MVPP2_QUEUE_NEXT_DESC(q, index) \ (((index) < (q)->last_desc) ? ((index) + 1) : 0) +/* XPCS registers. PPv2.2 only */ +#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000) +#define MVPP22_MPCS_CTRL 0x14 +#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10) +#define MVPP22_MPCS_CLK_RESET 0x14c +#define MAC_CLK_RESET_SD_TX BIT(0) +#define MAC_CLK_RESET_SD_RX BIT(1) +#define MAC_CLK_RESET_MAC BIT(2) +#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4) +#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11) + +/* XPCS registers. 
PPv2.2 only */ +#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000) +#define MVPP22_XPCS_CFG0 0x0 +#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3) +#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5) + +/* System controller registers. Accessed through a regmap. */ +#define GENCONF_SOFT_RESET1 0x1108 +#define GENCONF_SOFT_RESET1_GOP BIT(6) +#define GENCONF_PORT_CTRL0 0x1110 +#define GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT BIT(1) +#define GENCONF_PORT_CTRL0_RX_DATA_SAMPLE BIT(29) +#define GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR BIT(31) +#define GENCONF_PORT_CTRL1 0x1114 +#define GENCONF_PORT_CTRL1_EN(p) BIT(p) +#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28) +#define GENCONF_CTRL0 0x1120 +#define GENCONF_CTRL0_PORT0_RGMII BIT(0) +#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1) +#define GENCONF_CTRL0_PORT1_RGMII BIT(2) + /* Various constants */ /* Coalescing */ @@ -731,6 +765,11 @@ struct mvpp2 { */ void __iomem *swth_base[MVPP2_MAX_THREADS]; + /* On PPv2.2, some port control registers are located into the system + * controller space. These registers are accessible through a regmap. + */ + struct regmap *sysctrl_base; + /* Common clocks */ struct clk *pp_clk; struct clk *gop_clk; @@ -4259,6 +4298,123 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask) /* Port configuration routines */ +static void mvpp22_gop_init_rgmii(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); + if (port->gop_id == 2) + val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII; + else if (port->gop_id == 3) + val |= GENCONF_CTRL0_PORT1_RGMII_MII; + regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); +} + +static void mvpp22_gop_init_sgmii(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT | + GENCONF_PORT_CTRL0_RX_DATA_SAMPLE; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + if (port->gop_id > 1) { + regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val); + if (port->gop_id == 2) + val &= ~GENCONF_CTRL0_PORT0_RGMII; + else if (port->gop_id == 3) + val &= ~GENCONF_CTRL0_PORT1_RGMII_MII; + regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val); + } +} + +static void mvpp22_gop_init_10gkr(struct mvpp2_port *port) +{ + struct mvpp2 *priv = port->priv; + void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id); + void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id); + u32 val; + + /* XPCS */ + val = readl(xpcs + MVPP22_XPCS_CFG0); + val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) | + MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3)); + val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2); + writel(val, xpcs + MVPP22_XPCS_CFG0); + + /* MPCS */ + val = readl(mpcs + MVPP22_MPCS_CTRL); + val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN; + writel(val, mpcs + MVPP22_MPCS_CTRL); + + val = readl(mpcs + MVPP22_MPCS_CLK_RESET); + val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC | + MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX); + val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1); + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); + + val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET; + val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX; + writel(val, mpcs + MVPP22_MPCS_CLK_RESET); +} + +static int mvpp22_gop_init(struct mvpp2_port 
*port) +{ + struct mvpp2 *priv = port->priv; + u32 val; + + if (!priv->sysctrl_base) + return 0; + + switch (port->phy_interface) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + if (port->gop_id == 0) + goto invalid_conf; + mvpp22_gop_init_rgmii(port); + break; + case PHY_INTERFACE_MODE_SGMII: + mvpp22_gop_init_sgmii(port); + break; + case PHY_INTERFACE_MODE_10GKR: + if (port->gop_id != 0) + goto invalid_conf; + mvpp22_gop_init_10gkr(port); + break; + default: + goto unsupported_conf; + } + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val); + val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) | + GENCONF_PORT_CTRL1_EN(port->gop_id); + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val); + + regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val); + val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR; + regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val); + + regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val); + val |= GENCONF_SOFT_RESET1_GOP; + regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val); + +unsupported_conf: + return 0; + +invalid_conf: + netdev_err(port->dev, "Invalid port configuration\n"); + return -EINVAL; +} + static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port) { u32 val; @@ -6105,6 +6261,9 @@ static void mvpp2_start_dev(struct mvpp2_port *port) /* Enable interrupts on all CPUs */ mvpp2_interrupts_enable(port); + if (port->priv->hw_version == MVPP22) + mvpp22_gop_init(port); + mvpp2_port_mii_set(port); mvpp2_port_enable(port); phy_start(ndev->phydev); @@ -7350,6 +7509,17 @@ static int mvpp2_probe(struct platform_device *pdev) priv->iface_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->iface_base)) return PTR_ERR(priv->iface_base); + + priv->sysctrl_base = + syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "marvell,system-controller"); + if (IS_ERR(priv->sysctrl_base)) + /* The system controller regmap is optional for dt + * compatibility reasons. When not provided, the + * configuration of the GoP relies on the + * firmware/bootloader. + */ + priv->sysctrl_base = NULL; } for (i = 0; i < MVPP2_MAX_THREADS; i++) { -- cgit v1.2.3-55-g7522 From 7afe461ee64c03a857adfc146fcda1c98bb6a8ca Mon Sep 17 00:00:00 2001 From: Antoine Ténart Date: Tue, 22 Aug 2017 19:08:28 +0200 Subject: Documentation/bindings: net: marvell-pp2: add the system controller This patch documents the new marvell,system-controller property used by the Marvell ppv2 network driver. Signed-off-by: Antoine Tenart Tested-by: Marcin Wojtas Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/marvell-pp2.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt index 8918ad3ccf14..49484db81583 100644 --- a/Documentation/devicetree/bindings/net/marvell-pp2.txt +++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt @@ -45,6 +45,7 @@ Optional properties (port): be the name associated to the interrupts listed. Valid names are: "tx-cpu0", "tx-cpu1", "tx-cpu2", "tx-cpu3", "rx-shared". +- marvell,system-controller: a phandle to the system controller. Example for marvell,armada-375-pp2: -- cgit v1.2.3-55-g7522 From 153890b41b0ad467a89a77d48bb96f6bc3b14120 Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 22 Aug 2017 23:41:12 +0530 Subject: net: amd: constify zorro_device_id zorro_device_id are not supposed to change at runtime. 
All functions working with zorro_device_id provided by work with const zorro_device_id. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/a2065.c | 2 +- drivers/net/ethernet/amd/ariadne.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index ee4b94e3cda9..e22f976a0d18 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -643,7 +643,7 @@ static int a2065_init_one(struct zorro_dev *z, static void a2065_remove_one(struct zorro_dev *z); -static struct zorro_device_id a2065_zorro_tbl[] = { +static const struct zorro_device_id a2065_zorro_tbl[] = { { ZORRO_PROD_CBM_A2065_1 }, { ZORRO_PROD_CBM_A2065_2 }, { ZORRO_PROD_AMERISTAR_A2065 }, diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c index 5fd7b15b0574..4b6a5cb85dd2 100644 --- a/drivers/net/ethernet/amd/ariadne.c +++ b/drivers/net/ethernet/amd/ariadne.c @@ -692,7 +692,7 @@ static void ariadne_remove_one(struct zorro_dev *z) free_netdev(dev); } -static struct zorro_device_id ariadne_zorro_tbl[] = { +static const struct zorro_device_id ariadne_zorro_tbl[] = { { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, { 0 } }; -- cgit v1.2.3-55-g7522 From 9695fe6f21e3b06a7086f62851c0df16369fc223 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 22 Aug 2017 12:26:46 -0700 Subject: net: sched: use kvmalloc() for class hash tables High order GFP_KERNEL allocations can stress the host badly. Use modern kvmalloc_array()/kvfree() instead of custom allocations. Signed-off-by: Eric Dumazet Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- net/sched/sch_api.c | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 0fea0c50b763..aaf552b8e120 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -621,14 +621,10 @@ EXPORT_SYMBOL(qdisc_watchdog_cancel); static struct hlist_head *qdisc_class_hash_alloc(unsigned int n) { - unsigned int size = n * sizeof(struct hlist_head), i; struct hlist_head *h; + unsigned int i; - if (size <= PAGE_SIZE) - h = kmalloc(size, GFP_KERNEL); - else - h = (struct hlist_head *) - __get_free_pages(GFP_KERNEL, get_order(size)); + h = kvmalloc_array(n, sizeof(struct hlist_head), GFP_KERNEL); if (h != NULL) { for (i = 0; i < n; i++) @@ -637,16 +633,6 @@ static struct hlist_head *qdisc_class_hash_alloc(unsigned int n) return h; } -static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n) -{ - unsigned int size = n * sizeof(struct hlist_head); - - if (size <= PAGE_SIZE) - kfree(h); - else - free_pages((unsigned long)h, get_order(size)); -} - void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) { struct Qdisc_class_common *cl; @@ -679,7 +665,7 @@ void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash) clhash->hashmask = nmask; sch_tree_unlock(sch); - qdisc_class_hash_free(ohash, osize); + kvfree(ohash); } EXPORT_SYMBOL(qdisc_class_hash_grow); @@ -699,7 +685,7 @@ EXPORT_SYMBOL(qdisc_class_hash_init); void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash) { - qdisc_class_hash_free(clhash->hash, clhash->hashsize); + kvfree(clhash->hash); } EXPORT_SYMBOL(qdisc_class_hash_destroy); -- cgit v1.2.3-55-g7522 From 4a00aa057759d713e1296ecbc614fa560d569977 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 22 Aug 2017 23:46:06 +0100 Subject: MIPS,bpf: fix 
missing break in switch statement There is a missing break causing a fall-through and setting ctx.use_bbit_insns to the wrong value. Fix this by adding the missing break. Detected with cppcheck: "Variable 'ctx.use_bbit_insns' is reassigned a value before the old one has been used. 'break;' missing?" Fixes: 8d8d18c3283f ("MIPS,bpf: Fix using smp_processor_id() in preemptible splat.") Signed-off-by: Colin Ian King Acked-by: David Daney Signed-off-by: David S. Miller --- arch/mips/net/ebpf_jit.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 44ddc12cbb0e..7646891c4e9b 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c @@ -1892,6 +1892,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) case CPU_CAVIUM_OCTEON2: case CPU_CAVIUM_OCTEON3: ctx.use_bbit_insns = 1; + break; default: ctx.use_bbit_insns = 0; } -- cgit v1.2.3-55-g7522 From cd36c3a21a400cac9c457394b9adf94e0027c136 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 23 Aug 2017 00:06:09 +0200 Subject: bpf: fix map value attribute for hash of maps Currently, iproute2's BPF ELF loader works fine with array of maps when retrieving the fd from a pinned node and doing a selfcheck against the provided map attributes from the object file, but we fail to do the same for hash of maps and thus refuse to get the map from pinned node. Reason is that when allocating hash of maps, fd_htab_map_alloc() will set the value size to sizeof(void *), and any user space map creation requests are forced to set 4 bytes as value size. Thus, selfcheck will complain about exposed 8 bytes on 64 bit archs vs. 4 bytes from object file as value size. Contract is that fdinfo or BPF_MAP_GET_FD_BY_ID returns the value size used to create the map. Fix it by handling it the same way as we do for array of maps, which means that we leave value size at 4 bytes and in the allocation phase round up value size to 8 bytes. alloc_htab_elem() needs an adjustment in order to copy rounded up 8 bytes due to bpf_fd_htab_map_update_elem() calling into htab_map_update_elem() with the pointer of the map pointer as value. Unlike array of maps where we just xchg(), we're using the generic htab_map_update_elem() callback also used from helper calls, which published the key/value already on return, so we need to ensure to memcpy() the right size. Fixes: bcc6b1b7ebf8 ("bpf: Add hash of maps support") Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Signed-off-by: David S. 
Miller --- kernel/bpf/hashtab.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index ae822de4a90a..d246905f2bb1 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -662,12 +662,27 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, } } +static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) +{ + return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && + BITS_PER_LONG == 64; +} + +static u32 htab_size_value(const struct bpf_htab *htab, bool percpu) +{ + u32 size = htab->map.value_size; + + if (percpu || fd_htab_map_needs_adjust(htab)) + size = round_up(size, 8); + return size; +} + static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, void *value, u32 key_size, u32 hash, bool percpu, bool onallcpus, struct htab_elem *old_elem) { - u32 size = htab->map.value_size; + u32 size = htab_size_value(htab, percpu); bool prealloc = htab_is_prealloc(htab); struct htab_elem *l_new, **pl_new; void __percpu *pptr; @@ -707,9 +722,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, memcpy(l_new->key, key, key_size); if (percpu) { - /* round up value_size to 8 bytes */ - size = round_up(size, 8); - if (prealloc) { pptr = htab_elem_get_ptr(l_new, key_size); } else { @@ -1220,17 +1232,9 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = { static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr) { - struct bpf_map *map; - if (attr->value_size != sizeof(u32)) return ERR_PTR(-EINVAL); - - /* pointer is stored internally */ - attr->value_size = sizeof(void *); - map = htab_map_alloc(attr); - attr->value_size = sizeof(u32); - - return map; + return htab_map_alloc(attr); } static void fd_htab_map_free(struct bpf_map *map) -- cgit v1.2.3-55-g7522 From e4a8e817d3cb2a5108f8bb2e47e81eb25a2c5e30 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 23 Aug 2017 01:47:53 +0200 Subject: bpf: misc xdp redirect cleanups Few cleanups including: bpf_redirect_map() is really XDP only due to the return code. Move it to a more appropriate location where we do the XDP redirect handling and change it's name into bpf_xdp_redirect_map() to make it consistent to the bpf_xdp_redirect() helper. xdp_do_redirect_map() helper can be static since only used out of filter.c file. Drop the goto in xdp_do_generic_redirect() and only return errors directly. In xdp_do_flush_map() only clear ri->map_to_flush which is the arg we're using in that function, ri->map is cleared earlier along with ri->ifindex. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- net/core/filter.c | 72 +++++++++++++++++++++++++------------------------------ 1 file changed, 32 insertions(+), 40 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index fa2115695037..2a0d762a20d8 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1835,29 +1835,6 @@ static const struct bpf_func_proto bpf_redirect_proto = { .arg2_type = ARG_ANYTHING, }; -BPF_CALL_3(bpf_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags) -{ - struct redirect_info *ri = this_cpu_ptr(&redirect_info); - - if (unlikely(flags)) - return XDP_ABORTED; - - ri->ifindex = ifindex; - ri->flags = flags; - ri->map = map; - - return XDP_REDIRECT; -} - -static const struct bpf_func_proto bpf_redirect_map_proto = { - .func = bpf_redirect_map, - .gpl_only = false, - .ret_type = RET_INTEGER, - .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_ANYTHING, -}; - BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags) { struct redirect_info *ri = this_cpu_ptr(&redirect_info); @@ -2506,13 +2483,11 @@ static int __bpf_tx_xdp(struct net_device *dev, err = dev->netdev_ops->ndo_xdp_xmit(dev, xdp); if (err) return err; - if (map) __dev_map_insert_ctx(map, index); else dev->netdev_ops->ndo_xdp_flush(dev); - - return err; + return 0; } void xdp_do_flush_map(void) @@ -2520,16 +2495,14 @@ void xdp_do_flush_map(void) struct redirect_info *ri = this_cpu_ptr(&redirect_info); struct bpf_map *map = ri->map_to_flush; - ri->map = NULL; ri->map_to_flush = NULL; - if (map) __dev_map_flush(map); } EXPORT_SYMBOL_GPL(xdp_do_flush_map); -int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, - struct bpf_prog *xdp_prog) +static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog) { struct redirect_info *ri = this_cpu_ptr(&redirect_info); struct bpf_map *map = ri->map; @@ -2545,14 +2518,12 @@ int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, err = -EINVAL; goto out; } - - if (ri->map_to_flush && (ri->map_to_flush != map)) + if (ri->map_to_flush && ri->map_to_flush != map) xdp_do_flush_map(); err = __bpf_tx_xdp(fwd, map, xdp, index); if (likely(!err)) ri->map_to_flush = map; - out: trace_xdp_redirect(dev, fwd, xdp_prog, XDP_REDIRECT, err); return err; @@ -2594,20 +2565,17 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb) ri->ifindex = 0; if (unlikely(!dev)) { bpf_warn_invalid_xdp_redirect(index); - goto err; + return -EINVAL; } if (unlikely(!(dev->flags & IFF_UP))) - goto err; - + return -ENETDOWN; len = dev->mtu + dev->hard_header_len + VLAN_HLEN; if (skb->len > len) - goto err; + return -E2BIG; skb->dev = dev; return 0; -err: - return -EINVAL; } EXPORT_SYMBOL_GPL(xdp_do_generic_redirect); @@ -2620,6 +2588,7 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) ri->ifindex = ifindex; ri->flags = flags; + return XDP_REDIRECT; } @@ -2631,6 +2600,29 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = { .arg2_type = ARG_ANYTHING, }; +BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags) +{ + struct redirect_info *ri = this_cpu_ptr(&redirect_info); + + if (unlikely(flags)) + return XDP_ABORTED; + + ri->ifindex = ifindex; + ri->flags = flags; + ri->map = map; + + return XDP_REDIRECT; +} + +static const struct bpf_func_proto bpf_xdp_redirect_map_proto = { + .func = bpf_xdp_redirect_map, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_ANYTHING, + .arg3_type = 
ARG_ANYTHING, +}; + bool bpf_helper_changes_pkt_data(void *func) { if (func == bpf_skb_vlan_push || @@ -3233,7 +3225,7 @@ xdp_func_proto(enum bpf_func_id func_id) case BPF_FUNC_redirect: return &bpf_xdp_redirect_proto; case BPF_FUNC_redirect_map: - return &bpf_redirect_map_proto; + return &bpf_xdp_redirect_map_proto; default: return bpf_base_func_proto(func_id); } -- cgit v1.2.3-55-g7522 From af4d045ceeca04946d89453206269aea6c338a8e Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 23 Aug 2017 01:47:54 +0200 Subject: bpf: minor cleanups for dev_map Some minor code cleanups, while going over it I also noticed that we're accounting the bitmap only for one CPU currently, so fix that up as well. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- kernel/bpf/devmap.c | 100 +++++++++++++++++++++------------------------------- 1 file changed, 41 insertions(+), 59 deletions(-) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index fa08181d1c3d..bfecabfd4974 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -48,30 +48,30 @@ * calls will fail at this point. */ #include -#include #include -#include -#include "percpu_freelist.h" -#include "bpf_lru_list.h" -#include "map_in_map.h" struct bpf_dtab_netdev { struct net_device *dev; - int key; - struct rcu_head rcu; struct bpf_dtab *dtab; + unsigned int bit; + struct rcu_head rcu; }; struct bpf_dtab { struct bpf_map map; struct bpf_dtab_netdev **netdev_map; - unsigned long int __percpu *flush_needed; + unsigned long __percpu *flush_needed; struct list_head list; }; static DEFINE_SPINLOCK(dev_map_lock); static LIST_HEAD(dev_map_list); +static u64 dev_map_bitmap_size(const union bpf_attr *attr) +{ + return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long); +} + static struct bpf_map *dev_map_alloc(union bpf_attr *attr) { struct bpf_dtab *dtab; @@ -95,11 +95,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) dtab->map.map_flags = attr->map_flags; dtab->map.numa_node = bpf_map_attr_numa_node(attr); - err = -ENOMEM; - /* make sure page count doesn't overflow */ cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); - cost += BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long); + cost += dev_map_bitmap_size(attr) * num_possible_cpus(); if (cost >= U32_MAX - PAGE_SIZE) goto free_dtab; @@ -110,12 +108,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) if (err) goto free_dtab; - err = -ENOMEM; /* A per cpu bitfield with a bit per possible net device */ - dtab->flush_needed = __alloc_percpu( - BITS_TO_LONGS(attr->max_entries) * - sizeof(unsigned long), - __alignof__(unsigned long)); + dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr), + __alignof__(unsigned long)); if (!dtab->flush_needed) goto free_dtab; @@ -128,12 +123,12 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) spin_lock(&dev_map_lock); list_add_tail_rcu(&dtab->list, &dev_map_list); spin_unlock(&dev_map_lock); - return &dtab->map; + return &dtab->map; free_dtab: free_percpu(dtab->flush_needed); kfree(dtab); - return ERR_PTR(err); + return ERR_PTR(-ENOMEM); } static void dev_map_free(struct bpf_map *map) @@ -178,9 +173,6 @@ static void dev_map_free(struct bpf_map *map) kfree(dev); } - /* At this point bpf program is detached and all pending operations - * _must_ be complete - */ free_percpu(dtab->flush_needed); bpf_map_area_free(dtab->netdev_map); kfree(dtab); @@ -190,7 +182,7 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) { 
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); u32 index = key ? *(u32 *)key : U32_MAX; - u32 *next = (u32 *)next_key; + u32 *next = next_key; if (index >= dtab->map.max_entries) { *next = 0; @@ -199,29 +191,16 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) if (index == dtab->map.max_entries - 1) return -ENOENT; - *next = index + 1; return 0; } -void __dev_map_insert_ctx(struct bpf_map *map, u32 key) +void __dev_map_insert_ctx(struct bpf_map *map, u32 bit) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); unsigned long *bitmap = this_cpu_ptr(dtab->flush_needed); - __set_bit(key, bitmap); -} - -struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key) -{ - struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); - struct bpf_dtab_netdev *dev; - - if (key >= map->max_entries) - return NULL; - - dev = READ_ONCE(dtab->netdev_map[key]); - return dev ? dev->dev : NULL; + __set_bit(bit, bitmap); } /* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled @@ -248,7 +227,6 @@ void __dev_map_flush(struct bpf_map *map) continue; netdev = dev->dev; - __clear_bit(bit, bitmap); if (unlikely(!netdev || !netdev->netdev_ops->ndo_xdp_flush)) continue; @@ -261,43 +239,49 @@ void __dev_map_flush(struct bpf_map *map) * update happens in parallel here a dev_put wont happen until after reading the * ifindex. */ -static void *dev_map_lookup_elem(struct bpf_map *map, void *key) +struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); struct bpf_dtab_netdev *dev; - u32 i = *(u32 *)key; - if (i >= map->max_entries) + if (key >= map->max_entries) return NULL; - dev = READ_ONCE(dtab->netdev_map[i]); - return dev ? &dev->dev->ifindex : NULL; + dev = READ_ONCE(dtab->netdev_map[key]); + return dev ? dev->dev : NULL; } -static void dev_map_flush_old(struct bpf_dtab_netdev *old_dev) +static void *dev_map_lookup_elem(struct bpf_map *map, void *key) +{ + struct net_device *dev = __dev_map_lookup_elem(map, *(u32 *)key); + + return dev ? 
&dev->ifindex : NULL; +} + +static void dev_map_flush_old(struct bpf_dtab_netdev *dev) { - if (old_dev->dev->netdev_ops->ndo_xdp_flush) { - struct net_device *fl = old_dev->dev; + if (dev->dev->netdev_ops->ndo_xdp_flush) { + struct net_device *fl = dev->dev; unsigned long *bitmap; int cpu; for_each_online_cpu(cpu) { - bitmap = per_cpu_ptr(old_dev->dtab->flush_needed, cpu); - __clear_bit(old_dev->key, bitmap); + bitmap = per_cpu_ptr(dev->dtab->flush_needed, cpu); + __clear_bit(dev->bit, bitmap); - fl->netdev_ops->ndo_xdp_flush(old_dev->dev); + fl->netdev_ops->ndo_xdp_flush(dev->dev); } } } static void __dev_map_entry_free(struct rcu_head *rcu) { - struct bpf_dtab_netdev *old_dev; + struct bpf_dtab_netdev *dev; - old_dev = container_of(rcu, struct bpf_dtab_netdev, rcu); - dev_map_flush_old(old_dev); - dev_put(old_dev->dev); - kfree(old_dev); + dev = container_of(rcu, struct bpf_dtab_netdev, rcu); + dev_map_flush_old(dev); + dev_put(dev->dev); + kfree(dev); } static int dev_map_delete_elem(struct bpf_map *map, void *key) @@ -309,8 +293,8 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key) if (k >= map->max_entries) return -EINVAL; - /* Use synchronize_rcu() here to ensure any rcu critical sections - * have completed, but this does not guarantee a flush has happened + /* Use call_rcu() here to ensure any rcu critical sections have + * completed, but this does not guarantee a flush has happened * yet. Because driver side rcu_read_lock/unlock only protects the * running XDP program. However, for pending flush operations the * dev and ctx are stored in another per cpu map. And additionally, @@ -334,10 +318,8 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, if (unlikely(map_flags > BPF_EXIST)) return -EINVAL; - if (unlikely(i >= dtab->map.max_entries)) return -E2BIG; - if (unlikely(map_flags == BPF_NOEXIST)) return -EEXIST; @@ -355,7 +337,7 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, return -EINVAL; } - dev->key = i; + dev->bit = i; dev->dtab = dtab; } -- cgit v1.2.3-55-g7522 From e3d0328c76dde0b957f62f8c407b79f1d8fe3ef8 Mon Sep 17 00:00:00 2001 From: William Tu Date: Tue, 22 Aug 2017 17:04:05 -0700 Subject: gre: fix goto statement typo Fix typo: pnet_tap_faied. Signed-off-by: William Tu Signed-off-by: David S. Miller --- net/ipv4/ip_gre.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 6e8a62289e03..5a20ba9b9b50 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1467,7 +1467,7 @@ static int __init ipgre_init(void) err = register_pernet_device(&ipgre_tap_net_ops); if (err < 0) - goto pnet_tap_faied; + goto pnet_tap_failed; err = register_pernet_device(&erspan_net_ops); if (err < 0) @@ -1503,7 +1503,7 @@ add_proto_failed: unregister_pernet_device(&erspan_net_ops); pnet_erspan_failed: unregister_pernet_device(&ipgre_tap_net_ops); -pnet_tap_faied: +pnet_tap_failed: unregister_pernet_device(&ipgre_net_ops); return err; } -- cgit v1.2.3-55-g7522 From b28547728d4fd42a004df2b662724e16ff778db6 Mon Sep 17 00:00:00 2001 From: Felix Manlunas Date: Tue, 22 Aug 2017 12:46:37 -0700 Subject: liquidio: change manner of detecting whether or not NIC firmware is loaded In the NIC firmware, the 1-bit flag indicating "firmware is loaded" moved from SLI_SCRATCH_1 to SLI_SCRATCH_2 (these are Octeon general-purpose scratch registers). Make the PF driver conform to this change. 
Remove code that sets the "firmware is loaded" flag because it's now the firmware's job to do that. In the code that detects whether or not the firmware is loaded, don't just rely on checking the "firmware is loaded" flag because that may cause a rare false negative. Add code that deduces whether or not the firmware is loaded; that will never give a false negative. Also bump up driver version to match newer NIC firmware. Signed-off-by: Felix Manlunas Signed-off-by: Derek Chickles Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c | 15 +++++++++++++-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 6 ------ drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 3 ++- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index fbc0d4e008f3..f6c0bad78cd4 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -1442,8 +1442,19 @@ int cn23xx_fw_loaded(struct octeon_device *oct) { u64 val; - val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1); - return (val >> 1) & 1ULL; + /* If there's more than one active PF on this NIC, then that + * implies that the NIC firmware is loaded and running. This check + * prevents a rare false negative that might occur if we only relied + * on checking the SCR2_BIT_FW_LOADED flag. The false negative would + * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even + * though the firmware was already loaded but still booting and has yet + * to set SCR2_BIT_FW_LOADED. + */ + if (atomic_read(oct->adapter_refcount) > 1) + return 1; + + val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); + return (val >> SCR2_BIT_FW_LOADED) & 1ULL; } void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index c2360fe8cef2..e7f54948173f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -4125,12 +4125,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev) dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); return 1; } - /* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is - * loaded - */ - if (OCTEON_CN23XX_PF(octeon_dev)) - octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1, - 2ULL); } handshake[octeon_dev->octeon_id].init_ok = 1; diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h index d0076c191cee..3788c8cd082a 100644 --- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -28,7 +28,7 @@ #define LIQUIDIO_PACKAGE "" #define LIQUIDIO_BASE_MAJOR_VERSION 1 #define LIQUIDIO_BASE_MINOR_VERSION 6 -#define LIQUIDIO_BASE_MICRO_VERSION 0 +#define LIQUIDIO_BASE_MICRO_VERSION 1 #define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \ __stringify(LIQUIDIO_BASE_MINOR_VERSION) #define LIQUIDIO_MICRO_VERSION "." 
__stringify(LIQUIDIO_BASE_MICRO_VERSION) @@ -106,6 +106,7 @@ enum octeon_tag_type { #define MAX_IOQ_INTERRUPTS_PER_PF (64 * 2) #define MAX_IOQ_INTERRUPTS_PER_VF (8 * 2) +#define SCR2_BIT_FW_LOADED 63 static inline u32 incr_index(u32 index, u32 count, u32 max) { -- cgit v1.2.3-55-g7522 From 98aaa913b4ed250324429f0a9e6d5f77a3b5276c Mon Sep 17 00:00:00 2001 From: Mike Maloney Date: Tue, 22 Aug 2017 17:08:48 -0400 Subject: tcp: Extend SOF_TIMESTAMPING_RX_SOFTWARE to TCP recvmsg When SOF_TIMESTAMPING_RX_SOFTWARE is enabled for tcp sockets, return the timestamp corresponding to the highest sequence number data returned. Previously the skb->tstamp is overwritten when a TCP packet is placed in the out of order queue. While the packet is in the ooo queue, save the timestamp in the TCB_SKB_CB. This space is shared with the gso_* options which are only used on the tx path, and a previously unused 4 byte hole. When skbs are coalesced either in the sk_receive_queue or the out_of_order_queue always choose the timestamp of the appended skb to maintain the invariant of returning the timestamp of the last byte in the recvmsg buffer. Signed-off-by: Mike Maloney Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- include/net/tcp.h | 9 +++++++- net/ipv4/tcp.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++++ net/ipv4/tcp_input.c | 35 ++++++++++++++++++++++++---- net/ipv4/tcp_ipv4.c | 2 ++ net/ipv6/tcp_ipv6.c | 2 ++ 5 files changed, 108 insertions(+), 5 deletions(-) diff --git a/include/net/tcp.h b/include/net/tcp.h index a995004ae946..c614ff135b66 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -774,6 +774,12 @@ struct tcp_skb_cb { u16 tcp_gso_segs; u16 tcp_gso_size; }; + + /* Used to stash the receive timestamp while this skb is in the + * out of order queue, as skb->tstamp is overwritten by the + * rbnode. + */ + ktime_t swtstamp; }; __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ @@ -790,7 +796,8 @@ struct tcp_skb_cb { __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ __u8 txstamp_ack:1, /* Record TX timestamp for ack? */ eor:1, /* Is skb MSG_EOR marked? 
*/ - unused:6; + has_rxtstamp:1, /* SKB has a RX timestamp */ + unused:5; __u32 ack_seq; /* Sequence number ACK'd */ union { struct { diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d25e3bcca66b..0cce4472b4a1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -269,6 +269,7 @@ #include #include #include +#include #include #include @@ -1695,6 +1696,61 @@ int tcp_peek_len(struct socket *sock) } EXPORT_SYMBOL(tcp_peek_len); +static void tcp_update_recv_tstamps(struct sk_buff *skb, + struct scm_timestamping *tss) +{ + if (skb->tstamp) + tss->ts[0] = ktime_to_timespec(skb->tstamp); + else + tss->ts[0] = (struct timespec) {0}; + + if (skb_hwtstamps(skb)->hwtstamp) + tss->ts[2] = ktime_to_timespec(skb_hwtstamps(skb)->hwtstamp); + else + tss->ts[2] = (struct timespec) {0}; +} + +/* Similar to __sock_recv_timestamp, but does not require an skb */ +void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk, + struct scm_timestamping *tss) +{ + struct timeval tv; + bool has_timestamping = false; + + if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) { + if (sock_flag(sk, SOCK_RCVTSTAMP)) { + if (sock_flag(sk, SOCK_RCVTSTAMPNS)) { + put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, + sizeof(tss->ts[0]), &tss->ts[0]); + } else { + tv.tv_sec = tss->ts[0].tv_sec; + tv.tv_usec = tss->ts[0].tv_nsec / 1000; + + put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, + sizeof(tv), &tv); + } + } + + if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) + has_timestamping = true; + else + tss->ts[0] = (struct timespec) {0}; + } + + if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) { + if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE) + has_timestamping = true; + else + tss->ts[2] = (struct timespec) {0}; + } + + if (has_timestamping) { + tss->ts[1] = (struct timespec) {0}; + put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING, + sizeof(*tss), tss); + } +} + /* * This routine copies from a sock struct into the user buffer. * @@ -1716,6 +1772,8 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock, long timeo; struct sk_buff *skb, *last; u32 urg_hole = 0; + struct scm_timestamping tss; + bool has_tss = false; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); @@ -1911,6 +1969,10 @@ skip_copy: if (used + offset < skb->len) continue; + if (TCP_SKB_CB(skb)->has_rxtstamp) { + tcp_update_recv_tstamps(skb, &tss); + has_tss = true; + } if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) goto found_fin_ok; if (!(flags & MSG_PEEK)) @@ -1929,6 +1991,9 @@ skip_copy: * on connected socket. I was just happy when found this 8) --ANK */ + if (has_tss) + tcp_recv_timestamp(msg, sk, &tss); + /* Clean up data we have read: This will do ACK frames. 
*/ tcp_cleanup_rbuf(sk, copied); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index d3421ee9a10a..568ccfd6dd37 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4246,9 +4246,15 @@ static void tcp_sack_remove(struct tcp_sock *tp) tp->rx_opt.num_sacks = num_sacks; } +enum tcp_queue { + OOO_QUEUE, + RCV_QUEUE, +}; + /** * tcp_try_coalesce - try to merge skb to prior one * @sk: socket + * @dest: destination queue * @to: prior buffer * @from: buffer to add in queue * @fragstolen: pointer to boolean @@ -4260,6 +4266,7 @@ static void tcp_sack_remove(struct tcp_sock *tp) * Returns true if caller should free @from instead of queueing it */ static bool tcp_try_coalesce(struct sock *sk, + enum tcp_queue dest, struct sk_buff *to, struct sk_buff *from, bool *fragstolen) @@ -4281,6 +4288,15 @@ static bool tcp_try_coalesce(struct sock *sk, TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq; TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq; TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags; + + if (TCP_SKB_CB(from)->has_rxtstamp) { + TCP_SKB_CB(to)->has_rxtstamp = true; + if (dest == OOO_QUEUE) + TCP_SKB_CB(to)->swtstamp = TCP_SKB_CB(from)->swtstamp; + else + to->tstamp = from->tstamp; + } + return true; } @@ -4315,6 +4331,9 @@ static void tcp_ofo_queue(struct sock *sk) } p = rb_next(p); rb_erase(&skb->rbnode, &tp->out_of_order_queue); + /* Replace tstamp which was stomped by rbnode */ + if (TCP_SKB_CB(skb)->has_rxtstamp) + skb->tstamp = TCP_SKB_CB(skb)->swtstamp; if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { SOCK_DEBUG(sk, "ofo packet was already received\n"); @@ -4326,7 +4345,8 @@ static void tcp_ofo_queue(struct sock *sk) TCP_SKB_CB(skb)->end_seq); tail = skb_peek_tail(&sk->sk_receive_queue); - eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen); + eaten = tail && tcp_try_coalesce(sk, RCV_QUEUE, + tail, skb, &fragstolen); tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; if (!eaten) @@ -4380,6 +4400,10 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) return; } + /* Stash tstamp to avoid being stomped on by rbnode */ + if (TCP_SKB_CB(skb)->has_rxtstamp) + TCP_SKB_CB(skb)->swtstamp = skb->tstamp; + inet_csk_schedule_ack(sk); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); @@ -4405,7 +4429,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) /* In the typical case, we are adding an skb to the end of the list. * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. */ - if (tcp_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) { + if (tcp_try_coalesce(sk, OOO_QUEUE, tp->ooo_last_skb, + skb, &fragstolen)) { coalesce_done: tcp_grow_window(sk, skb); kfree_skb_partial(skb, fragstolen); @@ -4455,7 +4480,8 @@ coalesce_done: __kfree_skb(skb1); goto merge_right; } - } else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) { + } else if (tcp_try_coalesce(sk, OOO_QUEUE, skb1, + skb, &fragstolen)) { goto coalesce_done; } p = &parent->rb_right; @@ -4506,7 +4532,8 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int __skb_pull(skb, hdrlen); eaten = (tail && - tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0; + tcp_try_coalesce(sk, RCV_QUEUE, tail, + skb, fragstolen)) ? 
1 : 0; tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); if (!eaten) { __skb_queue_tail(&sk->sk_receive_queue, skb); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5af8b809dfbc..a63486afa7a7 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1637,6 +1637,8 @@ int tcp_v4_rcv(struct sk_buff *skb) TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); TCP_SKB_CB(skb)->sacked = 0; + TCP_SKB_CB(skb)->has_rxtstamp = + skb->tstamp || skb_hwtstamps(skb)->hwtstamp; lookup: sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d79a1af3252e..abba3bc2a3d9 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1394,6 +1394,8 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); TCP_SKB_CB(skb)->sacked = 0; + TCP_SKB_CB(skb)->has_rxtstamp = + skb->tstamp || skb_hwtstamps(skb)->hwtstamp; } static int tcp_v6_rcv(struct sk_buff *skb) -- cgit v1.2.3-55-g7522 From 16e781224198be06389f6bd8a757c829bf221ea0 Mon Sep 17 00:00:00 2001 From: Mike Maloney Date: Tue, 22 Aug 2017 17:08:49 -0400 Subject: selftests/net: Add a test to validate behavior of rx timestamps Validate the behavior of the combination of various timestamp socket options, and ensure consistency across ip, udp, and tcp. Signed-off-by: Mike Maloney Acked-by: Willem de Bruijn Signed-off-by: David S. Miller --- .../selftests/networking/timestamping/.gitignore | 1 + .../selftests/networking/timestamping/Makefile | 4 +- .../networking/timestamping/rxtimestamp.c | 389 +++++++++++++++++++++ 3 files changed, 393 insertions(+), 1 deletion(-) create mode 100644 tools/testing/selftests/networking/timestamping/rxtimestamp.c diff --git a/tools/testing/selftests/networking/timestamping/.gitignore b/tools/testing/selftests/networking/timestamping/.gitignore index 9e69e982fb38..d9355035e746 100644 --- a/tools/testing/selftests/networking/timestamping/.gitignore +++ b/tools/testing/selftests/networking/timestamping/.gitignore @@ -1,3 +1,4 @@ timestamping +rxtimestamp txtimestamp hwtstamp_config diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile index ccbb9edbbbb9..92fb8ee917c5 100644 --- a/tools/testing/selftests/networking/timestamping/Makefile +++ b/tools/testing/selftests/networking/timestamping/Makefile @@ -1,4 +1,6 @@ -TEST_PROGS := hwtstamp_config timestamping txtimestamp +CFLAGS += -I../../../../../usr/include + +TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp all: $(TEST_PROGS) diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c new file mode 100644 index 000000000000..00f286661dcd --- /dev/null +++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c @@ -0,0 +1,389 @@ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +struct options { + int so_timestamp; + int so_timestampns; + int so_timestamping; +}; + +struct tstamps { + bool tstamp; + bool tstampns; + bool swtstamp; + bool hwtstamp; +}; + +struct socket_type { + char *friendly_name; + int type; + int protocol; + bool enabled; +}; + +struct test_case { + struct options sockopt; + 
struct tstamps expected; + bool enabled; +}; + +struct sof_flag { + int mask; + char *name; +}; + +static struct sof_flag sof_flags[] = { +#define SOF_FLAG(f) { f, #f } + SOF_FLAG(SOF_TIMESTAMPING_SOFTWARE), + SOF_FLAG(SOF_TIMESTAMPING_RX_SOFTWARE), + SOF_FLAG(SOF_TIMESTAMPING_RX_HARDWARE), +}; + +static struct socket_type socket_types[] = { + { "ip", SOCK_RAW, IPPROTO_EGP }, + { "udp", SOCK_DGRAM, IPPROTO_UDP }, + { "tcp", SOCK_STREAM, IPPROTO_TCP }, +}; + +static struct test_case test_cases[] = { + { {}, {} }, + { + { so_timestamp: 1 }, + { tstamp: true } + }, + { + { so_timestampns: 1 }, + { tstampns: true } + }, + { + { so_timestamp: 1, so_timestampns: 1 }, + { tstampns: true } + }, + { + { so_timestamping: SOF_TIMESTAMPING_RX_SOFTWARE }, + {} + }, + { + /* Loopback device does not support hw timestamps. */ + { so_timestamping: SOF_TIMESTAMPING_RX_HARDWARE }, + {} + }, + { + { so_timestamping: SOF_TIMESTAMPING_SOFTWARE }, + {} + }, + { + { so_timestamping: SOF_TIMESTAMPING_RX_SOFTWARE + | SOF_TIMESTAMPING_RX_HARDWARE }, + {} + }, + { + { so_timestamping: SOF_TIMESTAMPING_SOFTWARE + | SOF_TIMESTAMPING_RX_SOFTWARE }, + { swtstamp: true } + }, + { + { so_timestamp: 1, so_timestamping: SOF_TIMESTAMPING_SOFTWARE + | SOF_TIMESTAMPING_RX_SOFTWARE }, + { tstamp: true, swtstamp: true } + }, +}; + +static struct option long_options[] = { + { "list_tests", no_argument, 0, 'l' }, + { "test_num", required_argument, 0, 'n' }, + { "op_size", required_argument, 0, 's' }, + { "tcp", no_argument, 0, 't' }, + { "udp", no_argument, 0, 'u' }, + { "ip", no_argument, 0, 'i' }, +}; + +static int next_port = 19999; +static int op_size = 10 * 1024; + +void print_test_case(struct test_case *t) +{ + int f = 0; + + printf("sockopts {"); + if (t->sockopt.so_timestamp) + printf(" SO_TIMESTAMP "); + if (t->sockopt.so_timestampns) + printf(" SO_TIMESTAMPNS "); + if (t->sockopt.so_timestamping) { + printf(" SO_TIMESTAMPING: {"); + for (f = 0; f < ARRAY_SIZE(sof_flags); f++) + if (t->sockopt.so_timestamping & sof_flags[f].mask) + printf(" %s |", sof_flags[f].name); + printf("}"); + } + printf("} expected cmsgs: {"); + if (t->expected.tstamp) + printf(" SCM_TIMESTAMP "); + if (t->expected.tstampns) + printf(" SCM_TIMESTAMPNS "); + if (t->expected.swtstamp || t->expected.hwtstamp) { + printf(" SCM_TIMESTAMPING {"); + if (t->expected.swtstamp) + printf("0"); + if (t->expected.swtstamp && t->expected.hwtstamp) + printf(","); + if (t->expected.hwtstamp) + printf("2"); + printf("}"); + } + printf("}\n"); +} + +void do_send(int src) +{ + int r; + char *buf = malloc(op_size); + + memset(buf, 'z', op_size); + r = write(src, buf, op_size); + if (r < 0) + error(1, errno, "Failed to sendmsg"); + + free(buf); +} + +bool do_recv(int rcv, int read_size, struct tstamps expected) +{ + const int CMSG_SIZE = 1024; + + struct scm_timestamping *ts; + struct tstamps actual = {}; + char cmsg_buf[CMSG_SIZE]; + struct iovec recv_iov; + struct cmsghdr *cmsg; + bool failed = false; + struct msghdr hdr; + int flags = 0; + int r; + + memset(&hdr, 0, sizeof(hdr)); + hdr.msg_iov = &recv_iov; + hdr.msg_iovlen = 1; + recv_iov.iov_base = malloc(read_size); + recv_iov.iov_len = read_size; + + hdr.msg_control = cmsg_buf; + hdr.msg_controllen = sizeof(cmsg_buf); + + r = recvmsg(rcv, &hdr, flags); + if (r < 0) + error(1, errno, "Failed to recvmsg"); + if (r != read_size) + error(1, 0, "Only received %d bytes of payload.", r); + + if (hdr.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) + error(1, 0, "Message was truncated."); + + for (cmsg = CMSG_FIRSTHDR(&hdr); cmsg 
!= NULL; + cmsg = CMSG_NXTHDR(&hdr, cmsg)) { + if (cmsg->cmsg_level != SOL_SOCKET) + error(1, 0, "Unexpected cmsg_level %d", + cmsg->cmsg_level); + switch (cmsg->cmsg_type) { + case SCM_TIMESTAMP: + actual.tstamp = true; + break; + case SCM_TIMESTAMPNS: + actual.tstampns = true; + break; + case SCM_TIMESTAMPING: + ts = (struct scm_timestamping *)CMSG_DATA(cmsg); + actual.swtstamp = !!ts->ts[0].tv_sec; + if (ts->ts[1].tv_sec != 0) + error(0, 0, "ts[1] should not be set."); + actual.hwtstamp = !!ts->ts[2].tv_sec; + break; + default: + error(1, 0, "Unexpected cmsg_type %d", cmsg->cmsg_type); + } + } + +#define VALIDATE(field) \ + do { \ + if (expected.field != actual.field) { \ + if (expected.field) \ + error(0, 0, "Expected " #field " to be set."); \ + else \ + error(0, 0, \ + "Expected " #field " to not be set."); \ + failed = true; \ + } \ + } while (0) + + VALIDATE(tstamp); + VALIDATE(tstampns); + VALIDATE(swtstamp); + VALIDATE(hwtstamp); +#undef VALIDATE + + free(recv_iov.iov_base); + + return failed; +} + +void config_so_flags(int rcv, struct options o) +{ + int on = 1; + + if (setsockopt(rcv, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) < 0) + error(1, errno, "Failed to enable SO_REUSEADDR"); + + if (o.so_timestamp && + setsockopt(rcv, SOL_SOCKET, SO_TIMESTAMP, + &o.so_timestamp, sizeof(o.so_timestamp)) < 0) + error(1, errno, "Failed to enable SO_TIMESTAMP"); + + if (o.so_timestampns && + setsockopt(rcv, SOL_SOCKET, SO_TIMESTAMPNS, + &o.so_timestampns, sizeof(o.so_timestampns)) < 0) + error(1, errno, "Failed to enable SO_TIMESTAMPNS"); + + if (o.so_timestamping && + setsockopt(rcv, SOL_SOCKET, SO_TIMESTAMPING, + &o.so_timestamping, sizeof(o.so_timestamping)) < 0) + error(1, errno, "Failed to set SO_TIMESTAMPING"); +} + +bool run_test_case(struct socket_type s, struct test_case t) +{ + int port = (s.type == SOCK_RAW) ? 
0 : next_port++; + int read_size = op_size; + struct sockaddr_in addr; + bool failed = false; + int src, dst, rcv; + + src = socket(AF_INET, s.type, s.protocol); + if (src < 0) + error(1, errno, "Failed to open src socket"); + + dst = socket(AF_INET, s.type, s.protocol); + if (dst < 0) + error(1, errno, "Failed to open dst socket"); + + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); + addr.sin_port = htons(port); + + if (bind(dst, (struct sockaddr *)&addr, sizeof(addr)) < 0) + error(1, errno, "Failed to bind to port %d", port); + + if (s.type == SOCK_STREAM && (listen(dst, 1) < 0)) + error(1, errno, "Failed to listen"); + + if (connect(src, (struct sockaddr *)&addr, sizeof(addr)) < 0) + error(1, errno, "Failed to connect"); + + if (s.type == SOCK_STREAM) { + rcv = accept(dst, NULL, NULL); + if (rcv < 0) + error(1, errno, "Failed to accept"); + close(dst); + } else { + rcv = dst; + } + + config_so_flags(rcv, t.sockopt); + usleep(20000); /* setsockopt for SO_TIMESTAMPING is asynchronous */ + do_send(src); + + if (s.type == SOCK_RAW) + read_size += 20; /* for IP header */ + failed = do_recv(rcv, read_size, t.expected); + + close(rcv); + close(src); + + return failed; +} + +int main(int argc, char **argv) +{ + bool all_protocols = true; + bool all_tests = true; + int arg_index = 0; + int failures = 0; + int s, t; + char opt; + + while ((opt = getopt_long(argc, argv, "", long_options, + &arg_index)) != -1) { + switch (opt) { + case 'l': + for (t = 0; t < ARRAY_SIZE(test_cases); t++) { + printf("%d\t", t); + print_test_case(&test_cases[t]); + } + return 0; + case 'n': + t = atoi(optarg); + if (t > ARRAY_SIZE(test_cases)) + error(1, 0, "Invalid test case: %d", t); + all_tests = false; + test_cases[t].enabled = true; + break; + case 's': + op_size = atoi(optarg); + break; + case 't': + all_protocols = false; + socket_types[2].enabled = true; + break; + case 'u': + all_protocols = false; + socket_types[1].enabled = true; + break; + case 'i': + all_protocols = false; + socket_types[0].enabled = true; + break; + default: + error(1, 0, "Failed to parse parameters."); + } + } + + for (s = 0; s < ARRAY_SIZE(socket_types); s++) { + if (!all_protocols && !socket_types[s].enabled) + continue; + + printf("Testing %s...\n", socket_types[s].friendly_name); + for (t = 0; t < ARRAY_SIZE(test_cases); t++) { + if (!all_tests && !test_cases[t].enabled) + continue; + + printf("Starting testcase %d...\n", t); + if (run_test_case(socket_types[s], test_cases[t])) { + failures++; + printf("FAILURE in test case "); + print_test_case(&test_cases[t]); + } + } + } + if (!failures) + printf("PASSED.\n"); + return failures; +} -- cgit v1.2.3-55-g7522 From 5f9ae3d9e7e4ad6db0491abc7c4ae5452dbeadd8 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Wed, 23 Aug 2017 10:07:26 +0800 Subject: ipv4: do metrics match when looking up and deleting a route Now when ipv4 route inserts a fib_info, it memcmp fib_metrics. It means ipv4 route identifies one route also with metrics. But when removing a route, it tries to find the route without caring about the metrics. It will cause that the route with right metrics can't be removed. Thomas noticed this issue when doing the testing: 1. add: # ip route append 192.168.7.0/24 dev v window 1000 # ip route append 192.168.7.0/24 dev v window 1001 # ip route append 192.168.7.0/24 dev v window 1002 # ip route append 192.168.7.0/24 dev v window 1003 2. delete: # ip route delete 192.168.7.0/24 dev v window 1002 3. 
show: 192.168.7.0/24 proto boot scope link window 1001 192.168.7.0/24 proto boot scope link window 1002 192.168.7.0/24 proto boot scope link window 1003 The one with window 1002 wasn't deleted but the first one was. This patch is to do metrics match when looking up and deleting one route. Reported-by: Thomas Haller Signed-off-by: Xin Long Acked-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- net/ipv4/fib_lookup.h | 1 + net/ipv4/fib_semantics.c | 34 ++++++++++++++++++++++++++++++++++ net/ipv4/fib_trie.c | 3 ++- 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h index 769ab87ebc4b..5b2af19cfb5b 100644 --- a/net/ipv4/fib_lookup.h +++ b/net/ipv4/fib_lookup.h @@ -32,6 +32,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg, struct netlink_ext_ack *extack); int fib_nh_match(struct fib_config *cfg, struct fib_info *fi, struct netlink_ext_ack *extack); +bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi); int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos, struct fib_info *fi, unsigned int); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 394d800db50c..57a5d48acee8 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -696,6 +696,40 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi, return 0; } +bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi) +{ + struct nlattr *nla; + int remaining; + + if (!cfg->fc_mx) + return true; + + nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { + int type = nla_type(nla); + u32 val; + + if (!type) + continue; + if (type > RTAX_MAX) + return false; + + if (type == RTAX_CC_ALGO) { + char tmp[TCP_CA_NAME_MAX]; + bool ecn_ca = false; + + nla_strlcpy(tmp, nla, sizeof(tmp)); + val = tcp_ca_get_key_by_name(tmp, &ecn_ca); + } else { + val = nla_get_u32(nla); + } + + if (fi->fib_metrics->metrics[type - 1] != val) + return false; + } + + return true; +} + /* * Picture diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 1a6ffb0dab9c..c636650a6a70 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1563,7 +1563,8 @@ int fib_table_delete(struct net *net, struct fib_table *tb, fi->fib_prefsrc == cfg->fc_prefsrc) && (!cfg->fc_protocol || fi->fib_protocol == cfg->fc_protocol) && - fib_nh_match(cfg, fi, extack) == 0) { + fib_nh_match(cfg, fi, extack) == 0 && + fib_metrics_match(cfg, fi)) { fa_to_delete = fa; break; } -- cgit v1.2.3-55-g7522 From f9cbe9a556afca9e82df9aebe4412d93769566b5 Mon Sep 17 00:00:00 2001 From: Antoine Ténart Date: Wed, 23 Aug 2017 09:46:54 +0200 Subject: net: define the TSO header size in net/tso.h The TSO header size was defined in many drivers. Factorize the code and define its size in net/tso.h. Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 1 - drivers/net/ethernet/freescale/fec_main.c | 1 - drivers/net/ethernet/marvell/mv643xx_eth.c | 2 -- drivers/net/ethernet/marvell/mvneta.c | 3 --- include/net/tso.h | 2 ++ 5 files changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 57858522c33c..67d1a3230773 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h @@ -277,7 +277,6 @@ struct snd_queue { u16 xdp_free_cnt; bool is_xdp; -#define TSO_HEADER_SIZE 128 /* For TSO segment's header */ char *tso_hdrs; dma_addr_t tso_hdrs_phys; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index df09b254553d..56f56d6ada9c 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -226,7 +226,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define COPYBREAK_DEFAULT 256 -#define TSO_HEADER_SIZE 128 /* Max number of allowed TCP segments for software TSO */ #define FEC_MAX_TSO_SEGS 100 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 9c94ea9b2b80..fb2d533ae4ef 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -183,8 +183,6 @@ static char mv643xx_eth_driver_version[] = "1.4"; #define DEFAULT_TX_QUEUE_SIZE 512 #define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES) -#define TSO_HEADER_SIZE 128 - /* Max number of allowed TCP segments for software TSO */ #define MV643XX_MAX_TSO_SEGS 100 #define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 0aab74c2a209..35ff1ecfcff0 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -281,9 +281,6 @@ */ #define MVNETA_RSS_LU_TABLE_SIZE 1 -/* TSO header size */ -#define TSO_HEADER_SIZE 128 - /* Max number of Rx descriptors */ #define MVNETA_MAX_RXD 128 diff --git a/include/net/tso.h b/include/net/tso.h index b7be852bfe9d..9a56c39e6d0a 100644 --- a/include/net/tso.h +++ b/include/net/tso.h @@ -3,6 +3,8 @@ #include +#define TSO_HEADER_SIZE 128 + struct tso_t { int next_frag_idx; void *data; -- cgit v1.2.3-55-g7522 From 85affd7e29e78dbf39f6b1e3a3ddf8432168e231 Mon Sep 17 00:00:00 2001 From: Antoine Ténart Date: Wed, 23 Aug 2017 09:46:55 +0200 Subject: net: mvpp2: unify the txq size define use The txq size is defined by MVPP2_AGGR_TXQ_SIZE, which is sometime not used directly but through variables. As it is a fixed value use the define everywhere in the driver. Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 02c62cbbfe51..eee878809c08 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5284,15 +5284,14 @@ static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause, /* Allocate and initialize descriptors for aggr TXQ */ static int mvpp2_aggr_txq_init(struct platform_device *pdev, - struct mvpp2_tx_queue *aggr_txq, - int desc_num, int cpu, + struct mvpp2_tx_queue *aggr_txq, int cpu, struct mvpp2 *priv) { u32 txq_dma; /* Allocate memory for TX descriptors */ aggr_txq->descs = dma_alloc_coherent(&pdev->dev, - desc_num * MVPP2_DESC_ALIGNED_SIZE, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, &aggr_txq->descs_dma, GFP_KERNEL); if (!aggr_txq->descs) return -ENOMEM; @@ -5313,7 +5312,8 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev, MVPP22_AGGR_TXQ_DESC_ADDR_OFFS; mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma); - mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num); + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), + MVPP2_AGGR_TXQ_SIZE); return 0; } @@ -7445,8 +7445,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv) for_each_present_cpu(i) { priv->aggr_txqs[i].id = i; priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE; - err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], - MVPP2_AGGR_TXQ_SIZE, i, priv); + err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv); if (err < 0) return err; } -- cgit v1.2.3-55-g7522 From 186cd4d4e4144803652212eb0b7413141469feee Mon Sep 17 00:00:00 2001 From: Antoine Ténart Date: Wed, 23 Aug 2017 09:46:56 +0200 Subject: net: mvpp2: software tso support The patch uses the tso API to implement the tso functionality in Marvell PPv2 driver. Using iperf and 10G ports, using TSO shows a significant performance improvement by a factor 2 to reach around 9.5Gbps in TX; as well as a significant CPU usage drop (from 25% to 15%). Signed-off-by: Antoine Tenart Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 171 ++++++++++++++++++++++++++++++++--- 1 file changed, 157 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index eee878809c08..7fa251bf91ae 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -35,6 +35,7 @@ #include #include #include +#include /* RX Fifo Registers */ #define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) @@ -1010,6 +1011,10 @@ struct mvpp2_txq_pcpu { /* Index of the TX DMA descriptor to be cleaned up */ int txq_get_index; + + /* DMA buffer for TSO headers */ + char *tso_headers; + dma_addr_t tso_headers_dma; }; struct mvpp2_tx_queue { @@ -5494,6 +5499,14 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq_pcpu->reserved_num = 0; txq_pcpu->txq_put_index = 0; txq_pcpu->txq_get_index = 0; + + txq_pcpu->tso_headers = + dma_alloc_coherent(port->dev->dev.parent, + MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE, + &txq_pcpu->tso_headers_dma, + GFP_KERNEL); + if (!txq_pcpu->tso_headers) + goto cleanup; } return 0; @@ -5501,6 +5514,11 @@ cleanup: for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->buffs); + + dma_free_coherent(port->dev->dev.parent, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + txq_pcpu->tso_headers, + txq_pcpu->tso_headers_dma); } dma_free_coherent(port->dev->dev.parent, @@ -5520,6 +5538,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); kfree(txq_pcpu->buffs); + + dma_free_coherent(port->dev->dev.parent, + MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, + txq_pcpu->tso_headers, + txq_pcpu->tso_headers_dma); } if (txq->descs) @@ -6049,6 +6072,123 @@ cleanup: return -ENOMEM; } +static inline void mvpp2_tso_put_hdr(struct sk_buff *skb, + struct net_device *dev, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int hdr_sz) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + dma_addr_t addr; + + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, hdr_sz); + + addr = txq_pcpu->tso_headers_dma + + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; + mvpp2_txdesc_offset_set(port, tx_desc, addr & MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, addr & ~MVPP2_TX_DESC_ALIGN); + + mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) | + MVPP2_TXD_F_DESC | + MVPP2_TXD_PADDING_DISABLE); + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); +} + +static inline int mvpp2_tso_put_data(struct sk_buff *skb, + struct net_device *dev, struct tso_t *tso, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu, + int sz, bool left, bool last) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq); + dma_addr_t buf_dma_addr; + + mvpp2_txdesc_txq_set(port, tx_desc, txq->id); + mvpp2_txdesc_size_set(port, tx_desc, sz); + + buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) { + mvpp2_txq_desc_put(txq); + return -ENOMEM; + } + + mvpp2_txdesc_offset_set(port, tx_desc, + buf_dma_addr & MVPP2_TX_DESC_ALIGN); + mvpp2_txdesc_dma_addr_set(port, tx_desc, + buf_dma_addr & ~MVPP2_TX_DESC_ALIGN); + + if (!left) { + mvpp2_txdesc_cmd_set(port, tx_desc, 
MVPP2_TXD_L_DESC); + if (last) { + mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc); + return 0; + } + } else { + mvpp2_txdesc_cmd_set(port, tx_desc, 0); + } + + mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc); + return 0; +} + +static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev, + struct mvpp2_tx_queue *txq, + struct mvpp2_tx_queue *aggr_txq, + struct mvpp2_txq_pcpu *txq_pcpu) +{ + struct mvpp2_port *port = netdev_priv(dev); + struct tso_t tso; + int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); + int i, len, descs = 0; + + /* Check number of available descriptors */ + if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, + tso_count_descs(skb)) || + mvpp2_txq_reserved_desc_num_proc(port->priv, txq, txq_pcpu, + tso_count_descs(skb))) + return 0; + + tso_start(skb, &tso); + len = skb->len - hdr_sz; + while (len > 0) { + int left = min_t(int, skb_shinfo(skb)->gso_size, len); + char *hdr = txq_pcpu->tso_headers + + txq_pcpu->txq_put_index * TSO_HEADER_SIZE; + + len -= left; + descs++; + + tso_build_hdr(skb, hdr, &tso, left, len == 0); + mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz); + + while (left > 0) { + int sz = min_t(int, tso.size, left); + left -= sz; + descs++; + + if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq, + txq_pcpu, sz, left, len == 0)) + goto release; + tso_build_data(skb, &tso, sz); + } + } + + return descs; + +release: + for (i = descs - 1; i >= 0; i--) { + struct mvpp2_tx_desc *tx_desc = txq->descs + i; + tx_desc_unmap_put(port, txq, tx_desc); + } + return 0; +} + /* Main tx processing */ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) { @@ -6066,6 +6206,10 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) txq_pcpu = this_cpu_ptr(txq->pcpu); aggr_txq = &port->priv->aggr_txqs[smp_processor_id()]; + if (skb_is_gso(skb)) { + frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu); + goto out; + } frags = skb_shinfo(skb)->nr_frags + 1; /* Check number of available descriptors */ @@ -6115,22 +6259,21 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev) } } - txq_pcpu->reserved_num -= frags; - txq_pcpu->count += frags; - aggr_txq->count += frags; - - /* Enable transmit */ - wmb(); - mvpp2_aggr_txq_pend_desc_add(port, frags); - - if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) { - struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); - - netif_tx_stop_queue(nq); - } out: if (frags > 0) { struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats); + struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id); + + txq_pcpu->reserved_num -= frags; + txq_pcpu->count += frags; + aggr_txq->count += frags; + + /* Enable transmit */ + wmb(); + mvpp2_aggr_txq_pend_desc_add(port, frags); + + if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) + netif_tx_stop_queue(nq); u64_stats_update_begin(&stats->syncp); stats->tx_packets++; @@ -7255,7 +7398,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, } } - features = NETIF_F_SG | NETIF_F_IP_CSUM; + features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; dev->features = features | NETIF_F_RXCSUM; dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO; dev->vlan_features |= features; -- cgit v1.2.3-55-g7522 From 45b62742df6a27fba71f799cd1a2f721e1c0d584 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 23 Aug 2017 10:08:18 +0200 Subject: mlxsw: spectrum: Offload multichain TC rules Reflect chain index coming down from TC core and create a ruleset per chain. 
Note that only chain 0, being the implicit chain, is bound to the device for processing. The rest of chains have to be "jumped-to" by actions. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 3 --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 28 +++++++++++++++------- .../net/ethernet/mellanox/mlxsw/spectrum_flower.c | 5 ++-- 4 files changed, 25 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 90a95cdc1626..6e641db16702 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1733,9 +1733,6 @@ mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port, else return -EOPNOTSUPP; - if (f->common.chain_index) - return -EOPNOTSUPP; - switch (f->command) { case TC_CLSFLOWER_REPLACE: return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 8452d1db2f3f..de3aef971e2d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -440,8 +440,8 @@ struct mlxsw_sp_acl_ruleset; /* spectrum_acl.c */ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); struct mlxsw_sp_acl_ruleset * -mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, bool ingress, +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, + bool ingress, u32 chain_index, enum mlxsw_sp_acl_profile profile); void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 508b5fcacd77..8ab331bdff02 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -74,6 +74,7 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) struct mlxsw_sp_acl_ruleset_ht_key { struct net_device *dev; /* dev this ruleset is bound to */ bool ingress; + u32 chain_index; const struct mlxsw_sp_acl_profile_ops *ops; }; @@ -163,7 +164,8 @@ static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, - struct net_device *dev, bool ingress) + struct net_device *dev, bool ingress, + u32 chain_index) { const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; @@ -171,13 +173,20 @@ static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, ruleset->ht_key.dev = dev; ruleset->ht_key.ingress = ingress; + ruleset->ht_key.chain_index = chain_index; err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node, mlxsw_sp_acl_ruleset_ht_params); if (err) return err; - err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress); - if (err) - goto err_ops_ruleset_bind; + if (!ruleset->ht_key.chain_index) { + /* We only need ruleset with chain index 0, the implicit one, + * to be directly bound to device. The rest of the rulesets + * are bound by "Goto action set". 
+ */ + err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress); + if (err) + goto err_ops_ruleset_bind; + } return 0; err_ops_ruleset_bind: @@ -192,7 +201,8 @@ static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; - ops->ruleset_unbind(mlxsw_sp, ruleset->priv); + if (!ruleset->ht_key.chain_index) + ops->ruleset_unbind(mlxsw_sp, ruleset->priv); rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node, mlxsw_sp_acl_ruleset_ht_params); } @@ -212,8 +222,8 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, } struct mlxsw_sp_acl_ruleset * -mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, bool ingress, +mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, + bool ingress, u32 chain_index, enum mlxsw_sp_acl_profile profile) { const struct mlxsw_sp_acl_profile_ops *ops; @@ -229,6 +239,7 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, memset(&ht_key, 0, sizeof(ht_key)); ht_key.dev = dev; ht_key.ingress = ingress; + ht_key.chain_index = chain_index; ht_key.ops = ops; ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, mlxsw_sp_acl_ruleset_ht_params); @@ -239,7 +250,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops); if (IS_ERR(ruleset)) return ruleset; - err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, ingress); + err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev, + ingress, chain_index); if (err) goto err_ruleset_bind; return ruleset; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 95428b41c50f..34872aa52073 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -378,6 +378,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, int err; ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress, + f->common.chain_index, MLXSW_SP_ACL_PROFILE_FLOWER); if (IS_ERR(ruleset)) return PTR_ERR(ruleset); @@ -421,7 +422,7 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, struct mlxsw_sp_acl_rule *rule; ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, - ingress, + ingress, f->common.chain_index, MLXSW_SP_ACL_PROFILE_FLOWER); if (IS_ERR(ruleset)) return; @@ -447,7 +448,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, int err; ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev, - ingress, + ingress, f->common.chain_index, MLXSW_SP_ACL_PROFILE_FLOWER); if (WARN_ON(IS_ERR(ruleset))) return -EINVAL; -- cgit v1.2.3-55-g7522 From e457d86ada27cbd2f46ded75d4b4bc06e26d0e2e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 23 Aug 2017 10:08:19 +0200 Subject: net: sched: add couple of goto_chain helpers Add helpers to find out if a gact instance is goto_chain termination action and to get chain index. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/net/tc_act/tc_gact.h | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h index d576374c4d6f..41afe1ce7b16 100644 --- a/include/net/tc_act/tc_gact.h +++ b/include/net/tc_act/tc_gact.h @@ -15,7 +15,8 @@ struct tcf_gact { }; #define to_gact(a) ((struct tcf_gact *)a) -static inline bool __is_tcf_gact_act(const struct tc_action *a, int act) +static inline bool __is_tcf_gact_act(const struct tc_action *a, int act, + bool is_ext) { #ifdef CONFIG_NET_CLS_ACT struct tcf_gact *gact; @@ -24,7 +25,8 @@ static inline bool __is_tcf_gact_act(const struct tc_action *a, int act) return false; gact = to_gact(a); - if (gact->tcf_action == act) + if ((!is_ext && gact->tcf_action == act) || + (is_ext && TC_ACT_EXT_CMP(gact->tcf_action, act))) return true; #endif @@ -33,12 +35,22 @@ static inline bool __is_tcf_gact_act(const struct tc_action *a, int act) static inline bool is_tcf_gact_shot(const struct tc_action *a) { - return __is_tcf_gact_act(a, TC_ACT_SHOT); + return __is_tcf_gact_act(a, TC_ACT_SHOT, false); } static inline bool is_tcf_gact_trap(const struct tc_action *a) { - return __is_tcf_gact_act(a, TC_ACT_TRAP); + return __is_tcf_gact_act(a, TC_ACT_TRAP, false); +} + +static inline bool is_tcf_gact_goto_chain(const struct tc_action *a) +{ + return __is_tcf_gact_act(a, TC_ACT_GOTO_CHAIN, true); +} + +static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a) +{ + return a->goto_chain->index; } #endif /* __NET_TC_GACT_H */ -- cgit v1.2.3-55-g7522 From 0ade3b6457b5535548c2bdad44b5bde6ebd1b7e2 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 23 Aug 2017 10:08:20 +0200 Subject: mlxsw: spectrum_acl: Allow to get group_id value for a ruleset For goto_chain action we need to know group_id of a ruleset to jump to. Provide infrastructure in order to get it. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 ++ drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 7 +++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 15 +++++++++++++++ 3 files changed, 24 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index de3aef971e2d..1866f692d3cd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -417,6 +417,7 @@ struct mlxsw_sp_acl_profile_ops { int (*ruleset_bind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, struct net_device *dev, bool ingress); void (*ruleset_unbind)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv); + u16 (*ruleset_group_id)(void *ruleset_priv); size_t rule_priv_size; int (*rule_add)(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, @@ -445,6 +446,7 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, enum mlxsw_sp_acl_profile profile); void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset); +u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset); struct mlxsw_sp_acl_rule_info * mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 8ab331bdff02..ef86f044295a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -267,6 +267,13 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset); } +u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset) +{ + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + + return ops->ruleset_group_id(ruleset->priv); +} + static int mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index bc5173f1b5c1..50b40de1fb91 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -295,6 +295,12 @@ mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl); } +static u16 +mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group) +{ + return group->id; +} + static unsigned int mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region) { @@ -1063,6 +1069,14 @@ mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group); } +static u16 +mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv; + + return mlxsw_sp_acl_tcam_group_id(&ruleset->group); +} + static int mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, void *rule_priv, @@ -1099,6 +1113,7 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .ruleset_del = mlxsw_sp_acl_tcam_flower_ruleset_del, .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind, .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind, + .ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id, .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule), .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, .rule_del = 
mlxsw_sp_acl_tcam_flower_rule_del, -- cgit v1.2.3-55-g7522 From dbec8ee95ab3ed231cba59f452531d6ce01fe24f Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 23 Aug 2017 10:08:21 +0200 Subject: mlxsw: spectrum_acl: Provide helper to lookup ruleset We need to lookup ruleset in order to offload goto_chain termination action. This patch adds it. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4 ++ drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 45 ++++++++++++++++++---- 2 files changed, 41 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 1866f692d3cd..f8c7f7e930c5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -441,6 +441,10 @@ struct mlxsw_sp_acl_ruleset; /* spectrum_acl.c */ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, + bool ingress, u32 chain_index, + enum mlxsw_sp_acl_profile profile); +struct mlxsw_sp_acl_ruleset * mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, bool ingress, u32 chain_index, enum mlxsw_sp_acl_profile profile); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index ef86f044295a..4b2455e3e079 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -221,6 +221,41 @@ static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset); } +static struct mlxsw_sp_acl_ruleset * +__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev, + bool ingress, u32 chain_index, + const struct mlxsw_sp_acl_profile_ops *ops) +{ + struct mlxsw_sp_acl_ruleset_ht_key ht_key; + + memset(&ht_key, 0, sizeof(ht_key)); + ht_key.dev = dev; + ht_key.ingress = ingress; + ht_key.chain_index = chain_index; + ht_key.ops = ops; + return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, + mlxsw_sp_acl_ruleset_ht_params); +} + +struct mlxsw_sp_acl_ruleset * +mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, + bool ingress, u32 chain_index, + enum mlxsw_sp_acl_profile profile) +{ + const struct mlxsw_sp_acl_profile_ops *ops; + struct mlxsw_sp_acl *acl = mlxsw_sp->acl; + struct mlxsw_sp_acl_ruleset *ruleset; + + ops = acl->ops->profile_ops(mlxsw_sp, profile); + if (!ops) + return ERR_PTR(-EINVAL); + ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress, + chain_index, ops); + if (!ruleset) + return ERR_PTR(-ENOENT); + return ruleset; +} + struct mlxsw_sp_acl_ruleset * mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, bool ingress, u32 chain_index, @@ -228,7 +263,6 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, { const struct mlxsw_sp_acl_profile_ops *ops; struct mlxsw_sp_acl *acl = mlxsw_sp->acl; - struct mlxsw_sp_acl_ruleset_ht_key ht_key; struct mlxsw_sp_acl_ruleset *ruleset; int err; @@ -236,13 +270,8 @@ mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev, if (!ops) return ERR_PTR(-EINVAL); - memset(&ht_key, 0, sizeof(ht_key)); - ht_key.dev = dev; - ht_key.ingress = ingress; - ht_key.chain_index = chain_index; - ht_key.ops = ops; - ruleset = rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key, - 
mlxsw_sp_acl_ruleset_ht_params); + ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress, + chain_index, ops); if (ruleset) { mlxsw_sp_acl_ruleset_ref_inc(ruleset); return ruleset; -- cgit v1.2.3-55-g7522 From 0ede6ba2a1de08a2a9a5c5ab78e31f1e6a333ed2 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 23 Aug 2017 10:08:22 +0200 Subject: mlxsw: spectrum_flower: Offload goto_chain termination action If action is gact goto_chain, offload it to HW by jumping to another ruleset. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_flower.c | 23 ++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 34872aa52073..8aace9a06a5d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -45,7 +45,7 @@ #include "core_acl_flex_keys.h" static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, + struct net_device *dev, bool ingress, struct mlxsw_sp_acl_rule_info *rulei, struct tcf_exts *exts) { @@ -71,6 +71,20 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, err = mlxsw_sp_acl_rulei_act_trap(rulei); if (err) return err; + } else if (is_tcf_gact_goto_chain(a)) { + u32 chain_index = tcf_gact_goto_chain_index(a); + struct mlxsw_sp_acl_ruleset *ruleset; + u16 group_id; + + ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev, + ingress, + chain_index, + MLXSW_SP_ACL_PROFILE_FLOWER); + if (IS_ERR(ruleset)) + return PTR_ERR(ruleset); + + group_id = mlxsw_sp_acl_ruleset_group_id(ruleset); + mlxsw_sp_acl_rulei_act_jump(rulei, group_id); } else if (is_tcf_mirred_egress_redirect(a)) { int ifindex = tcf_mirred_ifindex(a); struct net_device *out_dev; @@ -246,7 +260,7 @@ static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, - struct net_device *dev, + struct net_device *dev, bool ingress, struct mlxsw_sp_acl_rule_info *rulei, struct tc_cls_flower_offload *f) { @@ -364,7 +378,8 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, if (err) return err; - return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, rulei, f->exts); + return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress, + rulei, f->exts); } int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, @@ -390,7 +405,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress, } rulei = mlxsw_sp_acl_rule_rulei(rule); - err = mlxsw_sp_flower_parse(mlxsw_sp, dev, rulei, f); + err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f); if (err) goto err_flower_parse; -- cgit v1.2.3-55-g7522 From 257a73031d29447ee82fe06d2b97d8564f63276d Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Wed, 23 Aug 2017 11:57:51 +0200 Subject: net/sock: allow the user to set negative peek offset This is necessary to allow the user to disable peeking with offset once it's enabled. Unix sockets already allow the above, with this patch we permit it for udp[6] sockets, too. Fixes: 627d2d6b5500 ("udp: enable MSG_PEEK at non-zero offset") Signed-off-by: Paolo Abeni Acked-by: Willem de Bruijn Signed-off-by: David S. 
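As an aside, a minimal userspace sketch (not part of the patch) of what this change enables: SO_PEEK_OFF can be turned on for a UDP socket and, with this patch, turned back off again by writing a negative value, which was previously rejected with -EINVAL. The fallback define of 42 is the asm-generic value and is only an assumption for older headers.

/*
 * Hedged illustration only: enable peek-with-offset on a UDP socket,
 * then disable it again with a negative SO_PEEK_OFF value.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SO_PEEK_OFF
#define SO_PEEK_OFF 42	/* asm-generic value; assumption for old headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int off = 0;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Enable MSG_PEEK at an offset that advances with each peek. */
	if (setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off)))
		perror("enable SO_PEEK_OFF");

	/* ... recv(fd, buf, sizeof(buf), MSG_PEEK) calls would go here ... */

	/* Disable peeking with offset again: rejected before this patch,
	 * accepted afterwards (matching unix socket behaviour). */
	off = -1;
	if (setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off)))
		perror("disable SO_PEEK_OFF");

	close(fd);
	return 0;
}

The diff itself follows; it simply drops the negative-value check from sk_set_peek_off().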
Miller --- net/core/sock.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 0f04d8bff607..dfdd14cac775 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2454,9 +2454,6 @@ EXPORT_SYMBOL(__sk_mem_reclaim); int sk_set_peek_off(struct sock *sk, int val) { - if (val < 0) - return -EINVAL; - sk->sk_peek_off = val; return 0; } -- cgit v1.2.3-55-g7522 From 5719e5eb31e485d9eac8e2303ef9ca2f42c49225 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 23 Aug 2017 10:59:40 +0100 Subject: net: hinic: make functions set_ctrl0 and set_ctrl1 static The functions set_ctrl0 and set_ctrl1 are local to the source and do not need to be in global scope, so make them static. Cleans up sparse warnings: symbol 'set_ctrl0' was not declared. Should it be static? symbol 'set_ctrl1' was not declared. Should it be static? Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c index cd09e6ef3aea..7cb8b9b94726 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c @@ -423,7 +423,7 @@ static irqreturn_t ceq_interrupt(int irq, void *data) return IRQ_HANDLED; } -void set_ctrl0(struct hinic_eq *eq) +static void set_ctrl0(struct hinic_eq *eq) { struct msix_entry *msix_entry = &eq->msix_entry; enum hinic_eq_type type = eq->type; @@ -474,7 +474,7 @@ void set_ctrl0(struct hinic_eq *eq) } } -void set_ctrl1(struct hinic_eq *eq) +static void set_ctrl1(struct hinic_eq *eq) { enum hinic_eq_type type = eq->type; u32 page_size_val, elem_size; -- cgit v1.2.3-55-g7522 From 042a90106b09beff4fa9015d1940e45ce10297ab Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Wed, 23 Aug 2017 16:22:20 +0530 Subject: net: tipc: constify genl_ops genl_ops are not supposed to change at runtime. All functions working with genl_ops provided by work with const genl_ops. So mark the non-const structs as const. Signed-off-by: Arvind Yadav Signed-off-by: David S. Miller --- net/tipc/netlink_compat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 750949dfc1d7..e48f0b2c01b9 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -1217,7 +1217,7 @@ send: return err; } -static struct genl_ops tipc_genl_compat_ops[] = { +static const struct genl_ops tipc_genl_compat_ops[] = { { .cmd = TIPC_GENL_CMD, .doit = tipc_nl_compat_recv, -- cgit v1.2.3-55-g7522 From 60890e046081aef61980dbc812ac5100ad078a87 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 23 Aug 2017 12:59:48 +0100 Subject: gre: remove duplicated assignment of iph iph is being assigned the same value twice; remove the redundant first assignment. (Thanks to Nikolay Aleksandrov for pointing out that the first asssignment should be removed and not the second) Fixes warning: net/ipv4/ip_gre.c:265:2: warning: Value stored to 'iph' is never read Signed-off-by: Colin Ian King Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- net/ipv4/ip_gre.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 5a20ba9b9b50..f70674799fdd 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -262,7 +262,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, int len; itn = net_generic(net, erspan_net_id); - iph = ip_hdr(skb); len = gre_hdr_len + sizeof(*ershdr); if (unlikely(!pskb_may_pull(skb, len))) -- cgit v1.2.3-55-g7522 From d893dc26e3f42e12ae75703c52cc6de5578ff1f5 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Wed, 23 Aug 2017 15:09:46 +0100 Subject: selftests/bpf: add a test for a bug in liveness-based pruning Writes in straight-line code should not prevent reads from propagating along jumps. With current verifier code, the jump from 3 to 5 does not add a read mark on 3:R0 (because 5:R0 has a write mark), meaning that the jump from 1 to 3 gets pruned as safe even though R0 is NOT_INIT. Verifier output: 0: (61) r2 = *(u32 *)(r1 +0) 1: (35) if r2 >= 0x0 goto pc+1 R1=ctx(id=0,off=0,imm=0) R2=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R10=fp0 2: (b7) r0 = 0 3: (35) if r2 >= 0x0 goto pc+1 R0=inv0 R1=ctx(id=0,off=0,imm=0) R2=inv(id=0,umax_value=4294967295,var_off=(0x0; 0xffffffff)) R10=fp0 4: (b7) r0 = 0 5: (95) exit from 3 to 5: safe from 1 to 3: safe processed 8 insns, stack depth 0 Signed-off-by: Edward Cree Acked-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- tools/testing/selftests/bpf/test_verifier.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index c03542c417db..c912734d2364 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -6487,6 +6487,22 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_LWT_IN, }, + { + "liveness pruning and write screening", + .insns = { + /* Get an unknown value */ + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0), + /* branch conditions teach us nothing about R2 */ + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R0 !read_ok", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_LWT_IN, + }, }; static int probe_filter_length(const struct bpf_insn *fp) -- cgit v1.2.3-55-g7522 From 63f45f840634ab5fd71bbc07acff915277764068 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Wed, 23 Aug 2017 15:10:03 +0100 Subject: bpf/verifier: when pruning a branch, ignore its write marks The fact that writes occurred in reaching the continuation state does not screen off its reads from us, because we're not really its parent. So detect 'not really the parent' in do_propagate_liveness, and ignore write marks in that case. Fixes: dc503a8ad984 ("bpf/verifier: track liveness for pruning") Signed-off-by: Edward Cree Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- kernel/bpf/verifier.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e42c096ba20d..fdbaa6086559 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3436,6 +3436,7 @@ out_free: static bool do_propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { + bool writes = parent == state->parent; /* Observe write marks */ bool touched = false; /* any changes made? */ int i; @@ -3447,7 +3448,9 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state, for (i = 0; i < BPF_REG_FP; i++) { if (parent->regs[i].live & REG_LIVE_READ) continue; - if (state->regs[i].live == REG_LIVE_READ) { + if (writes && (state->regs[i].live & REG_LIVE_WRITTEN)) + continue; + if (state->regs[i].live & REG_LIVE_READ) { parent->regs[i].live |= REG_LIVE_READ; touched = true; } @@ -3460,7 +3463,9 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state, continue; if (parent->spilled_regs[i].live & REG_LIVE_READ) continue; - if (state->spilled_regs[i].live == REG_LIVE_READ) { + if (writes && (state->spilled_regs[i].live & REG_LIVE_WRITTEN)) + continue; + if (state->spilled_regs[i].live & REG_LIVE_READ) { parent->spilled_regs[i].live |= REG_LIVE_READ; touched = true; } -- cgit v1.2.3-55-g7522 From df20cb7ec17577c94ef93fa86c7c80958046a01e Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 23 Aug 2017 15:10:26 +0100 Subject: selftests/bpf: add a test for a pruning bug in the verifier The test makes a read through a map value pointer, then considers pruning a branch where the register holds an adjusted map value pointer. It should not prune, but currently it does. Signed-off-by: Alexei Starovoitov [ecree@solarflare.com: added test-name and patch description] Signed-off-by: Edward Cree Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- tools/testing/selftests/bpf/test_verifier.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index c912734d2364..353d17015641 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -6503,6 +6503,34 @@ static struct bpf_test tests[] = { .result = REJECT, .prog_type = BPF_PROG_TYPE_LWT_IN, }, + { + "varlen_map_value_access pruning", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES), + BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1), + BPF_MOV32_IMM(BPF_REG_1, 0), + BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, + offsetof(struct test_val, foo)), + BPF_EXIT_INSN(), + }, + .fixup_map2 = { 3 }, + .errstr_unpriv = "R0 leaks addr", + .errstr = "R0 unbounded memory access", + .result_unpriv = REJECT, + .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, + }, }; static int probe_filter_length(const struct bpf_insn *fp) -- cgit v1.2.3-55-g7522 From 1b688a19a92223cf2d1892c9d05d64dc397b33e3 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Wed, 23 Aug 2017 15:10:50 +0100 Subject: bpf/verifier: remove varlen_map_value_access flag The optimisation it does is broken when the 'new' register value has a variable offset and the 'old' was constant. I broke it with my pointer types unification (see Fixes tag below), before which the 'new' value would have type PTR_TO_MAP_VALUE_ADJ and would thus not compare equal; other changes in that patch mean that its original behaviour (ignore min/max values) cannot be restored. Tests on a sample set of cilium programs show no change in count of processed instructions. Fixes: f1174f77b50c ("bpf/verifier: rework value tracking") Signed-off-by: Edward Cree Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- include/linux/bpf_verifier.h | 1 - kernel/bpf/verifier.c | 41 ++++++++++++----------------------------- 2 files changed, 12 insertions(+), 30 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 91d07efed2ba..d8f131a36fd0 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -125,7 +125,6 @@ struct bpf_verifier_env { u32 id_gen; /* used to generate unique reg IDs */ bool allow_ptr_leaks; bool seen_direct_write; - bool varlen_map_value_access; struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ }; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index fdbaa6086559..711bdbd22cea 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -832,11 +832,6 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, */ if (log_level) print_verifier_state(state); - /* If the offset is variable, we will need to be stricter in state - * pruning from now on. - */ - if (!tnum_is_const(reg->var_off)) - env->varlen_map_value_access = true; /* The minimum value is only important with signed * comparisons where we can't assume the floor of a * value is 0. 
If we are using signed variables for our @@ -3247,9 +3242,8 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) } /* Returns true if (rold safe implies rcur safe) */ -static bool regsafe(struct bpf_reg_state *rold, - struct bpf_reg_state *rcur, - bool varlen_map_access, struct idpair *idmap) +static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, + struct idpair *idmap) { if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ @@ -3281,22 +3275,14 @@ static bool regsafe(struct bpf_reg_state *rold, tnum_is_unknown(rold->var_off); } case PTR_TO_MAP_VALUE: - if (varlen_map_access) { - /* If the new min/max/var_off satisfy the old ones and - * everything else matches, we are OK. - * We don't care about the 'id' value, because nothing - * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) - */ - return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && - range_within(rold, rcur) && - tnum_in(rold->var_off, rcur->var_off); - } else { - /* If the ranges/var_off were not the same, but - * everything else was and we didn't do a variable - * access into a map then we are a-ok. - */ - return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0; - } + /* If the new min/max/var_off satisfy the old ones and + * everything else matches, we are OK. + * We don't care about the 'id' value, because nothing + * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) + */ + return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && + range_within(rold, rcur) && + tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. @@ -3380,7 +3366,6 @@ static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { - bool varlen_map_access = env->varlen_map_value_access; struct idpair *idmap; bool ret = false; int i; @@ -3391,8 +3376,7 @@ static bool states_equal(struct bpf_verifier_env *env, return false; for (i = 0; i < MAX_BPF_REG; i++) { - if (!regsafe(&old->regs[i], &cur->regs[i], varlen_map_access, - idmap)) + if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) goto out_free; } @@ -3412,7 +3396,7 @@ static bool states_equal(struct bpf_verifier_env *env, continue; if (!regsafe(&old->spilled_regs[i / BPF_REG_SIZE], &cur->spilled_regs[i / BPF_REG_SIZE], - varlen_map_access, idmap)) + idmap)) /* when explored and current stack slot are both storing * spilled registers, check that stored pointers types * are the same as well. @@ -3555,7 +3539,6 @@ static int do_check(struct bpf_verifier_env *env) init_reg_state(regs); state->parent = NULL; insn_idx = 0; - env->varlen_map_value_access = false; for (;;) { struct bpf_insn *insn; u8 class; -- cgit v1.2.3-55-g7522 From 8e9cd9ce90d48369b2c5ddd79fe3d4a4cb1ccb56 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Wed, 23 Aug 2017 15:11:21 +0100 Subject: bpf/verifier: document liveness analysis The liveness tracking algorithm is quite subtle; add comments to explain it. Signed-off-by: Edward Cree Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- include/linux/bpf_verifier.h | 13 +++++++++++++ kernel/bpf/verifier.c | 28 +++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 1 deletion(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index d8f131a36fd0..b8d200f60a40 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -21,6 +21,19 @@ */ #define BPF_MAX_VAR_SIZ INT_MAX +/* Liveness marks, used for registers and spilled-regs (in stack slots). + * Read marks propagate upwards until they find a write mark; they record that + * "one of this state's descendants read this reg" (and therefore the reg is + * relevant for states_equal() checks). + * Write marks collect downwards and do not propagate; they record that "the + * straight-line code that reached this state (from its parent) wrote this reg" + * (and therefore that reads propagated from this state or its descendants + * should not propagate to its parent). + * A state with a write mark can receive read marks; it just won't propagate + * them to its parent, since the write mark is a property, not of the state, + * but of the link between it and its parent. See mark_reg_read() and + * mark_stack_slot_read() in kernel/bpf/verifier.c. + */ enum bpf_reg_liveness { REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 711bdbd22cea..d690c7dd1f1a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3417,6 +3417,12 @@ out_free: return ret; } +/* A write screens off any subsequent reads; but write marks come from the + * straight-line code between a state and its parent. When we arrive at a + * jump target (in the first iteration of the propagate_liveness() loop), + * we didn't arrive by the straight-line code, so read marks in state must + * propagate to parent regardless of state's write marks. + */ static bool do_propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { @@ -3457,6 +3463,15 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state, return touched; } +/* "parent" is "a state from which we reach the current state", but initially + * it is not the state->parent (i.e. "the state whose straight-line code leads + * to the current state"), instead it is the state that happened to arrive at + * a (prunable) equivalent of the current state. See comment above + * do_propagate_liveness() for consequences of this. + * This function is just a more efficient way of calling mark_reg_read() or + * mark_stack_slot_read() on each reg in "parent" that is read in "state", + * though it requires that parent != state->parent in the call arguments. + */ static void propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { @@ -3485,6 +3500,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. + * If we have any write marks in env->cur_state, they + * will prevent corresponding reads in the continuation + * from reaching our parent (an explored_state). Our + * own state will get the read marks recorded, but + * they'll be immediately forgotten as we're pruning + * this state and will pop a new one. 
*/ propagate_liveness(&sl->state, &env->cur_state); return 1; @@ -3508,7 +3529,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ env->cur_state.parent = &new_sl->state; - /* clear liveness marks in current state */ + /* clear write marks in current state: the writes we did are not writes + * our child did, so they don't screen off its reads from us. + * (There are no read marks in current state, because reads always mark + * their parent and current state never has children yet. Only + * explored_states can get read marks.) + */ for (i = 0; i < BPF_REG_FP; i++) env->cur_state.regs[i].live = REG_LIVE_NONE; for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) -- cgit v1.2.3-55-g7522 From d2aaa3dc419994eefa21de971bb1f544c42541c7 Mon Sep 17 00:00:00 2001 From: Shubham Bansal Date: Wed, 23 Aug 2017 21:29:10 +0530 Subject: bpf, doc: Add arm32 as arch supporting eBPF JIT As eBPF JIT support for arm32 was added recently with commit 39c13c204bb1150d401e27d41a9d8b332be47c49, it seems appropriate to add arm32 as arch with support for eBPF JIT in bpf and sysctl docs as well. Signed-off-by: Shubham Bansal Acked-by: Alexei Starovoitov Acked-by: Daniel Borkmann Signed-off-by: David S. Miller --- Documentation/networking/filter.txt | 4 ++-- Documentation/sysctl/net.txt | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 6a0df8df6c43..e5e33bac2068 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -596,8 +596,8 @@ skb pointer). All constraints and restrictions from bpf_check_classic() apply before a conversion to the new layout is being done behind the scenes! Currently, the classic BPF format is being used for JITing on most 32-bit -architectures, whereas x86-64, aarch64, s390x, powerpc64, sparc64 perform JIT -compilation from eBPF instruction set. +architectures, whereas x86-64, aarch64, s390x, powerpc64, sparc64, arm32 perform +JIT compilation from eBPF instruction set. Some core changes of the new internal format: diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt index 28596e03220b..b67044a2575f 100644 --- a/Documentation/sysctl/net.txt +++ b/Documentation/sysctl/net.txt @@ -46,13 +46,13 @@ translate these BPF proglets into native CPU instructions. There are two flavors of JITs, the newer eBPF JIT currently supported on: - x86_64 - arm64 + - arm32 - ppc64 - sparc64 - mips64 - s390x And the older cBPF JIT supported on the following archs: - - arm - mips - ppc - sparc -- cgit v1.2.3-55-g7522 From a5e2da6e9787187ff104c34aa048419703c1f9cb Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 24 Aug 2017 03:20:11 +0200 Subject: bpf: netdev is never null in __dev_map_flush No need to test for it in fast-path, every dev in bpf_dtab_netdev is guaranteed to be non-NULL, otherwise dev_map_update_elem() will fail in the first place. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Acked-by: John Fastabend Signed-off-by: David S. 
Miller --- kernel/bpf/devmap.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index bfecabfd4974..ecf9f99ecc57 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -226,12 +226,10 @@ void __dev_map_flush(struct bpf_map *map) if (unlikely(!dev)) continue; - netdev = dev->dev; __clear_bit(bit, bitmap); - if (unlikely(!netdev || !netdev->netdev_ops->ndo_xdp_flush)) - continue; - - netdev->netdev_ops->ndo_xdp_flush(netdev); + netdev = dev->dev; + if (likely(netdev->netdev_ops->ndo_xdp_flush)) + netdev->netdev_ops->ndo_xdp_flush(netdev); } } -- cgit v1.2.3-55-g7522 From d382b9c007827987c7939e2bd828af43c7af9668 Mon Sep 17 00:00:00 2001 From: Reizer, Eyal Date: Sun, 20 Aug 2017 12:28:18 +0000 Subject: wlcore: add missing nvs file name info for wilink8 The following commits: commit c815fdebef44 ("wlcore: spi: Populate config firmware data") commit d776fc86b82f ("wlcore: sdio: Populate config firmware data") Populated the nvs entry for wilink6 and wilink7 only while it is still needed for wilink8 as well. This broke user space backward compatibility when upgrading from older kernels, as the alternate mac address would not be read from the nvs that is present in the file system (lib/firmware/ti-connectivity/wl1271-nvs.bin) causing mac address change of the wlan interface. This patch fix this and update the structure field with the same default nvs file name that has been used before. In addition, some distros hold a default wl1271-nvs.bin in the file system with a bogus mac address (deadbeef...) that overrides the mac address that is stored inside the device. Warn users about this bogus mac address and use the internal mac address Fixes: c815fdebef44 ("wlcore: spi: Populate config firmware data") Fixes: d776fc86b82f ("wlcore: sdio: Populate config firmware data") Signed-off-by: Eyal Reizer Reviewed-by: Sebastian Reichel Tested-by: Tony Lindgren Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/main.c | 23 +++++++++++++++++++++++ drivers/net/wireless/ti/wlcore/sdio.c | 1 + drivers/net/wireless/ti/wlcore/spi.c | 1 + drivers/net/wireless/ti/wlcore/wlcore.h | 3 +++ 4 files changed, 28 insertions(+) diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 60aaa850fbd1..c346c021b999 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -6016,6 +6016,8 @@ static int wl1271_register_hw(struct wl1271 *wl) { int ret; u32 oui_addr = 0, nic_addr = 0; + struct platform_device *pdev = wl->pdev; + struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev); if (wl->mac80211_registered) return 0; @@ -6040,6 +6042,27 @@ static int wl1271_register_hw(struct wl1271 *wl) nic_addr = wl->fuse_nic_addr + 1; } + if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) { + wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.\n"); + if (!strcmp(pdev_data->family->name, "wl18xx")) { + wl1271_warning("This default nvs file can be removed from the file system\n"); + } else { + wl1271_warning("Your device performance is not optimized.\n"); + wl1271_warning("Please use the calibrator tool to configure your device.\n"); + } + + if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) { + wl1271_warning("Fuse mac address is zero. 
using random mac\n"); + /* Use TI oui and a random nic */ + oui_addr = WLCORE_TI_OUI_ADDRESS; + nic_addr = get_random_int(); + } else { + oui_addr = wl->fuse_oui_addr; + /* fuse has the BD_ADDR, the WLAN addresses are the next two */ + nic_addr = wl->fuse_nic_addr + 1; + } + } + wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr); ret = ieee80211_register_hw(wl->hw); diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 2fb38717346f..f8a1fea64e25 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c @@ -230,6 +230,7 @@ static const struct wilink_family_data wl128x_data = { static const struct wilink_family_data wl18xx_data = { .name = "wl18xx", .cfg_name = "ti-connectivity/wl18xx-conf.bin", + .nvs_name = "ti-connectivity/wl1271-nvs.bin", }; static const struct of_device_id wlcore_sdio_of_match_table[] = { diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index fdabb9242cca..62ce54a949e9 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -92,6 +92,7 @@ static const struct wilink_family_data wl128x_data = { static const struct wilink_family_data wl18xx_data = { .name = "wl18xx", .cfg_name = "ti-connectivity/wl18xx-conf.bin", + .nvs_name = "ti-connectivity/wl1271-nvs.bin", }; struct wl12xx_spi_glue { diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index 1827546ba807..95fbedc8ea34 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -40,6 +40,9 @@ /* wl12xx/wl18xx maximum transmission power (in dBm) */ #define WLCORE_MAX_TXPWR 25 +/* Texas Instruments pre assigned OUI */ +#define WLCORE_TI_OUI_ADDRESS 0x080028 + /* forward declaration */ struct wl1271_tx_hw_descr; enum wl_rx_buf_align; -- cgit v1.2.3-55-g7522 From 14cc69